diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index f20ed52c78c27df0639c36ba867689786620990c..44cbc5c61fefab1e3dde8409785ac95bc41f6f3d 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -117,6 +117,14 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 	show_val_kb(m, "Mapped:         ",
 		    global_node_page_state(NR_FILE_MAPPED));
 	show_val_kb(m, "Shmem:          ", i.sharedram);
+
+	show_val_kb(m, "Shmem_replace:  ", global_shmem_page_state(NR_REPLACE_PAGE_CACHE_PAGE));
+	show_val_kb(m, "Shmem_unaccount_cache: ", global_shmem_page_state(NR_UNACCOUNT_FROM_PAGE_CACHE));
+	show_val_kb(m, "Shmem_add_sh:   ", global_shmem_page_state(NR_SHMEM_ADD_TO_PAGE_CACHE));
+	show_val_kb(m, "Shmem_del_sh:   ", global_shmem_page_state(NR_SHMEM_DELETE_FROM_PAGE_CACHE));
+	show_val_kb(m, "Shmem_collapse: ", global_shmem_page_state(NR_COLLAPSE_SHMEM));
+	show_val_kb(m, "Shmem_init_mm:  ", atomic_long_read(&init_mm.shmem_usage));
+	show_val_kb(m, "Shmem_no_owner: ", global_shmem_page_state(NR_NO_OWNER));
 	show_val_kb(m, "KReclaimable:   ", sreclaimable +
 		    global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE));
 	show_val_kb(m, "Slab:           ", sreclaimable + sunreclaim);
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 14617d886df8d28cef8be9f61ad9222942596b71..c389e4855bc29d657784af23cc341e0cbbeee59a 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -2,6 +2,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -83,6 +84,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
 	seq_put_decimal_ull_width(m,
 		    " kB\nVmPTE:\t", mm_pgtables_bytes(mm) >> 10, 8);
 	SEQ_PUT_DEC(" kB\nVmSwap:\t", swap);
+	seq_put_decimal_ll(m, " kB\nShmem_usage:\t", atomic_long_read(&mm->shmem_usage) << (PAGE_SHIFT - 10));
 #ifdef CONFIG_MEM_PURGEABLE
 	SEQ_PUT_DEC(" kB\nPurgSum:\t", nr_purg_sum);
 	SEQ_PUT_DEC(" kB\nPurgPin:\t", nr_purg_pin);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index d86bc1d2dcc3c7e03076158e5eed317431c835ec..a794cb1a049e41d789065d15775a31bc0e5bc6fe 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -598,6 +598,7 @@ struct mm_struct {
 #ifdef CONFIG_HUGETLB_PAGE
 		atomic_long_t hugetlb_usage;
 #endif
+		atomic_long_t shmem_usage;
 		struct work_struct async_put_work;
 
 #ifdef CONFIG_IOMMU_SUPPORT
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index f4d955c67b54233c9a56a9a21530ab2ff36266a2..4488f6da76b63c78595780266d5ad63df7109e7b 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -230,6 +230,16 @@ enum node_stat_item {
 	NR_VM_NODE_STAT_ITEMS
 };
 
+enum shmem_stat_item {
+	NR_REPLACE_PAGE_CACHE_PAGE,
+	NR_UNACCOUNT_FROM_PAGE_CACHE,
+	NR_SHMEM_ADD_TO_PAGE_CACHE,
+	NR_SHMEM_DELETE_FROM_PAGE_CACHE,
+	NR_COLLAPSE_SHMEM,
+	NR_NO_OWNER,
+	NR_SHMEM_STAT_ITEMS
+};
+
 /*
  * Returns true if the value is measured in bytes (most vmstat values are
  * measured in pages). This defines the API part, the internal representation
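Not part of the patch: a quick userspace sketch for eyeballing the new global counters. It assumes the labels land in /proc/meminfo exactly as printed by the fs/proc/meminfo.c hunk above, and it simply echoes every line whose label starts with "Shmem" (so the pre-existing Shmem/ShmemHugePages lines are printed too).

/* dump_shmem_meminfo.c: print all Shmem* lines from /proc/meminfo (sketch, not part of the patch). */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/meminfo", "r");
	char line[256];

	if (!f) {
		perror("/proc/meminfo");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* Matches Shmem:, Shmem_replace:, Shmem_add_sh:, ... as emitted by meminfo_proc_show(). */
		if (!strncmp(line, "Shmem", 5))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}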
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a67583bfada70a49f875e3f8e7226fad2c0019b0..7fca4c25361b2edfadb89eb94fcdc941f9033d64 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -912,6 +912,8 @@ struct task_struct {
 	/* Used for emulating ABI behavior of previous Linux versions: */
 	unsigned int			personality;
 
+	unsigned int			shmem_times;
+	long				shmem_used;
 	/* Scheduler bits, serialized by scheduler locks: */
 	unsigned			sched_reset_on_fork:1;
 	unsigned			sched_contributes_to_load:1;
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 322dcbfcc933789a09793ca616dbfdaa2b2d2d29..f951915ab45aebcdc5cb627907160db2c9dcc202 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -9,6 +9,7 @@
 #include 
 #include 
 #include 
+#include 
 
 extern int sysctl_stat_interval;
 
@@ -141,6 +142,25 @@ extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
 extern atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
 extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];
 
+extern atomic_long_t vm_shmem_stat[NR_SHMEM_STAT_ITEMS];
+
+static inline void shmem_page_state_add(long x, enum shmem_stat_item item)
+{
+	if (x > 0)
+		atomic_long_add(x, &vm_shmem_stat[item]);
+	else
+		atomic_long_add(-x, &vm_shmem_stat[item]);
+	if (current->mm)
+		atomic_long_add(x, &current->mm->shmem_usage);
+	else
+		atomic_long_add(x, &init_mm.shmem_usage);
+}
+
+static inline unsigned long global_shmem_page_state(enum shmem_stat_item item)
+{
+	return atomic_long_read(&vm_shmem_stat[item]);
+}
+
 #ifdef CONFIG_NUMA
 static inline void zone_numa_state_add(long x, struct zone *zone,
 					enum numa_stat_item item)
diff --git a/init/init_task.c b/init/init_task.c
index 65e4a3432c6e4d4344631731d562694b44957da0..d0f89f8c59499bd1924599a2720e3df67b23d18c 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -85,6 +85,8 @@ struct task_struct init_task
 	.cpus_ptr	= &init_task.cpus_mask,
 	.cpus_mask	= CPU_MASK_ALL,
 	.nr_cpus_allowed= NR_CPUS,
+	.shmem_times	= 0,
+	.shmem_used	= 0,
 	.mm		= NULL,
 	.active_mm	= &init_mm,
 	.restart_block	= {
diff --git a/kernel/fork.c b/kernel/fork.c
index 38d67c0b81f2e8287308bb9d0fbc5b943ed4e9d2..269335606b4e1d3c3a06267354bc1a89ae36d3ff 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -670,6 +670,12 @@ static void check_mm(struct mm_struct *mm)
 		pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n",
 				mm_pgtables_bytes(mm));
 
+	if (atomic_long_read(&mm->shmem_usage)) {
+		pr_info("task %s, left shmem %ld kB\n", current->comm, atomic_long_read(&mm->shmem_usage) << (PAGE_SHIFT - 10));
+		atomic_long_add(atomic_long_read(&mm->shmem_usage), &vm_shmem_stat[NR_NO_OWNER]);
+		atomic_long_set(&mm->shmem_usage, 0);
+	}
+
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
 	VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
 #endif
@@ -1070,6 +1076,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 		goto fail_nocontext;
 
 	mm->user_ns = get_user_ns(user_ns);
+	atomic_long_set(&mm->shmem_usage, 0);
 	return mm;
 
 fail_nocontext:
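Not part of the patch: a standalone model of the accounting rules as I read the vmstat.h and kernel/fork.c hunks above. The global vm_shmem_stat[] slots accumulate the magnitude of every delta, so each one is a monotonically growing per-event page total, while mm->shmem_usage takes the signed delta and is charged to whichever mm is current (or to init_mm from kernel-thread context); any balance still left on an mm when check_mm() tears it down is moved to NR_NO_OWNER.

/* shmem_accounting_model.c: userspace model of shmem_page_state_add()/check_mm()
 * as sketched above; the names mirror the patch but nothing here is kernel code.
 */
#include <stdio.h>

enum { NR_SHMEM_ADD, NR_SHMEM_DEL, NR_NO_OWNER, NR_ITEMS };

static long vm_shmem_stat[NR_ITEMS];	/* global: per-event-type page totals, only grow */
static long mm_shmem_usage;		/* per-mm: signed balance of adds minus removals */

static void shmem_page_state_add(long x, int item)
{
	vm_shmem_stat[item] += (x > 0) ? x : -x;	/* global counter takes the magnitude */
	mm_shmem_usage += x;				/* per-mm balance takes the signed delta */
}

int main(void)
{
	shmem_page_state_add(8, NR_SHMEM_ADD);	/* e.g. shmem_add_to_page_cache() with nr = 8 */
	shmem_page_state_add(-3, NR_SHMEM_DEL);	/* e.g. three shmem_delete_from_page_cache() calls */

	/* check_mm() analogue: leftover per-mm balance is parked under NR_NO_OWNER. */
	if (mm_shmem_usage) {
		vm_shmem_stat[NR_NO_OWNER] += mm_shmem_usage;
		mm_shmem_usage = 0;
	}

	printf("add=%ld del=%ld no_owner=%ld\n",
	       vm_shmem_stat[NR_SHMEM_ADD], vm_shmem_stat[NR_SHMEM_DEL],
	       vm_shmem_stat[NR_NO_OWNER]);	/* prints: add=8 del=3 no_owner=5 */
	return 0;
}

One consequence worth noting: because the signed part is charged to current, shmem pages created by one task but dropped from another task's context (for example from a kernel thread, which charges init_mm) will not net out on the creator's mm, which appears to be exactly what the Shmem_no_owner and Shmem_init_mm counters in /proc/meminfo are meant to surface.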
diff --git a/mm/filemap.c b/mm/filemap.c
index 125b69f59caadaa21468ad81c0301ab8272358fa..cda24a02658dc198a59935b249e94de26b8f8281 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -44,6 +44,8 @@
 #include 
 #include "internal.h"
 
+#include 
+
 #define CREATE_TRACE_POINTS
 #include 
 
@@ -203,6 +205,8 @@ static void unaccount_page_cache_page(struct address_space *mapping,
 	__mod_lruvec_page_state(page, NR_FILE_PAGES, -nr);
 	if (PageSwapBacked(page)) {
 		__mod_lruvec_page_state(page, NR_SHMEM, -nr);
+		shmem_page_state_add(-nr, NR_UNACCOUNT_FROM_PAGE_CACHE);
+		current->shmem_used -= nr;
 		if (PageTransHuge(page))
 			__dec_node_page_state(page, NR_SHMEM_THPS);
 	} else if (PageTransHuge(page)) {
@@ -814,10 +818,16 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 		__dec_lruvec_page_state(old, NR_FILE_PAGES);
 	if (!PageHuge(new))
 		__inc_lruvec_page_state(new, NR_FILE_PAGES);
-	if (PageSwapBacked(old))
+	if (PageSwapBacked(old)) {
 		__dec_lruvec_page_state(old, NR_SHMEM);
-	if (PageSwapBacked(new))
+		shmem_page_state_add(-1, NR_REPLACE_PAGE_CACHE_PAGE);
+		current->shmem_used--;
+	}
+	if (PageSwapBacked(new)) {
 		__inc_lruvec_page_state(new, NR_SHMEM);
+		shmem_page_state_add(1, NR_REPLACE_PAGE_CACHE_PAGE);
+		current->shmem_used++;
+	}
 	xas_unlock_irqrestore(&xas, flags);
 	if (freepage)
 		freepage(old);
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 969e57dde65f981a4b9dea8c5986aba8f80bc7f3..60d88623799d8d88c8d5a463d83deeaaadbafe0b 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -18,6 +18,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 
@@ -1869,8 +1870,10 @@ static void collapse_file(struct mm_struct *mm,
 
 	if (nr_none) {
 		__mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
-		if (is_shmem)
+		if (is_shmem) {
 			__mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
+			shmem_page_state_add(nr_none, NR_COLLAPSE_SHMEM);
+		}
 	}
 
 xa_locked:
diff --git a/mm/shmem.c b/mm/shmem.c
index ae8adca3b56d1d5073b92983c87ffaa8d3899baa..b06994e0c0a1beea7c65cf1eb9fe3cf33d5bdc7b 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -38,6 +38,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include  /* for arch/microblaze update_mmu_cache() */
 
@@ -718,6 +719,9 @@ static int shmem_add_to_page_cache(struct page *page,
 		mapping->nrpages += nr;
 		__mod_lruvec_page_state(page, NR_FILE_PAGES, nr);
 		__mod_lruvec_page_state(page, NR_SHMEM, nr);
+		shmem_page_state_add(nr, NR_SHMEM_ADD_TO_PAGE_CACHE);
+		current->shmem_times++;
+		current->shmem_used += nr;
 unlock:
 		xas_unlock_irq(&xas);
 	} while (xas_nomem(&xas, gfp));
@@ -750,6 +754,7 @@ static void shmem_delete_from_page_cache(struct page *page, void *radswap)
 	mapping->nrpages--;
 	__dec_lruvec_page_state(page, NR_FILE_PAGES);
 	__dec_lruvec_page_state(page, NR_SHMEM);
+	shmem_page_state_add(-1, NR_SHMEM_DELETE_FROM_PAGE_CACHE);
 	xa_unlock_irq(&mapping->i_pages);
 	put_page(page);
 	BUG_ON(error);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 3e89021a3f750ebc98756553a69eadce775d04f7..e767f4c12bd235631e9fad1952712281bc449626 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -167,6 +167,9 @@ EXPORT_SYMBOL(vm_zone_stat);
 EXPORT_SYMBOL(vm_numa_stat);
 EXPORT_SYMBOL(vm_node_stat);
 
+atomic_long_t vm_shmem_stat[NR_SHMEM_STAT_ITEMS] __cacheline_aligned_in_smp;
+EXPORT_SYMBOL(vm_shmem_stat);
+
 #ifdef CONFIG_SMP
 
 int calculate_pressure_threshold(struct zone *zone)
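Not part of the patch: a smoke-test sketch for the per-process counter. It assumes the corrected task_mmu.c label above ("Shmem_usage:" in /proc/<pid>/status) and that pages of a memfd (tmpfs-backed) mapping faulted in by this process go through shmem_add_to_page_cache() and are therefore charged to this process's mm via shmem_page_state_add().

/* shmem_usage_test.c: create a shmem file, fault its pages, show our own accounting (sketch). */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	size_t len = 16 * (size_t)pg;
	int fd = memfd_create("shmem_usage_test", 0);	/* memfd is tmpfs-backed, i.e. shmem */
	char *p;
	FILE *f;
	char line[256];

	if (fd < 0 || ftruncate(fd, (off_t)len) < 0)
		return 1;
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	/* Touch every page so each write fault adds a shmem page to the page cache. */
	for (size_t off = 0; off < len; off += (size_t)pg)
		p[off] = 1;

	/* task_mem() feeds /proc/<pid>/status, so the new field should show up here. */
	f = fopen("/proc/self/status", "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "Shmem_usage:", 12))
			fputs(line, stdout);
	fclose(f);
	return 0;
}

Running this while also diffing Shmem_add_sh and Shmem_del_sh in /proc/meminfo before and after would exercise the global counters introduced by the vmstat.c and meminfo.c hunks at the same time.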