From 1cd52589256afe00653dea183b06b9649b841267 Mon Sep 17 00:00:00 2001 From: Xin Hao Date: Tue, 7 Dec 2021 10:48:35 +0800 Subject: [PATCH 01/12] anolis: mm/damon: Add NUMA local and remote variables in 'damon_region' ANBZ: #268 ANBZ: #20937 The purpose of adding these two variables 'local' & 'remote' is to obtain the struct 'damon_region' numa access status. Signed-off-by: Xin Hao Signed-off-by: Rongwei Wang Reviewed-by: zhongjiang-ali Acked-by: Xunlei Pang Signed-off-by: Weilin Tong --- include/linux/damon.h | 4 ++++ mm/damon/core.c | 6 ++++++ 2 files changed, 10 insertions(+) diff --git a/include/linux/damon.h b/include/linux/damon.h index 343132a146cf..bc4793746cfd 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -42,6 +42,8 @@ struct damon_addr_range { * @nr_accesses: Access frequency of this region. * @list: List head for siblings. * @age: Age of this region. + * @local: Local numa node accesses. + * @remote: Remote numa node accesses. * * @age is initially zero, increased for each aggregation interval, and reset * to zero again if the access frequency is significantly changed. If two @@ -57,6 +59,8 @@ struct damon_region { unsigned int age; /* private: Internal value for age calculation. */ unsigned int last_nr_accesses; + unsigned long local; + unsigned long remote; }; /** diff --git a/mm/damon/core.c b/mm/damon/core.c index 43e4fe7ef17e..86d7ac6e1412 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -132,6 +132,8 @@ struct damon_region *damon_new_region(unsigned long start, unsigned long end) region->age = 0; region->last_nr_accesses = 0; + region->local = 0; + region->remote = 0; return region; } @@ -1127,6 +1129,8 @@ static void damon_merge_two_regions(struct damon_target *t, l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) / (sz_l + sz_r); + l->remote = (l->remote * sz_l + r->remote * sz_r) / (sz_l + sz_r); + l->local = (l->local * sz_l + r->local * sz_r) / (sz_l + sz_r); l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r); l->ar.end = r->ar.end; damon_destroy_region(r, t); @@ -1216,6 +1220,8 @@ static void damon_split_region_at(struct damon_target *t, new->age = r->age; new->last_nr_accesses = r->last_nr_accesses; new->nr_accesses = r->nr_accesses; + new->local = r->local; + new->remote = r->remote; damon_insert_region(new, r, damon_next_region(r), t); } -- Gitee From 1a8b48d68c213f4d73f41bb360639e3a8d1277a0 Mon Sep 17 00:00:00 2001 From: Xin Hao Date: Tue, 7 Dec 2021 10:48:35 +0800 Subject: [PATCH 02/12] anolis: mm/damon: Add 'damon_region' NUMA fault simulation support ANBZ: #268 ANBZ: #20937 These codes development here refers to NUMA balance code, it will cause a page_fault, in do_numa_page(), we will count 'damon_region' NUMA local and remote values. 
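The mechanism mirrors NUMA balancing: during the sampling walk, present PTEs/PMDs are rewritten as PROT_NONE-style entries (the write bit is preserved), so the page stays resident but the next access raises a NUMA hint fault that lands in do_numa_page() or do_huge_pmd_numa_page(). There the node holding the page is compared with the node of the CPU that took the fault. A minimal sketch of that counting step (illustrative only: the helper name is made up here, the real hunks are added by a later patch in this series, and the damon_region lookup from the faulting address is assumed to have happened already):

    #include <linux/topology.h>

    /* Count one NUMA hint fault against the region covering the fault address. */
    static inline void damon_count_numa_access(struct damon_region *r, int page_nid)
    {
        if (page_nid == numa_node_id())  /* page sits on the faulting CPU's node */
            r->local++;
        else                             /* the access crossed a NUMA node boundary */
            r->remote++;
    }
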
Signed-off-by: Xin Hao Signed-off-by: Rongwei Wang Reviewed-by: zhongjiang-ali Acked-by: Xunlei Pang Signed-off-by: Weilin Tong --- mm/damon/vaddr.c | 75 ++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 66 insertions(+), 9 deletions(-) diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c index 5764b9885e7d..24e22fec54ba 100644 --- a/mm/damon/vaddr.c +++ b/mm/damon/vaddr.c @@ -299,12 +299,57 @@ static void damon_va_update(struct damon_ctx *ctx) } } -static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr, +static bool damon_pmdp_mknone(pmd_t *pmd, struct mm_walk *walk, unsigned long addr) +{ + bool preserve_write; + pmd_t entry = *pmd; + + if (is_huge_zero_pmd(entry) || pmd_protnone(entry)) + return false; + + if (pmd_present(entry)) { + preserve_write = pmd_write(entry); + entry = pmdp_invalidate(walk->vma, addr, pmd); + entry = pmd_modify(entry, PAGE_NONE); + if (preserve_write) + entry = pmd_mkwrite(entry, walk->vma); + + set_pmd_at(walk->mm, addr, pmd, entry); + return true; + } + return false; +} + +static bool damon_ptep_mknone(pte_t *pte, struct mm_walk *walk, unsigned long addr) +{ + pte_t oldpte, ptent; + bool preserve_write; + + oldpte = *pte; + if (pte_protnone(oldpte)) + return false; + + if (pte_present(oldpte)) { + preserve_write = pte_write(oldpte); + oldpte = ptep_modify_prot_start(walk->vma, addr, pte); + ptent = pte_modify(oldpte, PAGE_NONE); + + if (preserve_write) + ptent = pte_mkwrite(ptent, walk->vma); + + ptep_modify_prot_commit(walk->vma, addr, pte, oldpte, ptent); + return true; + } + return false; +} + +static int damon_va_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long next, struct mm_walk *walk) { pte_t *pte; pmd_t pmde; spinlock_t *ptl; + bool result = false; if (pmd_trans_huge(pmdp_get(pmd))) { ptl = pmd_lock(walk->mm, pmd); @@ -317,7 +362,14 @@ static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr, if (pmd_trans_huge(pmde)) { damon_pmdp_mkold(pmd, walk->vma, addr); + if (nr_online_nodes > 1) + result = damon_pmdp_mknone(pmd, walk, addr); spin_unlock(ptl); + if (result) { + unsigned long haddr = addr & HPAGE_PMD_MASK; + + flush_tlb_range(walk->vma, haddr, haddr + HPAGE_PMD_SIZE); + } return 0; } spin_unlock(ptl); @@ -328,11 +380,16 @@ static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr, walk->action = ACTION_AGAIN; return 0; } - if (!pte_present(ptep_get(pte))) - goto out; + if (!pte_present(*pte)) { + pte_unmap_unlock(pte, ptl); + return 0; + } damon_ptep_mkold(pte, walk->vma, addr); -out: + if (nr_online_nodes > 1) + result = damon_ptep_mknone(pte, walk, addr); pte_unmap_unlock(pte, ptl); + if (result) + flush_tlb_page(walk->vma, addr); return 0; } @@ -389,16 +446,16 @@ static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask, #define damon_mkold_hugetlb_entry NULL #endif /* CONFIG_HUGETLB_PAGE */ -static const struct mm_walk_ops damon_mkold_ops = { - .pmd_entry = damon_mkold_pmd_entry, +static const struct mm_walk_ops damon_va_ops = { + .pmd_entry = damon_va_pmd_entry, .hugetlb_entry = damon_mkold_hugetlb_entry, .walk_lock = PGWALK_RDLOCK, }; -static void damon_va_mkold(struct mm_struct *mm, unsigned long addr) +static void damon_va_check(struct mm_struct *mm, unsigned long addr) { mmap_read_lock(mm); - walk_page_range(mm, addr, addr + 1, &damon_mkold_ops, NULL); + walk_page_range(mm, addr, addr + 1, &damon_va_ops, NULL); mmap_read_unlock(mm); } @@ -411,7 +468,7 @@ static void __damon_va_prepare_access_check(struct mm_struct *mm, { r->sampling_addr = damon_rand(r->ar.start, r->ar.end); - 
damon_va_mkold(mm, r->sampling_addr); + damon_va_check(mm, r->sampling_addr); } static void damon_va_prepare_access_checks(struct damon_ctx *ctx) -- Gitee From b4ff5a5b53aad09ca29004e61aaad70512a8ffc0 Mon Sep 17 00:00:00 2001 From: Xin Hao Date: Thu, 9 Dec 2021 23:20:48 +0800 Subject: [PATCH 03/12] anolis: mm/damon: Add 'damon_region' NUMA local and remote access statistics ANBZ: #268 ANBZ: #20937 In do_numa_page(), Comparing current pid with damon monitor process's pid through vmf->address to get the right 'damon_region', then get 'damon_regon' numa local and remote access and store them to 'struct damon_region' local and remote variables. Signed-off-by: Xin Hao Signed-off-by: Rongwei Wang Reviewed-by: zhongjiang-ali Acked-by: Xunlei Pang Signed-off-by: Weilin Tong --- include/linux/damon.h | 20 +++++++++++ mm/damon/core.c | 77 ++++++++++++++++++++++++++++++++++++++++++- mm/damon/dbgfs.c | 17 +++++++--- mm/damon/vaddr.c | 10 ++---- mm/huge_memory.c | 2 ++ mm/memory.c | 2 ++ 6 files changed, 114 insertions(+), 14 deletions(-) diff --git a/include/linux/damon.h b/include/linux/damon.h index bc4793746cfd..fcbebb34996b 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -13,6 +13,7 @@ #include #include #include +#include /* Minimal region size. Every damon_region is aligned by this. */ #define DAMON_MIN_REGION PAGE_SIZE @@ -25,6 +26,9 @@ static inline unsigned long damon_rand(unsigned long l, unsigned long r) return l + get_random_u32_below(r - l); } +extern struct damon_ctx **dbgfs_ctxs; +extern int dbgfs_nr_ctxs; + /** * struct damon_addr_range - Represents an address region of [@start, @end). * @start: Start address of the region (inclusive). @@ -69,6 +73,7 @@ struct damon_region { * @nr_regions: Number of monitoring target regions of this target. * @regions_list: Head of the monitoring target regions of this target. * @list: List head for siblings. + * @target_lock: Use damon_region lock to avoid race. * * Each monitoring context could have multiple targets. For example, a context * for virtual memory address spaces could have multiple target processes. The @@ -80,6 +85,7 @@ struct damon_target { unsigned int nr_regions; struct list_head regions_list; struct list_head list; + spinlock_t target_lock; }; /** @@ -687,4 +693,18 @@ int damon_set_region_biggest_system_ram_default(struct damon_target *t, #endif /* CONFIG_DAMON */ +#ifdef CONFIG_DAMON_VADDR +/* + * 't->id' should be the pointer to the relevant 'struct pid' having reference + * count. Caller must put the returned task, unless it is NULL. 
+ */ +#define damon_get_task_struct(t) \ +(get_pid_task((struct pid *)t->pid, PIDTYPE_PID)) + +void damon_numa_fault(int page_nid, int node_id, struct vm_fault *vmf); +#else +static inline void damon_numa_fault(int page_nid, int node_id, struct vm_fault *vmf) { } + +#endif /* CONFIG_DAMON_VADDR */ + #endif /* _DAMON_H */ diff --git a/mm/damon/core.c b/mm/damon/core.c index 86d7ac6e1412..675d00db03f3 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -399,6 +399,7 @@ struct damon_target *damon_new_target(void) t->nr_regions = 0; INIT_LIST_HEAD(&t->regions_list); INIT_LIST_HEAD(&t->list); + spin_lock_init(&t->target_lock); return t; } @@ -1192,8 +1193,10 @@ static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold, do { nr_regions = 0; damon_for_each_target(t, c) { + spin_lock(&t->target_lock); damon_merge_regions_of(t, threshold, sz_limit); nr_regions += damon_nr_regions(t); + spin_unlock(&t->target_lock); } threshold = max(1, threshold * 2); } while (nr_regions > c->attrs.max_nr_regions && @@ -1282,8 +1285,11 @@ static void kdamond_split_regions(struct damon_ctx *ctx) nr_regions < ctx->attrs.max_nr_regions / 3) nr_subregions = 3; - damon_for_each_target(t, ctx) + damon_for_each_target(t, ctx) { + spin_lock(&t->target_lock); damon_split_regions_of(t, nr_subregions); + spin_unlock(&t->target_lock); + } last_nr_regions = nr_regions; } @@ -1507,8 +1513,10 @@ static int kdamond_fn(void *data) } done: damon_for_each_target(t, ctx) { + spin_lock(&t->target_lock); damon_for_each_region_safe(r, next, t) damon_destroy_region(r, t); + spin_unlock(&t->target_lock); } if (ctx->callback.before_terminate) @@ -1603,6 +1611,73 @@ int damon_set_region_biggest_system_ram_default(struct damon_target *t, return damon_set_regions(t, &addr_range, 1); } +static struct damon_target *get_damon_target(struct task_struct *task) +{ + int i; + unsigned long id1, id2; + struct damon_target *t; + + rcu_read_lock(); + for (i = 0; i < READ_ONCE(dbgfs_nr_ctxs); i++) { + struct damon_ctx *ctx = rcu_dereference(dbgfs_ctxs[i]); + + if (!ctx || !ctx->kdamond) + continue; + damon_for_each_target(t, dbgfs_ctxs[i]) { + struct task_struct *ts = damon_get_task_struct(t); + + if (ts) { + id1 = (unsigned long)pid_vnr((struct pid *)t->pid); + id2 = (unsigned long)pid_vnr(get_task_pid(task, PIDTYPE_PID)); + put_task_struct(ts); + if (id1 == id2) + return t; + } + } + } + rcu_read_unlock(); + + return NULL; +} + +static struct damon_region *get_damon_region(struct damon_target *t, unsigned long addr) +{ + struct damon_region *r, *next; + + if (!t || !addr) + return NULL; + + spin_lock(&t->target_lock); + damon_for_each_region_safe(r, next, t) { + if (r->ar.start <= addr && r->ar.end >= addr) { + spin_unlock(&t->target_lock); + return r; + } + } + spin_unlock(&t->target_lock); + + return NULL; +} + +void damon_numa_fault(int page_nid, int node_id, struct vm_fault *vmf) +{ + struct damon_target *t; + struct damon_region *r; + + if (nr_online_nodes > 1) { + t = get_damon_target(current); + if (t) { + r = get_damon_region(t, vmf->address); + if (r) { + if (page_nid == node_id) + r->local++; + else + r->remote++; + } + } + } +} + static int __init damon_init(void) { damon_region_cache = KMEM_CACHE(damon_region, 0); diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c index dc0ea1fc30ca..231b437969a1 100644 --- a/mm/damon/dbgfs.c +++ b/mm/damon/dbgfs.c @@ -15,8 +15,8 @@ #include #include -static struct damon_ctx **dbgfs_ctxs; -static int dbgfs_nr_ctxs; +struct damon_ctx **dbgfs_ctxs; +int dbgfs_nr_ctxs; static struct dentry 
**dbgfs_dirs; static DEFINE_MUTEX(damon_dbgfs_lock); @@ -937,10 +937,18 @@ static int dbgfs_rm_context(char *name) goto out_new_dirs; } - for (i = 0, j = 0; i < dbgfs_nr_ctxs; i++) { + dbgfs_nr_ctxs--; + /* Prevent NUMA fault get the wrong value */ + smp_mb(); + + for (i = 0, j = 0; i < dbgfs_nr_ctxs + 1; i++) { if (dbgfs_dirs[i] == dir) { + struct damon_ctx *tmp_ctx = dbgfs_ctxs[i]; + + rcu_assign_pointer(dbgfs_ctxs[i], NULL); + synchronize_rcu(); debugfs_remove(dbgfs_dirs[i]); - dbgfs_destroy_ctx(dbgfs_ctxs[i]); + dbgfs_destroy_ctx(tmp_ctx); continue; } new_dirs[j] = dbgfs_dirs[i]; @@ -952,7 +960,6 @@ static int dbgfs_rm_context(char *name) dbgfs_dirs = new_dirs; dbgfs_ctxs = new_ctxs; - dbgfs_nr_ctxs--; goto out_dput; diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c index 24e22fec54ba..afd3afa5487f 100644 --- a/mm/damon/vaddr.c +++ b/mm/damon/vaddr.c @@ -22,14 +22,6 @@ #define DAMON_MIN_REGION 1 #endif -/* - * 't->pid' should be the pointer to the relevant 'struct pid' having reference - * count. Caller must put the returned task, unless it is NULL. - */ -static inline struct task_struct *damon_get_task_struct(struct damon_target *t) -{ - return get_pid_task(t->pid, PIDTYPE_PID); -} /* * Get the mm_struct of the given target @@ -295,7 +287,9 @@ static void damon_va_update(struct damon_ctx *ctx) damon_for_each_target(t, ctx) { if (damon_va_three_regions(t, three_regions)) continue; + spin_lock(&t->target_lock); damon_set_regions(t, three_regions, 3); + spin_unlock(&t->target_lock); } } diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 92556dfe8451..5728934935a8 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -40,6 +40,7 @@ #include #include #include +#include #include #include @@ -2170,6 +2171,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) if (node_is_toptier(nid)) last_cpupid = folio_last_cpupid(folio); target_nid = numa_migrate_prep(folio, vma, haddr, nid, &flags); + damon_numa_fault(nid, numa_node_id(), vmf); if (target_nid == NUMA_NO_NODE) { folio_put(folio); goto out_map; diff --git a/mm/memory.c b/mm/memory.c index 1870f34a7981..e2b0625b9b96 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -79,6 +79,7 @@ #include #include #include +#include #include @@ -5956,6 +5957,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) else last_cpupid = folio_last_cpupid(folio); target_nid = numa_migrate_prep(folio, vma, vmf->address, nid, &flags); + damon_numa_fault(nid, numa_node_id(), vmf); if (target_nid == NUMA_NO_NODE) { folio_put(folio); goto out_map; -- Gitee From b9a32b1513982c49fce8d08513ef10a768bf92f5 Mon Sep 17 00:00:00 2001 From: Xin Hao Date: Tue, 21 Dec 2021 23:10:41 +0800 Subject: [PATCH 04/12] anolis: mm/damon/dbgfs: Add numa simulate switch ANBZ: #268 ANBZ: #20937 For applications that frequently access the memory area, Doing numa simulation will cause a lot of pagefault and tlb misses which will cause the applications performance regression. So there adds a switch to turn off the numa simulation function by default, if you want to turn on this function just do like below. 
# cd /sys/kernel/debug/damon/ # echo on > numa_stat # cat numa_stat # on Signed-off-by: Xin Hao Signed-off-by: Rongwei Wang Reviewed-by: zhongjiang-ali Acked-by: Xunlei Pang Signed-off-by: Weilin Tong --- include/linux/damon.h | 1 + mm/damon/core.c | 10 ++++++++- mm/damon/dbgfs.c | 52 +++++++++++++++++++++++++++++++++++++++++-- mm/damon/vaddr.c | 6 +++-- 4 files changed, 64 insertions(+), 5 deletions(-) diff --git a/include/linux/damon.h b/include/linux/damon.h index fcbebb34996b..b70488be07a2 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -19,6 +19,7 @@ #define DAMON_MIN_REGION PAGE_SIZE /* Max priority score for DAMON-based operation schemes */ #define DAMOS_MAX_SCORE (99) +DECLARE_STATIC_KEY_FALSE(numa_stat_enabled_key); /* Get a random number in [l, r) */ static inline unsigned long damon_rand(unsigned long l, unsigned long r) diff --git a/mm/damon/core.c b/mm/damon/core.c index 675d00db03f3..63fe034302d4 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -1535,6 +1535,13 @@ static int kdamond_fn(void *data) running_exclusive_ctxs = false; mutex_unlock(&damon_lock); + /* + * when no kdamond threads are running, the + * 'numa_stat_enabled_key' keeps default value. + */ + if (!nr_running_ctxs) + static_branch_disable(&numa_stat_enabled_key); + return 0; } @@ -1664,7 +1671,8 @@ void damon_numa_fault(int page_nid, int node_id, struct vm_fault *vmf) struct damon_target *t; struct damon_region *r; - if (nr_online_nodes > 1) { + if (static_branch_unlikely(&numa_stat_enabled_key) + && nr_online_nodes > 1) { t = get_damon_target(current); if (t) { r = get_damon_region(t, vmf->address); diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c index 231b437969a1..7469fcb729f1 100644 --- a/mm/damon/dbgfs.c +++ b/mm/damon/dbgfs.c @@ -718,6 +718,49 @@ static ssize_t dbgfs_kdamond_pid_read(struct file *file, return len; } +DEFINE_STATIC_KEY_FALSE(numa_stat_enabled_key); + +static ssize_t dbgfs_numa_stat_read(struct file *file, + char __user *buf, size_t count, loff_t *ppos) +{ + char numa_on_buf[5]; + bool enable = static_branch_unlikely(&numa_stat_enabled_key); + int len; + + len = scnprintf(numa_on_buf, 5, enable ? 
"on\n" : "off\n"); + + return simple_read_from_buffer(buf, count, ppos, numa_on_buf, len); +} + +static ssize_t dbgfs_numa_stat_write(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + ssize_t ret = 0; + char *kbuf; + + kbuf = user_input_str(buf, count, ppos); + if (IS_ERR(kbuf)) + return PTR_ERR(kbuf); + + /* Remove white space */ + if (sscanf(kbuf, "%s", kbuf) != 1) { + kfree(kbuf); + return -EINVAL; + } + + if (!strncmp(kbuf, "on", count)) + static_branch_enable(&numa_stat_enabled_key); + else if (!strncmp(kbuf, "off", count)) + static_branch_disable(&numa_stat_enabled_key); + else + ret = -EINVAL; + + if (!ret) + ret = count; + kfree(kbuf); + return ret; +} + static int damon_dbgfs_open(struct inode *inode, struct file *file) { damon_dbgfs_warn_deprecation(); @@ -756,12 +799,17 @@ static const struct file_operations kdamond_pid_fops = { .read = dbgfs_kdamond_pid_read, }; +static const struct file_operations numa_stat_ops = { + .write = dbgfs_numa_stat_write, + .read = dbgfs_numa_stat_read, +}; + static void dbgfs_fill_ctx_dir(struct dentry *dir, struct damon_ctx *ctx) { const char * const file_names[] = {"attrs", "schemes", "target_ids", - "init_regions", "kdamond_pid"}; + "init_regions", "kdamond_pid", "numa_stat"}; const struct file_operations *fops[] = {&attrs_fops, &schemes_fops, - &target_ids_fops, &init_regions_fops, &kdamond_pid_fops}; + &target_ids_fops, &init_regions_fops, &kdamond_pid_fops, &numa_stat_ops}; int i; for (i = 0; i < ARRAY_SIZE(file_names); i++) diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c index afd3afa5487f..fd967d705e53 100644 --- a/mm/damon/vaddr.c +++ b/mm/damon/vaddr.c @@ -356,7 +356,8 @@ static int damon_va_pmd_entry(pmd_t *pmd, unsigned long addr, if (pmd_trans_huge(pmde)) { damon_pmdp_mkold(pmd, walk->vma, addr); - if (nr_online_nodes > 1) + if (static_branch_unlikely(&numa_stat_enabled_key) && + nr_online_nodes > 1) result = damon_pmdp_mknone(pmd, walk, addr); spin_unlock(ptl); if (result) { @@ -379,7 +380,8 @@ static int damon_va_pmd_entry(pmd_t *pmd, unsigned long addr, return 0; } damon_ptep_mkold(pte, walk->vma, addr); - if (nr_online_nodes > 1) + if (static_branch_unlikely(&numa_stat_enabled_key) && + nr_online_nodes > 1) result = damon_ptep_mknone(pte, walk, addr); pte_unmap_unlock(pte, ptl); if (result) -- Gitee From 1f132f346697f0826d69b507ce547bab12577e7b Mon Sep 17 00:00:00 2001 From: Xin Hao Date: Thu, 9 Dec 2021 23:20:48 +0800 Subject: [PATCH 05/12] anolis: mm/damon/tracepoint: Do a little format adjustment ANBZ: #268 ANBZ: #20937 In order to make the user tools more friendly to parsing these data, Change the target_id to the process id and the region address format to hexadecimal. 
Signed-off-by: Xin Hao Signed-off-by: Rongwei Wang Reviewed-by: zhongjiang-ali Acked-by: Xunlei Pang Signed-off-by: Weilin Tong --- include/trace/events/damon.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/trace/events/damon.h b/include/trace/events/damon.h index c79f1d4c39af..a4739a890136 100644 --- a/include/trace/events/damon.h +++ b/include/trace/events/damon.h @@ -34,7 +34,7 @@ TRACE_EVENT(damon_aggregated, __entry->age = r->age; ), - TP_printk("target_id=%lu nr_regions=%u %lu-%lu: %u %u", + TP_printk("target_id=%lu nr_regions=%u %lx-%lx: %u %u", __entry->target_id, __entry->nr_regions, __entry->start, __entry->end, __entry->nr_accesses, __entry->age) -- Gitee From 12c364106e69e18a03e2963afec572ae47b8e362 Mon Sep 17 00:00:00 2001 From: Xin Hao Date: Thu, 9 Dec 2021 23:20:48 +0800 Subject: [PATCH 06/12] anolis: mm/damon/tracepoint: Add 'damon_region' NUMA access statistics support ANBZ: #268 ANBZ: #20937 This patch is used to support 'damon_region' NUMA access for tracepoint, The purpose of this is to facilitate users to obtain the numa access status of 'damon_region' through perf tool or others tools. Signed-off-by: Xin Hao Signed-off-by: Rongwei Wang Reviewed-by: zhongjiang-ali Acked-by: Xunlei Pang Signed-off-by: Weilin Tong --- include/trace/events/damon.h | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/include/trace/events/damon.h b/include/trace/events/damon.h index a4739a890136..c976a093a4e4 100644 --- a/include/trace/events/damon.h +++ b/include/trace/events/damon.h @@ -23,6 +23,8 @@ TRACE_EVENT(damon_aggregated, __field(unsigned long, end) __field(unsigned int, nr_accesses) __field(unsigned int, age) + __field(unsigned long, local) + __field(unsigned long, remote) ), TP_fast_assign( @@ -32,12 +34,15 @@ TRACE_EVENT(damon_aggregated, __entry->end = r->ar.end; __entry->nr_accesses = r->nr_accesses; __entry->age = r->age; + __entry->local = r->local; + __entry->remote = r->remote; ), - TP_printk("target_id=%lu nr_regions=%u %lx-%lx: %u %u", + TP_printk("target_id=%lu nr_regions=%u %lx-%lx: %u %u %lu %lu", __entry->target_id, __entry->nr_regions, __entry->start, __entry->end, - __entry->nr_accesses, __entry->age) + __entry->nr_accesses, __entry->age, + __entry->local, __entry->remote) ); #endif /* _TRACE_DAMON_H */ -- Gitee From d216068ec9ca749e8cfc7dd8f1bb3ecac10bfbfe Mon Sep 17 00:00:00 2001 From: Xin Hao Date: Mon, 28 Mar 2022 11:11:33 +0800 Subject: [PATCH 07/12] anolis: mm/damon: Fix NUMA statistics bug ANBZ: #708 ANBZ: #20937 In the previous patch, we used NUMA simulation to count the cross-NUMA access of each process, However, we ignore a problem, that is threads, The values of thread and process 'PID' are different, so the statistics are inaccurate, Here we use the 'tgid' of "task_struct" values are consistent to resolve this bug. 
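For example (hypothetical pids): if the monitored process has tgid 1234 and one of its worker threads, pid 1235, takes the NUMA hint fault, then current->pid is 1235 and never matches the pid recorded for the target, so the access goes uncounted. Both tasks share tgid 1234, so a comparison along the lines of the sketch below (illustrative; the helper name is made up, the real change is in the diff) attributes the access to the right target:

    #include <linux/sched.h>

    /* Threads of one process share tgid; pid differs per thread. */
    static bool damon_same_thread_group(struct task_struct *ts,
                                        struct task_struct *task)
    {
        return ts->tgid == task->tgid;
    }
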
Fixes: e7c2c8a9182f ("anolis: mm/damon: Add 'damon_region' NUMA local and remote access statistics") Signed-off-by: Xin Hao Signed-off-by: Rongwei Wang Acked-by: Gang Deng Acked-by: Xunlei Pang Signed-off-by: Weilin Tong --- mm/damon/core.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/mm/damon/core.c b/mm/damon/core.c index 63fe034302d4..9993ada2016d 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -1621,7 +1621,6 @@ int damon_set_region_biggest_system_ram_default(struct damon_target *t, static struct damon_target *get_damon_target(struct task_struct *task) { int i; - unsigned long id1, id2; struct damon_target *t; rcu_read_lock(); @@ -1633,13 +1632,15 @@ static struct damon_target *get_damon_target(struct task_struct *task) damon_for_each_target(t, dbgfs_ctxs[i]) { struct task_struct *ts = damon_get_task_struct(t); - if (ts) { - id1 = (unsigned long)pid_vnr((struct pid *)t->pid); - id2 = (unsigned long)pid_vnr(get_task_pid(task, PIDTYPE_PID)); + if (!ts) + continue; + + if (ts->tgid == task->tgid) { put_task_struct(ts); - if (id1 == id2) - return t; + rcu_read_unlock(); + return t; } + put_task_struct(ts); } } rcu_read_unlock(); -- Gitee From 6a5abcd830552c4165212048b18feb0268bd1879 Mon Sep 17 00:00:00 2001 From: Xin Hao Date: Thu, 31 Mar 2022 11:45:19 +0800 Subject: [PATCH 08/12] anolis: mm/damon: Solve the bug of NUMA statistics about threads ANBZ: #708 ANBZ: #20937 When the parent process 'p1' is used to create child threads 't1', 't2', and only 'p1' is monitored, we found the NUMA access statistics cannot be collected. Fixes: a4ddc37e39f0 ("anolis: mm/damon: Fix NUMA statistics bug") Signed-off-by: Xin Hao Acked-by: Gang Deng Acked-by: Xunlei Pang Signed-off-by: Weilin Tong --- mm/damon/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/damon/core.c b/mm/damon/core.c index 9993ada2016d..c453268d7c44 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -1635,7 +1635,7 @@ static struct damon_target *get_damon_target(struct task_struct *task) if (!ts) continue; - if (ts->tgid == task->tgid) { + if (ts->mm == task->mm) { put_task_struct(ts); rcu_read_unlock(); return t; -- Gitee From 6100ee470555dc2b4c108ce23fe5acc98d27a0dc Mon Sep 17 00:00:00 2001 From: Rongwei Wang Date: Mon, 25 Jul 2022 19:47:39 +0800 Subject: [PATCH 09/12] anolis: mm/damon: fix behaviour of init_regions ANBZ: #1661 ANBZ: #20937 A fixed address range for processes can been monitored by setting init_regions in DAMON. But now, the init_regions always ignored in a new primitive period. This patch mainly to solve the above issue. The init_regions setting will be saved, checked, and restored first when next primitive period comes. 
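Concretely: after a fixed range is configured through the init_regions debugfs file (one "<target idx> <start address> <end address>" triple per region), monitoring now stays pinned to that range. Before this patch, the next damon_va_update() replaced it with the default three-regions layout derived from the process's mappings; with this patch the user-supplied ranges are kept on the damon_target and re-applied at every region-update interval.
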
Signed-off-by: Rongwei Wang Signed-off-by: Yan Yan Reviewed-by: Xin Hao Acked-by: Gang Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/512 Signed-off-by: Weilin Tong --- include/linux/damon.h | 2 ++ mm/damon/core.c | 3 +++ mm/damon/dbgfs.c | 28 ++++++++++++++++++++++++ mm/damon/vaddr.c | 50 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 83 insertions(+) diff --git a/include/linux/damon.h b/include/linux/damon.h index b70488be07a2..fc62ab797b91 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -84,6 +84,8 @@ struct damon_region { struct damon_target { struct pid *pid; unsigned int nr_regions; + unsigned int nr_init_regions; + struct damon_addr_range *init_regions; struct list_head regions_list; struct list_head list; spinlock_t target_lock; diff --git a/mm/damon/core.c b/mm/damon/core.c index c453268d7c44..99c9ddfe50d9 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -397,6 +397,8 @@ struct damon_target *damon_new_target(void) t->pid = NULL; t->nr_regions = 0; + t->nr_init_regions = 0; + t->init_regions = NULL; INIT_LIST_HEAD(&t->regions_list); INIT_LIST_HEAD(&t->list); spin_lock_init(&t->target_lock); @@ -425,6 +427,7 @@ void damon_free_target(struct damon_target *t) damon_for_each_region_safe(r, next, t) damon_free_region(r); + kfree(t->init_regions); kfree(t); } diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c index 7469fcb729f1..3e4a52efdf97 100644 --- a/mm/damon/dbgfs.c +++ b/mm/damon/dbgfs.c @@ -640,6 +640,9 @@ static int set_init_regions(struct damon_ctx *c, const char *str, ssize_t len) damon_for_each_target(t, c) { damon_for_each_region_safe(r, next, t) damon_destroy_region(r, t); + kfree(t->init_regions); + t->init_regions = NULL; + t->nr_init_regions = 0; } while (pos < len) { @@ -653,12 +656,37 @@ static int set_init_regions(struct damon_ctx *c, const char *str, ssize_t len) pos += parsed; } + /* Set damon_target->init_regions */ + damon_for_each_target(t, c) { + unsigned int nr_regions = t->nr_regions; + int idx = 0; + + t->nr_init_regions = nr_regions; + t->init_regions = kmalloc_array(nr_regions, sizeof(struct damon_addr_range), + GFP_KERNEL); + if (t->init_regions == NULL) + goto fail; + damon_for_each_region_safe(r, next, t) { + /* TODO: Never happen? */ + if (idx == nr_regions) { + pr_alert("nr_regions overflow, init_regions already full."); + break; + } + t->init_regions[idx].start = r->ar.start; + t->init_regions[idx].end = r->ar.end; + idx++; + } + } + return 0; fail: damon_for_each_target(t, c) { damon_for_each_region_safe(r, next, t) damon_destroy_region(r, t); + kfree(t->init_regions); + t->init_regions = NULL; + t->nr_init_regions = 0; } return err; } diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c index fd967d705e53..7833870890f9 100644 --- a/mm/damon/vaddr.c +++ b/mm/damon/vaddr.c @@ -276,6 +276,45 @@ static void damon_va_init(struct damon_ctx *ctx) } } +static void damon_va_apply_init_regions(struct damon_target *t) +{ + struct damon_region *r, *next, *prev; + unsigned int i = 0; + + /* Remove all regions */ + damon_for_each_region_safe(r, next, t) { + damon_destroy_region(r, t); + } + + for (i = 0; i < t->nr_init_regions; i++) { + struct damon_addr_range ar = t->init_regions[i]; + + r = damon_new_region(ar.start, ar.end); + if (!r) { + pr_err("allocating memory failed for new region: 0x%lx - 0x%lx\n", + ar.start, ar.end); + goto fail; + } + damon_add_region(r, t); + if (damon_nr_regions(t) > 1) { + prev = damon_prev_region(r); + if (prev->ar.end > r->ar.start) { + /* + * Never happen! 
this case had been checked during + * setting init_regions. + */ + goto fail; + } + } + } + return; + +fail: + damon_for_each_region_safe(r, next, t) { + damon_destroy_region(r, t); + } +} + /* * Update regions for current memory mappings */ @@ -285,6 +324,17 @@ static void damon_va_update(struct damon_ctx *ctx) struct damon_target *t; damon_for_each_target(t, ctx) { + /* + * If init_regions have been set, updating new target + * according to init_regions. + */ + if (t->nr_init_regions) { + spin_lock(&t->target_lock); + damon_va_apply_init_regions(t); + spin_unlock(&t->target_lock); + + continue; + } if (damon_va_three_regions(t, three_regions)) continue; spin_lock(&t->target_lock); -- Gitee From 13975aeea0a0940de1fe16dac77f0d910be6ab26 Mon Sep 17 00:00:00 2001 From: Xin Hao Date: Wed, 6 Apr 2022 19:13:38 +0800 Subject: [PATCH 10/12] anolis: mm: damon: add flush once in one sampling period ANBZ: #1270 ANBZ: #20937 On some ARM machines, frequent TLB flush will result in high overhead, to solve this problem, we add flush batch function, it only need to flush once during NUMA fault simulation sampling period. Signed-off-by: Gang Deng Signed-off-by: Xin Hao Reviewed-by: Gang Deng Signed-off-by: Weilin Tong --- include/linux/damon.h | 1 + mm/damon/vaddr.c | 50 ++++++++++++++++++++++++++++++------------- 2 files changed, 36 insertions(+), 15 deletions(-) diff --git a/include/linux/damon.h b/include/linux/damon.h index fc62ab797b91..b99add6c4f6c 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -562,6 +562,7 @@ struct damon_ctx { struct completion kdamond_started; /* public: */ + int need_flush; struct task_struct *kdamond; struct mutex kdamond_lock; diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c index 7833870890f9..3cd981a9766a 100644 --- a/mm/damon/vaddr.c +++ b/mm/damon/vaddr.c @@ -14,6 +14,7 @@ #include #include #include +#include #include "ops-common.h" @@ -347,6 +348,7 @@ static bool damon_pmdp_mknone(pmd_t *pmd, struct mm_walk *walk, unsigned long ad { bool preserve_write; pmd_t entry = *pmd; + int *flush_enalbe = walk->private; if (is_huge_zero_pmd(entry) || pmd_protnone(entry)) return false; @@ -359,6 +361,7 @@ static bool damon_pmdp_mknone(pmd_t *pmd, struct mm_walk *walk, unsigned long ad entry = pmd_mkwrite(entry, walk->vma); set_pmd_at(walk->mm, addr, pmd, entry); + ++*flush_enalbe; return true; } return false; @@ -368,6 +371,7 @@ static bool damon_ptep_mknone(pte_t *pte, struct mm_walk *walk, unsigned long ad { pte_t oldpte, ptent; bool preserve_write; + int *flush_enalbe = walk->private; oldpte = *pte; if (pte_protnone(oldpte)) @@ -382,6 +386,7 @@ static bool damon_ptep_mknone(pte_t *pte, struct mm_walk *walk, unsigned long ad ptent = pte_mkwrite(ptent, walk->vma); ptep_modify_prot_commit(walk->vma, addr, pte, oldpte, ptent); + ++*flush_enalbe; return true; } return false; @@ -393,7 +398,6 @@ static int damon_va_pmd_entry(pmd_t *pmd, unsigned long addr, pte_t *pte; pmd_t pmde; spinlock_t *ptl; - bool result = false; if (pmd_trans_huge(pmdp_get(pmd))) { ptl = pmd_lock(walk->mm, pmd); @@ -408,13 +412,8 @@ static int damon_va_pmd_entry(pmd_t *pmd, unsigned long addr, damon_pmdp_mkold(pmd, walk->vma, addr); if (static_branch_unlikely(&numa_stat_enabled_key) && nr_online_nodes > 1) - result = damon_pmdp_mknone(pmd, walk, addr); + damon_pmdp_mknone(pmd, walk, addr); spin_unlock(ptl); - if (result) { - unsigned long haddr = addr & HPAGE_PMD_MASK; - - flush_tlb_range(walk->vma, haddr, haddr + HPAGE_PMD_SIZE); - } return 0; } spin_unlock(ptl); @@ -432,10 +431,8 @@ static 
int damon_va_pmd_entry(pmd_t *pmd, unsigned long addr, damon_ptep_mkold(pte, walk->vma, addr); if (static_branch_unlikely(&numa_stat_enabled_key) && nr_online_nodes > 1) - result = damon_ptep_mknone(pte, walk, addr); + damon_ptep_mknone(pte, walk, addr); pte_unmap_unlock(pte, ptl); - if (result) - flush_tlb_page(walk->vma, addr); return 0; } @@ -498,10 +495,11 @@ static const struct mm_walk_ops damon_va_ops = { .walk_lock = PGWALK_RDLOCK, }; -static void damon_va_check(struct mm_struct *mm, unsigned long addr) +static void damon_va_check(struct damon_ctx *ctx, struct mm_struct *mm, + unsigned long addr) { mmap_read_lock(mm); - walk_page_range(mm, addr, addr + 1, &damon_va_ops, NULL); + walk_page_range(mm, addr, addr + 1, &damon_va_ops, &ctx->need_flush); mmap_read_unlock(mm); } @@ -509,12 +507,12 @@ static void damon_va_check(struct mm_struct *mm, unsigned long addr) * Functions for the access checking of the regions */ -static void __damon_va_prepare_access_check(struct mm_struct *mm, +static void __damon_va_prepare_access_check(struct damon_ctx *ctx, struct mm_struct *mm, struct damon_region *r) { r->sampling_addr = damon_rand(r->ar.start, r->ar.end); - damon_va_check(mm, r->sampling_addr); + damon_va_check(ctx, mm, r->sampling_addr); } static void damon_va_prepare_access_checks(struct damon_ctx *ctx) @@ -524,11 +522,33 @@ static void damon_va_prepare_access_checks(struct damon_ctx *ctx) struct damon_region *r; damon_for_each_target(t, ctx) { + ctx->need_flush = 0; mm = damon_get_mm(t); if (!mm) continue; + + if (static_branch_unlikely(&numa_stat_enabled_key) && + nr_online_nodes > 1) { + inc_tlb_flush_pending(mm); + ctx->need_flush = 1; + } + damon_for_each_region(r, t) - __damon_va_prepare_access_check(mm, r); + __damon_va_prepare_access_check(ctx, mm, r); + + /* + * We have to make sure that in some concurrent scenarios, + * one core is doing numa sampling, but anthor core turns off it, + * in this case, if we still use variable "numa_stat_enabled_key" + * to check if it needs to be flushed, it will cause the flush_tlb_mm() + * not be called. + */ + if (ctx->need_flush > 1) + flush_tlb_mm(mm); + + if (ctx->need_flush) + dec_tlb_flush_pending(mm); + mmput(mm); } } -- Gitee From 2953ede35c26d1616439cf1bb4a2cb6ea830391b Mon Sep 17 00:00:00 2001 From: Xin Hao Date: Thu, 26 May 2022 20:12:17 +0800 Subject: [PATCH 11/12] anolis: mm: damon: Fixed potential race issues when accessing "damon_regon" ANBZ: #1341 ANBZ: #20937 When we get "damon_region", there will have following potential competition, one is that "damon_region" is re-created and added to the list, another is that "damon_region" is freed. 
If we don't solve these underlying races, kernel panic will happen, like below: [ 374.439587] Unable to handle kernel NULL pointer dereference at virtual address 0000000000000000 [ 374.440872] Mem abort info: [ 374.441248] ESR = 0x96000004 [ 374.441641] EC = 0x25: DABT (current EL), IL = 32 bits [ 374.442389] SET = 0, FnV = 0 [ 374.442820] EA = 0, S1PTW = 0 [ 374.443297] Data abort info: [ 374.443702] ISV = 0, ISS = 0x00000004 [ 374.444224] CM = 0, WnR = 0 [ 374.444639] user pgtable: 4k pages, 48-bit VAs, pgdp=000000010838b000 [ 374.445566] [0000000000000000] pgd=0000000000000000, p4d=0000000000000000 [ 374.446496] Internal error: Oops: 96000004 [#1] SMP [ 374.452803] CPU: 4 PID: 5110 Comm: memhog Kdump: loaded Tainted: G E 5.10.84+ #30 [ 374.454087] Hardware name: Alibaba Cloud Alibaba Cloud ECS, BIOS 0.0.0 02/06/2015 [ 374.455182] pstate: 20400005 (nzCv daif +PAN -UAO -TCO BTYPE=--) [ 374.456078] pc : damon_numa_fault+0xe0/0x230 [ 374.456710] lr : damon_numa_fault+0x5c/0x230 [ 374.457349] sp : ffff80001b57bc20 [ 374.457843] x29: ffff80001b57bc20 x28: ffff001800f7b780 [ 374.458625] x27: 0000000000000008 x26: 0000000000000000 [ 374.459411] x25: 0000000000000000 x24: ffff0008055b7bb0 [ 374.460191] x23: ffff8000114fbe68 x22: ffff0008055b7b80 [ 374.460988] x21: 0000000000000000 x20: 0000000000000000 [ 374.461785] x19: ffff80001b57bd20 x18: 0000000000000000 [ 374.462569] x17: 0000000000000000 x16: 0000000000000000 [ 374.463348] x15: 0000000000000000 x14: 0000000000000000 [ 374.464133] x13: 0000000000000000 x12: 0000000000000000 [ 374.464946] x11: 0000000000000000 x10: 0000000000000000 [ 374.465731] x9 : ffff8000103a3de8 x8 : 0000000000000000 [ 374.466524] x7 : 0000000000000000 x6 : ffff800010c1b000 [ 374.467309] x5 : 0000ffff82c00000 x4 : 0000000000000000 [ 374.468108] x3 : 0000000000000000 x2 : ffff0008055b7b90 [ 374.468899] x1 : ffff0008014eb700 x0 : ffffffffffffffe0 [ 374.469687] Call trace: [ 374.470051] damon_numa_fault+0xe0/0x230 [ 374.470629] do_huge_pmd_numa_page+0x138/0x620 [ 374.471286] __handle_mm_fault+0x30c/0x3c0 [ 374.471891] handle_mm_fault+0xa8/0x210 [ 374.472467] do_page_fault+0x174/0x3f4 [ 374.473043] do_translation_fault+0xbc/0xdc [ 374.473670] do_mem_abort+0x4c/0xb0 [ 374.474200] el0_da+0x44/0x80 [ 374.474640] el0_sync_handler+0x168/0x1b0 [ 374.475233] el0_sync+0x180/0x1c0 Signed-off-by: Xin Hao Reviewed-by: Baolin Wang Reviewed-by: Gang Deng Signed-off-by: Weilin Tong --- mm/damon/core.c | 10 +++++----- mm/damon/vaddr.c | 3 +++ 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/mm/damon/core.c b/mm/damon/core.c index 99c9ddfe50d9..11a415a69295 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -425,9 +425,11 @@ void damon_free_target(struct damon_target *t) { struct damon_region *r, *next; + spin_lock(&t->target_lock); damon_for_each_region_safe(r, next, t) damon_free_region(r); kfree(t->init_regions); + spin_unlock(&t->target_lock); kfree(t); } @@ -1658,14 +1660,10 @@ static struct damon_region *get_damon_region(struct damon_target *t, unsigned lo if (!t || !addr) return NULL; - spin_lock(&t->target_lock); damon_for_each_region_safe(r, next, t) { - if (r->ar.start <= addr && r->ar.end >= addr) { - spin_unlock(&t->target_lock); + if (r->ar.start <= addr && r->ar.end >= addr) return r; - } } - spin_unlock(&t->target_lock); return NULL; } @@ -1679,6 +1677,7 @@ void damon_numa_fault(int page_nid, int node_id, struct vm_fault *vmf) && nr_online_nodes > 1) { t = get_damon_target(current); if (t) { + spin_lock(&t->target_lock); r = get_damon_region(t, 
vmf->address); if (r) { if (page_nid == node_id) @@ -1686,6 +1685,7 @@ void damon_numa_fault(int page_nid, int node_id, struct vm_fault *vmf) else r->remote++; } + spin_unlock(&t->target_lock); } } } diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c index 3cd981a9766a..889259f40e57 100644 --- a/mm/damon/vaddr.c +++ b/mm/damon/vaddr.c @@ -252,10 +252,12 @@ static void __damon_va_init_regions(struct damon_ctx *ctx, sz = DAMON_MIN_REGION; /* Set the initial three regions of the target */ + spin_lock(&t->target_lock); for (i = 0; i < 3; i++) { r = damon_new_region(regions[i].start, regions[i].end); if (!r) { pr_err("%d'th init region creation failed\n", i); + spin_unlock(&t->target_lock); return; } damon_add_region(r, t); @@ -263,6 +265,7 @@ static void __damon_va_init_regions(struct damon_ctx *ctx, nr_pieces = (regions[i].end - regions[i].start) / sz; damon_va_evenly_split_region(t, r, nr_pieces); } + spin_unlock(&t->target_lock); } /* Initialize '->regions_list' of every target (task) */ -- Gitee From 160f172c83605e17bad50346dadc3dcab18ecb57 Mon Sep 17 00:00:00 2001 From: Xin Hao Date: Tue, 31 May 2022 17:17:28 +0800 Subject: [PATCH 12/12] anolis: mm: damon: change kmalloc() flags in damon_new_region() ANBZ: #1341 ANBZ: #20937 the damon_new_region() is called in many functions, such as damon_va_update(), kdamond_split_regions(), __damon_va_init_regions(). they are all in spinlock's environment, so the flags can not be setted to GFP_KERNEL, so there use GFP_ATOMIC instead. Signed-off-by: Xin Hao Reviewed-by: Baolin Wang Reviewed-by: Gang Deng Signed-off-by: Weilin Tong --- mm/damon/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/damon/core.c b/mm/damon/core.c index 11a415a69295..af9bbe0e22fa 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -121,7 +121,7 @@ struct damon_region *damon_new_region(unsigned long start, unsigned long end) { struct damon_region *region; - region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL); + region = kmem_cache_alloc(damon_region_cache, GFP_ATOMIC); if (!region) return NULL; -- Gitee