From 772875e87b0361212f18281e98140a3ae34534e8 Mon Sep 17 00:00:00 2001
From: Ke Liu
Date: Tue, 23 May 2023 11:56:01 +0800
Subject: [PATCH] v2.0 swappiness feature

Signed-off-by: Ke Liu
Change-Id: Ic8e550e047a6fec5131ab075823a5ab2a4d74be9
---
 mm/vmscan.c | 121 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 121 insertions(+)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index e319f2d460f3..b9cdf8166c6a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2204,6 +2204,57 @@ bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru)
 	return inactive * inactive_ratio < active;
 }
 
+
+#ifdef CONFIG_MEM_PURGEABLE
+#define ANON_FILE_PURG 3
+static atomic64_t purg_swappiness = ATOMIC_INIT(0);
+
+/* Reclaim purgeable pages from every memcg lruvec on this node. */
+static unsigned long purgeable_node_reclaim(pg_data_t *pgdata, struct scan_control *sc)
+{
+	struct mem_cgroup *memcg = NULL;
+	unsigned long nr = 0;
+#ifdef CONFIG_MEMCG
+	while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)))
+#endif
+	{
+		struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdata);
+
+		/* nr_to_scan of -1 (ULONG_MAX) scans the entire list */
+		shrink_list(LRU_ACTIVE_PURGEABLE, -1, lruvec, sc);
+		nr += shrink_list(LRU_INACTIVE_PURGEABLE, -1, lruvec, sc);
+	}
+
+	pr_info("reclaimed %lu purgeable pages.\n", nr);
+
+	return nr;
+}
+
+static bool is_purg_lru(enum lru_list lru)
+{
+	return (lru == LRU_INACTIVE_PURGEABLE || lru == LRU_ACTIVE_PURGEABLE);
+}
+
+static int purgeable_swappiness_write(struct cgroup_subsys_state *css,
+		struct cftype *cft, u64 val)
+{
+	const unsigned int ratio = 100;
+
+	if (val > ratio)
+		return -EINVAL;
+
+	atomic64_set(&purg_swappiness, val);
+
+	return 0;
+}
+
+static u64 purgeable_swappiness_read(struct cgroup_subsys_state *css, struct cftype *cft)
+{
+	return atomic64_read(&purg_swappiness);
+}
+
+#endif
+
 /*
  * Determine how aggressively the anon and file LRU lists should be
  * scanned.  The relative value of each set of LRU lists is determined
@@ -2220,7 +2271,14 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
 	unsigned long anon_cost, file_cost, total_cost;
 	int swappiness = mem_cgroup_swappiness(memcg);
+#ifdef CONFIG_MEM_PURGEABLE
+	unsigned long purg_cost;
+	int purg_anon_swappiness = atomic64_read(&purg_swappiness);
+	unsigned long pp;
+	u64 fraction[ANON_FILE_PURG];
+#else
 	u64 fraction[ANON_AND_FILE];
+#endif
 	u64 denominator = 0;	/* gcc */
 	enum scan_balance scan_balance;
 	unsigned long ap, fp;
@@ -2287,6 +2345,34 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
 	 *
 	 * With swappiness at 100, anon and file have equal IO cost.
 	 */
+#ifdef CONFIG_MEM_PURGEABLE
+	total_cost = sc->anon_cost + sc->file_cost + sc->purg_cost;
+	anon_cost = total_cost + sc->anon_cost;
+	file_cost = total_cost + sc->file_cost;
+	purg_cost = total_cost + sc->purg_cost;
+	total_cost = anon_cost + file_cost + purg_cost;
+
+	/*
+	 * If the system is almost out of purgeable pages, force-scan anon and file.
+	 */
+	if (sc->purg_is_tiny)
+		purg_anon_swappiness = 100;
+
+	/* purg_anon_swappiness splits the anon share between anon and purg */
+	ap = swappiness * purg_anon_swappiness * (total_cost + 1);
+	ap /= anon_cost + 1;
+
+	pp = swappiness * (100 - purg_anon_swappiness) * (total_cost + 1);
+	pp /= purg_cost + 1;
+
+	fp = (200 - swappiness) * 100 * (total_cost + 1);
+	fp /= file_cost + 1;
+
+	fraction[0] = ap;
+	fraction[1] = fp;
+	fraction[2] = pp;
+	denominator = ap + fp + pp;
+#else
 	total_cost = sc->anon_cost + sc->file_cost;
 	anon_cost = total_cost + sc->anon_cost;
 	file_cost = total_cost + sc->file_cost;
@@ -2301,9 +2387,15 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
 	fraction[0] = ap;
 	fraction[1] = fp;
 	denominator = ap + fp;
+#endif
 out:
 	for_each_evictable_lru(lru) {
 		int file = is_file_lru(lru);
 		unsigned long lruvec_size;
 		unsigned long low, min;
 		unsigned long scan;
+#ifdef CONFIG_MEM_PURGEABLE
+		/* purgeable fractions are indexed as a third LRU type */
+		if (is_purg_lru(lru))
+			file = 2;
+#endif
@@ -2441,9 +2533,16 @@ void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 			 sc->priority == DEF_PRIORITY);
 
 	blk_start_plug(&plug);
+#ifdef CONFIG_MEM_PURGEABLE
+	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
+			nr[LRU_INACTIVE_FILE] || nr[LRU_INACTIVE_PURGEABLE]) {
+		unsigned long nr_anon, nr_file, nr_purg, percentage;
+#else
 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
 					nr[LRU_INACTIVE_FILE]) {
 		unsigned long nr_anon, nr_file, percentage;
+#endif
+
 		unsigned long nr_scanned;
 
 		for_each_evictable_lru(lru) {
@@ -2470,6 +2569,9 @@ void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 		 */
 		nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
 		nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];
+#ifdef CONFIG_MEM_PURGEABLE
+		nr_purg = nr[LRU_INACTIVE_PURGEABLE] + nr[LRU_ACTIVE_PURGEABLE];
+#endif
 
 		/*
 		 * It's just vindictive to attack the larger once the smaller
@@ -2477,6 +2579,24 @@ void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 		 * has gone to zero.  And given the way we stop scanning the
 		 * smaller below, this makes sure that we only make one nudge
 		 * towards proportionality once we've got nr_to_reclaim.
 		 */
+#ifdef CONFIG_MEM_PURGEABLE
+		if (!nr_file || !nr_anon || !nr_purg)
+			break;
+
+		if (nr_file > nr_anon + nr_purg) {
+			unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
+				targets[LRU_ACTIVE_ANON] +
+				targets[LRU_INACTIVE_PURGEABLE] +
+				targets[LRU_ACTIVE_PURGEABLE] + 1;
+			lru = LRU_BASE;
+			percentage = (nr_anon + nr_purg) * 100 / scan_target;
+		} else {
+			unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
+				targets[LRU_ACTIVE_FILE] + 1;
+			lru = LRU_FILE;
+			percentage = nr_file * 100 / scan_target;
+		}
+#else
 		if (!nr_file || !nr_anon)
 			break;
@@ -2492,6 +2612,7 @@ void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 			percentage = nr_file * 100 / scan_target;
 		}
 
+#endif
 		/* Stop scanning the smaller of the LRU */
 		nr[lru] = 0;
 		nr[lru + LRU_ACTIVE] = 0;
-- 
Gitee
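
A quick way to sanity-check the three-way balance introduced above is to run the
ap/fp/pp arithmetic from the CONFIG_MEM_PURGEABLE branch of get_scan_count() in
userspace. The sketch below is not part of the patch: the cost inputs are made-up
illustrative numbers standing in for sc->anon_cost, sc->file_cost and sc->purg_cost
(which the kernel accumulates during reclaim), and purg_anon_swappiness stands in
for the value written through the new purg_swappiness control. Build with any C
compiler, e.g. "cc balance.c && ./a.out".

#include <stdio.h>

int main(void)
{
	/* illustrative stand-ins for sc->anon_cost / sc->file_cost / sc->purg_cost */
	unsigned long sc_anon_cost = 600, sc_file_cost = 300, sc_purg_cost = 100;
	int swappiness = 60;           /* mem_cgroup_swappiness(), range 0..200 */
	int purg_anon_swappiness = 40; /* purg_swappiness control, range 0..100 */

	/* same derivation as the patched get_scan_count() */
	unsigned long total_cost = sc_anon_cost + sc_file_cost + sc_purg_cost;
	unsigned long anon_cost = total_cost + sc_anon_cost;
	unsigned long file_cost = total_cost + sc_file_cost;
	unsigned long purg_cost = total_cost + sc_purg_cost;
	unsigned long ap, fp, pp, denominator;

	total_cost = anon_cost + file_cost + purg_cost;

	ap = swappiness * purg_anon_swappiness * (total_cost + 1);
	ap /= anon_cost + 1;

	pp = swappiness * (100 - purg_anon_swappiness) * (total_cost + 1);
	pp /= purg_cost + 1;

	fp = (200 - swappiness) * 100 * (total_cost + 1);
	fp /= file_cost + 1;

	denominator = ap + fp + pp;

	/* share of scan pressure each LRU type receives */
	printf("anon %3lu%%  file %3lu%%  purg %3lu%%\n",
	       ap * 100 / denominator,
	       fp * 100 / denominator,
	       pp * 100 / denominator);
	return 0;
}

With these inputs the split comes out to about 9% anon, 69% file and 21%
purgeable. Raising purg_anon_swappiness shifts pressure from the purgeable lists
back onto anon; the sc->purg_is_tiny case (not modeled here) does exactly that by
forcing purg_anon_swappiness to 100, which zeroes the purgeable share.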