From fac2b6a39eb8e615cc132719847bea66887872cb Mon Sep 17 00:00:00 2001
From: Liu Shixin
Date: Wed, 27 Mar 2024 17:18:06 +0800
Subject: [PATCH 1/2] mm/readahead: break read-ahead loop if filemap_add_folio return -ENOMEM

maillist inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I8EXN6
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/mm-readahead-break-read-ahead-loop-if-filemap_add_folio-return-enomem.patch

--------------------------------

When filemap_add_folio() returns -ENOMEM, break the read-ahead loop,
matching what is already done when filemap_alloc_folio() fails.

Signed-off-by: Liu Shixin
Signed-off-by: Jinjiang Tu
Reviewed-by: Jan Kara
---
 mm/readahead.c | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/mm/readahead.c b/mm/readahead.c
index ed23d5dec123..22dd9c8fe808 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -220,11 +220,18 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
 		if (mapping->a_ops->readpages) {
 			page->index = index + i;
 			list_add(&page->lru, &page_pool);
-		} else if (add_to_page_cache_lru(page, mapping, index + i,
-					gfp_mask) < 0) {
-			put_page(page);
-			read_pages(ractl, &page_pool, true);
-			continue;
+		} else {
+			int ret;
+
+			ret = add_to_page_cache_lru(page, mapping, index + i,
+					gfp_mask);
+			if (ret < 0) {
+				put_page(page);
+				read_pages(ractl, &page_pool, true);
+				if (ret == -ENOMEM)
+					break;
+				continue;
+			}
 		}
 		if (i == nr_to_read - lookahead_size)
 			SetPageReadahead(page);
-- 
Gitee

From 69f25e7d45c3c35b75c068aedd582b90c3e4eb5f Mon Sep 17 00:00:00 2001
From: Liu Shixin
Date: Wed, 27 Mar 2024 17:18:07 +0800
Subject: [PATCH 2/2] mm/filemap: don't decrease mmap_miss when page has workingset flag

maillist inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I8EXN6
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/mm-readahead-dont-decrease-mmap_miss-when-folio-has-workingset-flags.patch

--------------------------------

If a file has many pages that were recently evicted, they will probably
continue to be evicted. In that situation read-ahead on this file brings
no benefit and only wastes IO.

mmap_miss is increased in do_sync_mmap_readahead() and decreased in both
do_async_mmap_readahead() and filemap_map_pages(). To skip read-ahead in
the scenario above, mmap_miss has to grow beyond MMAP_LOTSAMISS, which is
achieved by no longer decreasing mmap_miss when the page has the
workingset flag set. The async path is left untouched because it is
rarely reached in this scenario.

Signed-off-by: Liu Shixin
Reviewed-by: Jan Kara
---
 mm/filemap.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/mm/filemap.c b/mm/filemap.c
index 2eeb9978f39e..b48ec6fc8f4b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3168,7 +3168,14 @@ void filemap_map_pages(struct vm_fault *vmf,
 		if (xas.xa_index >= max_idx)
 			goto unlock;
 
-		if (mmap_miss > 0)
+		/*
+		 * If there are too many pages that are recently evicted
+		 * in a file, they will probably continue to be evicted.
+		 * In such situation, read-ahead is only a waste of IO.
+		 * Don't decrease mmap_miss in this scenario to make sure
+		 * we can stop read-ahead.
+		 */
+		if (mmap_miss > 0 && !PageWorkingset(page))
 			mmap_miss--;
 
 		vmf->address += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
-- 
Gitee
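
For readers following the mmap_miss reasoning in patch 2, the sketch below is a
standalone userspace model, not kernel code and not part of this series. The
names MMAP_LOTSAMISS and mmap_miss are taken from mm/filemap.c; the cutoff of
100, the *10 cap and the simplified fault flow are paraphrased assumptions
about the sync fault path of this kernel era. It illustrates why skipping the
decrement for workingset (recently evicted) pages lets the counter reach the
cutoff and switch read-ahead off.

/*
 * Standalone model (not kernel code) of the mmap_miss accounting.
 * The sync fault path bumps the counter and skips read-ahead once it
 * exceeds MMAP_LOTSAMISS; filemap_map_pages() normally decrements it.
 * With patch 2, pages carrying the workingset flag (recently evicted)
 * no longer trigger the decrement, so the counter can reach the cutoff.
 */
#include <stdbool.h>
#include <stdio.h>

#define MMAP_LOTSAMISS	100	/* cutoff name taken from mm/filemap.c */

static unsigned int mmap_miss;

/* simplified stand-in for do_sync_mmap_readahead() */
static bool sync_fault_does_readahead(void)
{
	if (mmap_miss < MMAP_LOTSAMISS * 10)
		mmap_miss++;
	return mmap_miss <= MMAP_LOTSAMISS;	/* above the cutoff: skip read-ahead */
}

/* simplified stand-in for the hunk changed in filemap_map_pages() */
static void map_pages(bool page_has_workingset_flag)
{
	if (mmap_miss > 0 && !page_has_workingset_flag)
		mmap_miss--;
}

int main(void)
{
	/* every fault hits a recently evicted page: the thrashing case */
	for (int fault = 1; fault <= 2 * MMAP_LOTSAMISS; fault++) {
		bool ra = sync_fault_does_readahead();

		map_pages(true);
		if (!ra) {
			printf("read-ahead disabled after %d faults (mmap_miss=%u)\n",
			       fault, mmap_miss);
			return 0;
		}
	}
	printf("read-ahead still enabled (mmap_miss=%u)\n", mmap_miss);
	return 0;
}

Passing false to map_pages() models the pre-patch behaviour: each decrement in
filemap_map_pages() cancels the preceding increment, mmap_miss never exceeds
MMAP_LOTSAMISS, and read-ahead keeps being issued for a thrashing file.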