diff --git a/mm/filemap.c b/mm/filemap.c
index 2eeb9978f39e429237c315269952aff60a43c08d..b48ec6fc8f4b91072e872b320407817d9eba24bf 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3168,7 +3168,14 @@ void filemap_map_pages(struct vm_fault *vmf,
 		if (xas.xa_index >= max_idx)
 			goto unlock;
 
-		if (mmap_miss > 0)
+		/*
+		 * If there are too many pages that were recently evicted
+		 * from a file, they will probably continue to be evicted.
+		 * In such a situation, read-ahead is only a waste of IO.
+		 * Don't decrease mmap_miss in this scenario to make sure
+		 * we can stop read-ahead.
+		 */
+		if (mmap_miss > 0 && !PageWorkingset(page))
 			mmap_miss--;
 
 		vmf->address += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
diff --git a/mm/readahead.c b/mm/readahead.c
index ed23d5dec12387fd862caad619b9d072d44649fc..22dd9c8fe80807ff8b26048ae7965323cac2c361 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -220,11 +220,18 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
 		if (mapping->a_ops->readpages) {
 			page->index = index + i;
 			list_add(&page->lru, &page_pool);
-		} else if (add_to_page_cache_lru(page, mapping, index + i,
-					gfp_mask) < 0) {
-			put_page(page);
-			read_pages(ractl, &page_pool, true);
-			continue;
+		} else {
+			int ret;
+
+			ret = add_to_page_cache_lru(page, mapping, index + i,
+					gfp_mask);
+			if (ret < 0) {
+				put_page(page);
+				read_pages(ractl, &page_pool, true);
+				if (ret == -ENOMEM)
+					break;
+				continue;
+			}
 		}
 		if (i == nr_to_read - lookahead_size)
 			SetPageReadahead(page);
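
For context on the filemap.c hunk: mmap_miss feeds the heuristic in the mmap fault read-ahead path that stops read-ahead once a file misses far more often than it hits (the counter is compared against MMAP_LOTSAMISS). The snippet below is a minimal, hypothetical user-space model of that feedback loop, not kernel code; the names mirror the kernel's but the logic is simplified. It only illustrates why skipping the decrement for workingset (recently evicted, now refaulted) pages keeps read-ahead switched off for a thrashing file.

/*
 * Hypothetical user-space model of the mmap_miss feedback loop this
 * patch relies on.  MMAP_LOTSAMISS mirrors the kernel constant's name;
 * everything else is simplified for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

#define MMAP_LOTSAMISS	100

static unsigned int mmap_miss;

/* A fault on a page that is not cached bumps the miss counter. */
static void fault_missed(void)
{
	if (mmap_miss < MMAP_LOTSAMISS * 10)
		mmap_miss++;
}

/*
 * A fault on a cached page used to always decrement the counter.
 * With the patch, hits on workingset (recently evicted, refaulted)
 * pages no longer do, so a thrashing file keeps mmap_miss high.
 */
static void fault_hit(bool workingset)
{
	if (mmap_miss > 0 && !workingset)
		mmap_miss--;
}

/* Read-ahead is skipped once the file misses much more than it hits. */
static bool should_readahead(void)
{
	return mmap_miss <= MMAP_LOTSAMISS;
}

int main(void)
{
	/* Simulate a thrashing file: every cache hit is on a refaulted page. */
	for (int i = 0; i < 200; i++) {
		fault_missed();
		fault_hit(true);	/* workingset hit: no decrement */
	}
	printf("mmap_miss=%u read-ahead=%s\n",
	       mmap_miss, should_readahead() ? "on" : "off");
	return 0;
}

In this model, a file whose cached hits are all refaults never drains mmap_miss, so should_readahead() stays false; that is the behaviour the !PageWorkingset(page) check in the patch is after.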