[PATCH v7 11/24] mm: Move end_index check out of readahead loop

From: Matthew Wilcox
Date: Wed Feb 19 2020 - 16:01:28 EST


From: "Matthew Wilcox (Oracle)" <willy@xxxxxxxxxxxxx>

By reducing nr_to_read so that it never extends past the page containing
the last byte of the file, we can eliminate the per-iteration end_index
check from inside the loop.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
mm/readahead.c | 15 +++++++++------
1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/mm/readahead.c b/mm/readahead.c
index 07cdfbf00f4b..ace611f4bf05 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -166,8 +166,6 @@ void __do_page_cache_readahead(struct address_space *mapping,
unsigned long lookahead_size)
{
struct inode *inode = mapping->host;
- struct page *page;
- unsigned long end_index; /* The last page we want to read */
LIST_HEAD(page_pool);
loff_t isize = i_size_read(inode);
gfp_t gfp_mask = readahead_gfp_mask(mapping);
@@ -179,22 +177,27 @@ void __do_page_cache_readahead(struct address_space *mapping,
._nr_pages = 0,
};
unsigned long i;
+ pgoff_t end_index; /* The last page we want to read */

if (isize == 0)
return;

- end_index = ((isize - 1) >> PAGE_SHIFT);
+ end_index = (isize - 1) >> PAGE_SHIFT;
+ if (index > end_index)
+ return;
+ if (index + nr_to_read < index)
+ nr_to_read = ULONG_MAX - index + 1;
+ if (index + nr_to_read >= end_index)
+ nr_to_read = end_index - index + 1;

/*
* Preallocate as many pages as we will need.
*/
for (i = 0; i < nr_to_read; i++) {
- if (index + i > end_index)
- break;
+ struct page *page = xa_load(&mapping->i_pages, index + i);

BUG_ON(index + i != rac._index + rac._nr_pages);

- page = xa_load(&mapping->i_pages, index + i);
if (page && !xa_is_value(page)) {
/*
* Page already present? Kick off the current batch of
--
2.25.0