[PATCH v6 27/99] page cache: Convert delete_batch to XArray

From: Matthew Wilcox
Date: Wed Jan 17 2018 - 16:03:19 EST


From: Matthew Wilcox <mawilcox@xxxxxxxxxxxxx>

Rename the function from page_cache_tree_delete_batch to just
page_cache_delete_batch and convert it to the XArray: the
radix_tree_for_each_slot() walk becomes xas_for_each(), the
radix_tree_clear_tags() / __radix_tree_replace() pair becomes a
single xas_store(), and VM_BUG_ON_PAGE() assertions are added to
check the page indices we expect to see during the walk.

Signed-off-by: Matthew Wilcox <mawilcox@xxxxxxxxxxxxx>
---
mm/filemap.c | 28 +++++++++++++---------------
1 file changed, 13 insertions(+), 15 deletions(-)
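
(Not part of the commit message, just a note for review: the new loop
is the usual XArray "iterate and erase under the lock" pattern.  Below
is a rough, self-contained sketch of that pattern for anyone who has
not seen the advanced API yet; erase_present_entries() is a
hypothetical helper used purely for illustration and is not added
anywhere by this series.)

#include <linux/xarray.h>

/*
 * Illustrative only: walk [first, last] in @xa and erase every present
 * entry, skipping value entries, while holding the xa_lock for the
 * whole walk -- the same shape as page_cache_delete_batch(), whose
 * caller already holds the lock.
 */
static void erase_present_entries(struct xarray *xa,
				  unsigned long first, unsigned long last)
{
	XA_STATE(xas, xa, first);	/* cursor positioned at @first */
	void *entry;

	xa_lock(xa);
	xas_for_each(&xas, entry, last) {
		if (xa_is_value(entry))		/* skip value entries */
			continue;
		xas_store(&xas, NULL);		/* erase at the current index */
	}
	xa_unlock(xa);
}

Storing NULL never allocates, so no xas_nomem() retry loop is needed
and the whole walk can stay under the spinlock, which is why
page_cache_delete_batch() can run entirely under the xa_lock its
caller already holds.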

diff --git a/mm/filemap.c b/mm/filemap.c
index 317a89df1945..d2a0031d61f5 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -276,7 +276,7 @@ void delete_from_page_cache(struct page *page)
 EXPORT_SYMBOL(delete_from_page_cache);
 
 /*
- * page_cache_tree_delete_batch - delete several pages from page cache
+ * page_cache_delete_batch - delete several pages from page cache
  * @mapping: the mapping to which pages belong
  * @pvec: pagevec with pages to delete
  *
@@ -289,23 +289,18 @@ EXPORT_SYMBOL(delete_from_page_cache);
  *
  * The function expects xa_lock to be held.
  */
-static void
-page_cache_tree_delete_batch(struct address_space *mapping,
+static void page_cache_delete_batch(struct address_space *mapping,
 			     struct pagevec *pvec)
 {
-	struct radix_tree_iter iter;
-	void **slot;
+	XA_STATE(xas, &mapping->pages, pvec->pages[0]->index);
 	int total_pages = 0;
 	int i = 0, tail_pages = 0;
 	struct page *page;
-	pgoff_t start;
 
-	start = pvec->pages[0]->index;
-	radix_tree_for_each_slot(slot, &mapping->pages, &iter, start) {
+	mapping_set_update(&xas, mapping);
+	xas_for_each(&xas, page, ULONG_MAX) {
 		if (i >= pagevec_count(pvec) && !tail_pages)
 			break;
-		page = radix_tree_deref_slot_protected(slot,
-						       &mapping->pages.xa_lock);
 		if (xa_is_value(page))
 			continue;
 		if (!tail_pages) {
@@ -314,8 +309,11 @@ page_cache_tree_delete_batch(struct address_space *mapping,
 			 * have our pages locked so they are protected from
 			 * being removed.
 			 */
-			if (page != pvec->pages[i])
+			if (page != pvec->pages[i]) {
+				VM_BUG_ON_PAGE(page->index >
+						pvec->pages[i]->index, page);
 				continue;
+			}
 			WARN_ON_ONCE(!PageLocked(page));
 			if (PageTransHuge(page) && !PageHuge(page))
 				tail_pages = HPAGE_PMD_NR - 1;
@@ -326,11 +324,11 @@ page_cache_tree_delete_batch(struct address_space *mapping,
 			 */
 			i++;
 		} else {
+			VM_BUG_ON_PAGE(page->index + HPAGE_PMD_NR - tail_pages
+					!= pvec->pages[i]->index, page);
 			tail_pages--;
 		}
-		radix_tree_clear_tags(&mapping->pages, iter.node, slot);
-		__radix_tree_replace(&mapping->pages, iter.node, slot, NULL,
-				     workingset_lookup_update(mapping));
+		xas_store(&xas, NULL);
 		total_pages++;
 	}
 	mapping->nrpages -= total_pages;
@@ -351,7 +349,7 @@ void delete_from_page_cache_batch(struct address_space *mapping,
 
 		unaccount_page_cache_page(mapping, pvec->pages[i]);
 	}
-	page_cache_tree_delete_batch(mapping, pvec);
+	page_cache_delete_batch(mapping, pvec);
 	xa_unlock_irqrestore(&mapping->pages, flags);
 
 	for (i = 0; i < pagevec_count(pvec); i++)
--
2.15.1