[PATCH, RFC 07/16] thp, mm: rewrite delete_from_page_cache() to support huge pages

From: Kirill A. Shutemov
Date: Mon Jan 28 2013 - 04:24:38 EST


From: "Kirill A. Shutemov" <kirill.shutemov@xxxxxxxxxxxxxxx>

As with add_to_page_cache_locked(), we handle HPAGE_CACHE_NR pages at a
time: all subpage entries are removed from the radix tree and the page
cache accounting is adjusted by HPAGE_CACHE_NR instead of by one.
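
To illustrate the idea outside of kernel context, a minimal userspace
sketch follows (the toy_mapping/toy_page structures, the slot array,
the helper name toy_delete_from_cache and the HPAGE_CACHE_NR value of
512 are made up for the example; only the shape of the logic mirrors
the patch): a huge page occupies HPAGE_CACHE_NR consecutive index
slots, so removal clears all of them and subtracts HPAGE_CACHE_NR from
the page count instead of one.

#include <stdbool.h>
#include <stdio.h>

#define HPAGE_CACHE_NR	512	/* subpages per huge page (assumed, 2M/4K) */
#define CACHE_SLOTS	4096	/* toy stand-in for the radix tree */

struct toy_page {
	unsigned long index;	/* first slot this page occupies */
	bool huge;		/* covers HPAGE_CACHE_NR slots if true */
};

struct toy_mapping {
	struct toy_page *slots[CACHE_SLOTS];
	unsigned long nrpages;	/* small-page units, as in mapping->nrpages */
};

/* Model of __delete_from_page_cache(): drop every slot the page covers
 * and subtract the same number of small-page units from the counter. */
static void toy_delete_from_cache(struct toy_mapping *m, struct toy_page *p)
{
	int nr = 1;

	if (p->huge) {
		int i;

		for (i = 0; i < HPAGE_CACHE_NR; i++)
			m->slots[p->index + i] = NULL;
		nr = HPAGE_CACHE_NR;
	} else {
		m->slots[p->index] = NULL;
	}

	m->nrpages -= nr;
}

int main(void)
{
	static struct toy_mapping m;
	struct toy_page huge = { .index = 0, .huge = true };
	int i;

	for (i = 0; i < HPAGE_CACHE_NR; i++)
		m.slots[i] = &huge;
	m.nrpages = HPAGE_CACHE_NR;

	toy_delete_from_cache(&m, &huge);
	printf("nrpages after delete: %lu\n", m.nrpages);	/* prints 0 */
	return 0;
}

Running the sketch prints "nrpages after delete: 0", i.e. the
accounting stays consistent when the whole huge page is dropped at
once.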

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx>
---
mm/filemap.c | 27 +++++++++++++++++++++------
1 file changed, 21 insertions(+), 6 deletions(-)

diff --git a/mm/filemap.c b/mm/filemap.c
index fa2fdab..a4b4fd5 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -112,6 +112,7 @@
 void __delete_from_page_cache(struct page *page)
 {
 	struct address_space *mapping = page->mapping;
+	int nr = 1;
 
 	/*
 	 * if we're uptodate, flush out into the cleancache, otherwise
@@ -123,13 +124,23 @@ void __delete_from_page_cache(struct page *page)
 	else
 		cleancache_invalidate_page(mapping, page);
 
-	radix_tree_delete(&mapping->page_tree, page->index);
+	if (PageTransHuge(page)) {
+		int i;
+
+		for (i = 0; i < HPAGE_CACHE_NR; i++)
+			radix_tree_delete(&mapping->page_tree, page->index + i);
+		nr = HPAGE_CACHE_NR;
+	} else {
+		radix_tree_delete(&mapping->page_tree, page->index);
+	}
+
 	page->mapping = NULL;
 	/* Leave page->index set: truncation lookup relies upon it */
-	mapping->nrpages--;
-	__dec_zone_page_state(page, NR_FILE_PAGES);
+
+	mapping->nrpages -= nr;
+	__mod_zone_page_state(page_zone(page), NR_FILE_PAGES, -nr);
 	if (PageSwapBacked(page))
-		__dec_zone_page_state(page, NR_SHMEM);
+		__mod_zone_page_state(page_zone(page), NR_SHMEM, -nr);
 	BUG_ON(page_mapped(page));
 
 	/*
@@ -140,8 +151,8 @@ void __delete_from_page_cache(struct page *page)
 	 * having removed the page entirely.
 	 */
 	if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
-		dec_zone_page_state(page, NR_FILE_DIRTY);
-		dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
+		mod_zone_page_state(page_zone(page), NR_FILE_DIRTY, -nr);
+		add_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE, -nr);
 	}
 }
 
@@ -157,6 +168,7 @@ void delete_from_page_cache(struct page *page)
 {
 	struct address_space *mapping = page->mapping;
 	void (*freepage)(struct page *);
+	int i;
 
 	BUG_ON(!PageLocked(page));
 
@@ -168,6 +180,9 @@ void delete_from_page_cache(struct page *page)
 
 	if (freepage)
 		freepage(page);
+	if (PageTransHuge(page))
+		for (i = 1; i < HPAGE_CACHE_NR; i++)
+			page_cache_release(page);
 	page_cache_release(page);
 }
 EXPORT_SYMBOL(delete_from_page_cache);
--
1.7.10.4
