[PATCH] free swap space of (re)activated pages

From: Rik van Riel
Date: Fri Mar 02 2007 - 15:32:00 EST


Hi Andrew,

The attached patch frees the swap space of already resident pages
when swap space starts getting tight, instead of only freeing the
swap space taken up by newly swapped-in pages.
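
For reference, "tight" here means the existing vm_swap_full() test,
which in this kernel is (roughly) the following macro from
include/linux/swap.h, i.e. more than half of the available swap
space is already in use:

	#define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)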

This should free the swap space of pages that remain resident in
memory, giving kswapd a better chance to actually swap a page out
(instead of rotating it back onto the active list).
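
For context (this is not part of the patch): remove_exclusive_swap_page()
only frees the swap slot when it is safe to do so, and it marks the page
dirty so it gets written to a fresh swap location if it is evicted later.
A simplified sketch of what it does; the real mm/swapfile.c version also
requires the slot's swap_map count to be 1 and rechecks everything under
the swap cache lock:

int remove_exclusive_swap_page(struct page *page)	/* simplified sketch */
{
	/* Caller must hold the page lock. */
	if (!PageSwapCache(page) || PageWriteback(page))
		return 0;
	if (page_count(page) != 2)	/* only us + the swap cache */
		return 0;

	delete_from_swap_cache(page);	/* frees the swap slot via swap_free() */
	SetPageDirty(page);		/* must be written to swap again if evicted */
	return 1;
}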

Signed-off-by: Rik van Riel <riel@xxxxxxxxxx>

--
Politics is the struggle between those who want to make their country
the best in the world, and those who believe it already is. Each group
calls the other unpatriotic.
--- linux-2.6.20.noarch/mm/vmscan.c.swapfree 2007-02-20 06:44:13.000000000 -0500
+++ linux-2.6.20.noarch/mm/vmscan.c 2007-02-20 06:54:10.000000000 -0500
@@ -587,6 +587,9 @@ free_it:
 		continue;
 
 activate_locked:
+		/* Not a candidate for swapping, so reclaim swap space. */
+		if (PageSwapCache(page) && vm_swap_full())
+			remove_exclusive_swap_page(page);
 		SetPageActive(page);
 		pgactivate++;
 keep_locked:
@@ -889,6 +892,8 @@ force_reclaim_mapped:
 			__mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
 			pgmoved = 0;
 			spin_unlock_irq(&zone->lru_lock);
+			if (vm_swap_full())
+				pagevec_swap_free(&pvec);
 			__pagevec_release(&pvec);
 			spin_lock_irq(&zone->lru_lock);
 		}
@@ -899,6 +904,8 @@ force_reclaim_mapped:
 	__count_vm_events(PGDEACTIVATE, pgdeactivate);
 	spin_unlock_irq(&zone->lru_lock);
 
+	if (vm_swap_full())
+		pagevec_swap_free(&pvec);
 	pagevec_release(&pvec);
 }

--- linux-2.6.20.noarch/mm/swap.c.swapfree 2007-02-04 13:44:54.000000000 -0500
+++ linux-2.6.20.noarch/mm/swap.c 2007-02-20 06:44:17.000000000 -0500
@@ -420,6 +420,26 @@ void pagevec_strip(struct pagevec *pvec)
 	}
 }
 
+/**
+ * pagevec_swap_free - try to free swap space from the pages in a pagevec
+ * @pvec: pagevec with swapcache pages whose swap space may be freed
+ */
+void pagevec_swap_free(struct pagevec *pvec)
+{
+	int i;
+
+	for (i = 0; i < pagevec_count(pvec); i++) {
+		struct page *page = pvec->pages[i];
+
+		if (PageSwapCache(page) && !TestSetPageLocked(page)) {
+			/* Re-check now that the page is locked. */
+			if (PageSwapCache(page))
+				remove_exclusive_swap_page(page);
+			unlock_page(page);
+		}
+	}
+}
+
 /**
  * pagevec_lookup - gang pagecache lookup
  * @pvec: Where the resulting pages are placed
--- linux-2.6.20.noarch/include/linux/pagevec.h.swapfree 2007-02-04 13:44:54.000000000 -0500
+++ linux-2.6.20.noarch/include/linux/pagevec.h 2007-02-20 06:44:17.000000000 -0500
@@ -26,6 +26,7 @@ void __pagevec_free(struct pagevec *pvec
 void __pagevec_lru_add(struct pagevec *pvec);
 void __pagevec_lru_add_active(struct pagevec *pvec);
 void pagevec_strip(struct pagevec *pvec);
+void pagevec_swap_free(struct pagevec *pvec);
 unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
 		pgoff_t start, unsigned nr_pages);
 unsigned pagevec_lookup_tag(struct pagevec *pvec,