[PATCH] mm: cma: Discard clean pages during contiguous allocation instead of migration

From: Minchan Kim
Date: Thu Sep 06 2012 - 22:20:48 EST


This patch drops *clean cache pages* instead of migrating them during
alloc_contig_range(), so that migration latency is reduced by avoiding
the memcpy and page remapping. This is useful for CMA, where allocation
latency matters more than keeping the working set of background
processes resident. In addition, fewer free pages are then needed as
migration targets, so entering memory reclaim to produce those free
pages, another factor that increases latency, can be avoided.

Cc: Marek Szyprowski <m.szyprowski@xxxxxxxxxxx>
Cc: Michal Nazarewicz <mina86@xxxxxxxxxx>
Cc: Rik van Riel <riel@xxxxxxxxxx>
Signed-off-by: Mel Gorman <mgorman@xxxxxxx>
Signed-off-by: Minchan Kim <minchan@xxxxxxxxxx>
---
mm/internal.h | 2 ++
mm/page_alloc.c | 2 ++
mm/vmscan.c | 42 ++++++++++++++++++++++++++++++++++++------
3 files changed, 40 insertions(+), 6 deletions(-)
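For reference, the discard criterion the new helper applies can be
summarised as a tiny predicate. This is an illustrative sketch only,
not part of the patch, and the helper name is hypothetical: only clean,
file-backed pages are safe to drop, because their contents can be
re-read from backing storage; anonymous and dirty pages must still go
through migrate_pages().

/*
 * Sketch of the criterion reclaim_clean_pages_from_list() applies
 * below (hypothetical helper, for illustration only).
 */
static inline bool can_discard_instead_of_migrate(struct page *page)
{
	/* Clean page cache can be refetched from its backing store. */
	return page_is_file_cache(page) && !PageDirty(page);
}

Pages that fail this test stay on the migration list and are handled by
migrate_pages() exactly as before.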

diff --git a/mm/internal.h b/mm/internal.h
index 3314f79..be09a7e 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -355,3 +355,5 @@ extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
unsigned long, unsigned long);

extern void set_pageblock_order(void);
+unsigned long reclaim_clean_pages_from_list(struct zone *zone,
+ struct list_head *page_list);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ba3100a..bf35e59 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5668,6 +5668,8 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
break;
}

+ reclaim_clean_pages_from_list(cc.zone, &cc.migratepages);
+
ret = migrate_pages(&cc.migratepages,
__alloc_contig_migrate_alloc,
0, false, MIGRATE_SYNC);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8d01243..525355e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -674,8 +674,10 @@ static enum page_references page_check_references(struct page *page,
static unsigned long shrink_page_list(struct list_head *page_list,
struct zone *zone,
struct scan_control *sc,
+ enum ttu_flags ttu_flags,
unsigned long *ret_nr_dirty,
- unsigned long *ret_nr_writeback)
+ unsigned long *ret_nr_writeback,
+ bool force_reclaim)
{
LIST_HEAD(ret_pages);
LIST_HEAD(free_pages);
@@ -689,10 +691,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,

mem_cgroup_uncharge_start();
while (!list_empty(page_list)) {
- enum page_references references;
struct address_space *mapping;
struct page *page;
int may_enter_fs;
+ enum page_references references = PAGEREF_RECLAIM;

cond_resched();

@@ -758,7 +760,9 @@ static unsigned long shrink_page_list(struct list_head *page_list,
wait_on_page_writeback(page);
}

- references = page_check_references(page, sc);
+ if (!force_reclaim)
+ references = page_check_references(page, sc);
+
switch (references) {
case PAGEREF_ACTIVATE:
goto activate_locked;
@@ -788,7 +792,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
* processes. Try to unmap it here.
*/
if (page_mapped(page) && mapping) {
- switch (try_to_unmap(page, TTU_UNMAP)) {
+ switch (try_to_unmap(page, ttu_flags)) {
case SWAP_FAIL:
goto activate_locked;
case SWAP_AGAIN:
@@ -960,6 +964,32 @@ keep:
return nr_reclaimed;
}

+unsigned long reclaim_clean_pages_from_list(struct zone *zone,
+ struct list_head *page_list)
+{
+ struct scan_control sc = {
+ .gfp_mask = GFP_KERNEL,
+ .priority = DEF_PRIORITY,
+ .may_unmap = 1,
+ };
+ unsigned long ret, dummy1, dummy2;
+ struct page *page, *next;
+ LIST_HEAD(clean_pages);
+
+ list_for_each_entry_safe(page, next, page_list, lru) {
+ if (page_is_file_cache(page) && !PageDirty(page)) {
+ ClearPageActive(page);
+ list_move(&page->lru, &clean_pages);
+ }
+ }
+
+ ret = shrink_page_list(&clean_pages, zone, &sc,
+ TTU_UNMAP|TTU_IGNORE_ACCESS,
+ &dummy1, &dummy2, true);
+ list_splice(&clean_pages, page_list);
+ return ret;
+}
+
/*
* Attempt to remove the specified page from its LRU. Only take this page
* if it is of the appropriate PageActive status. Pages which are being
@@ -1278,8 +1308,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
if (nr_taken == 0)
return 0;

- nr_reclaimed = shrink_page_list(&page_list, zone, sc,
- &nr_dirty, &nr_writeback);
+ nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP,
+ &nr_dirty, &nr_writeback, false);

spin_lock_irq(&zone->lru_lock);

--
1.7.9.5

--
Kind regards,
Minchan Kim