[PATCH] mm-compaction-cleanup-isolate_freepages-fix3

From: Joonsoo Kim
Date: Fri Apr 25 2014 - 04:12:58 EST


What I did here is take end_pfn out of the loop and consider the zone
boundary only once. After that, we can just set the block's end pfn to
the previous block's start pfn on every iteration to move the scanning
window down one pageblock. With this change, we can remove the local
variable z_end_pfn.

Other things I did are removing the max() operation and an unneeded
assignment to the isolated variable.

In addition, I rename the variables pfn and end_pfn to block_start_pfn
and block_end_pfn, respectively, since those names describe exactly
what they hold.
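
To make the new window movement concrete, here is a rough, self-contained
user-space sketch (not the kernel code; PAGEBLOCK_NR_PAGES and the pfn
values below are made-up stand-ins). The zone boundary is considered once
before the loop, and each iteration reuses the previous start as the new end:

#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512UL	/* stand-in for pageblock_nr_pages */

int main(void)
{
	unsigned long zone_end = 10000;	/* zone ends mid-pageblock */
	unsigned long free_pfn = 9800;	/* cached free scanner position */
	unsigned long low_pfn = 7168;	/* migrate scanner boundary */
	unsigned long block_start_pfn, block_end_pfn;

	/* Align the start down; clamp the end to the zone only once. */
	block_start_pfn = free_pfn & ~(PAGEBLOCK_NR_PAGES - 1);
	block_end_pfn = block_start_pfn + PAGEBLOCK_NR_PAGES;
	if (block_end_pfn > zone_end)
		block_end_pfn = zone_end;

	/* Slide the window down one pageblock per iteration. */
	for (; block_start_pfn >= low_pfn;
	     block_end_pfn = block_start_pfn,
	     block_start_pfn -= PAGEBLOCK_NR_PAGES) {
		printf("scan pageblock [%lu, %lu)\n",
		       block_start_pfn, block_end_pfn);
	}

	return 0;
}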

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>

diff --git a/mm/compaction.c b/mm/compaction.c
index 1c992dc..ba80bea 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -671,10 +671,10 @@ static void isolate_freepages(struct zone *zone,
struct compact_control *cc)
{
struct page *page;
- unsigned long pfn; /* scanning cursor */
+ unsigned long block_start_pfn; /* start of current pageblock */
+ unsigned long block_end_pfn; /* end of current pageblock */
unsigned long low_pfn; /* lowest pfn scanner is able to scan */
unsigned long next_free_pfn; /* start pfn for scaning at next round */
- unsigned long z_end_pfn; /* zone's end pfn */
int nr_freepages = cc->nr_freepages;
struct list_head *freelist = &cc->freepages;

@@ -682,31 +682,33 @@ static void isolate_freepages(struct zone *zone,
* Initialise the free scanner. The starting point is where we last
* successfully isolated from, zone-cached value, or the end of the
* zone when isolating for the first time. We need this aligned to
- * the pageblock boundary, because we do pfn -= pageblock_nr_pages
- * in the for loop.
+ * the pageblock boundary, because we do
+ * block_start_pfn -= pageblock_nr_pages in the for loop.
+ * For ending point, take care when isolating in the last pageblock of
+ * a zone which ends in the middle of a pageblock.
* The low boundary is the end of the pageblock the migration scanner
* is using.
*/
- pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
+ block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
+ block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
+ zone_end_pfn(zone));
low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);

/*
- * Seed the value for max(next_free_pfn, pfn) updates. If no pages are
- * isolated, the pfn < low_pfn check will kick in.
+ * If no pages are isolated, the block_start_pfn < low_pfn check
+ * will kick in.
*/
next_free_pfn = 0;

- z_end_pfn = zone_end_pfn(zone);
-
/*
* Isolate free pages until enough are available to migrate the
* pages on cc->migratepages. We stop searching if the migrate
* and free page scanners meet or enough free pages are isolated.
*/
- for (; pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
- pfn -= pageblock_nr_pages) {
+ for (; block_start_pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
+ block_end_pfn = block_start_pfn,
+ block_start_pfn -= pageblock_nr_pages) {
unsigned long isolated;
- unsigned long end_pfn;

/*
* This can iterate a massively long zone without finding any
@@ -715,7 +717,7 @@ static void isolate_freepages(struct zone *zone,
*/
cond_resched();

- if (!pfn_valid(pfn))
+ if (!pfn_valid(block_start_pfn))
continue;

/*
@@ -725,7 +727,7 @@ static void isolate_freepages(struct zone *zone,
* i.e. it's possible that all pages within a zones range of
* pages do not belong to a single zone.
*/
- page = pfn_to_page(pfn);
+ page = pfn_to_page(block_start_pfn);
if (page_zone(page) != zone)
continue;

@@ -738,15 +740,8 @@ static void isolate_freepages(struct zone *zone,
continue;

/* Found a block suitable for isolating free pages from */
- isolated = 0;
-
- /*
- * Take care when isolating in last pageblock of a zone which
- * ends in the middle of a pageblock.
- */
- end_pfn = min(pfn + pageblock_nr_pages, z_end_pfn);
- isolated = isolate_freepages_block(cc, pfn, end_pfn,
- freelist, false);
+ isolated = isolate_freepages_block(cc, block_start_pfn,
+ block_end_pfn, freelist, false);
nr_freepages += isolated;

/*
@@ -754,9 +749,9 @@ static void isolate_freepages(struct zone *zone,
* looking for free pages, the search will restart here as
* page migration may have returned some pages to the allocator
*/
- if (isolated) {
+ if (isolated && next_free_pfn == 0) {
cc->finished_update_free = true;
- next_free_pfn = max(next_free_pfn, pfn);
+ next_free_pfn = block_start_pfn;
}
}

@@ -767,7 +762,7 @@ static void isolate_freepages(struct zone *zone,
* If we crossed the migrate scanner, we want to keep it that way
* so that compact_finished() may detect this
*/
- if (pfn < low_pfn)
+ if (block_start_pfn < low_pfn)
next_free_pfn = cc->migrate_pfn;

cc->free_pfn = next_free_pfn;
--
1.7.9.5
