[PATCH v3 09/12] mm: prepare for hot-{add,remove} of sub-section ranges

From: Dan Williams
Date: Thu Jan 19 2017 - 17:24:00 EST


Prepare the memory hot-{add,remove} paths to handle sub-section ranges
by plumbing the starting page frame and number of pages being handled
through arch_{add,remove}_memory() down to
sparse_{add,remove}_section(), which are renamed from
sparse_{add,remove}_one_section() since they now take an explicit
pfn/nr_pages span rather than implying a full section.

This is pure plumbing, small cleanups, and identifier renames. No
functional changes intended.
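
For orientation, after this change the span is carried end-to-end
through the hotplug call chain. Sketching just the signatures touched
by the diff below:

	int arch_add_memory(int nid, u64 start, u64 size, bool for_device);
	int __add_pages(int nid, struct zone *zone, unsigned long pfn,
			unsigned long nr_pages);
	int sparse_add_section(struct zone *zone, unsigned long pfn,
			unsigned long nr_pages);
	void sparse_remove_section(struct zone *zone, struct mem_section *ms,
			unsigned long pfn, unsigned long nr_pages,
			unsigned long map_offset);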

Cc: Michal Hocko <mhocko@xxxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: Logan Gunthorpe <logang@xxxxxxxxxxxx>
Cc: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Stephen Bates <stephen.bates@xxxxxxxxxxxxx>
Signed-off-by: Dan Williams <dan.j.williams@xxxxxxxxx>
---
arch/x86/mm/init_64.c | 11 +++++
include/linux/memory_hotplug.h | 6 ++-
mm/memory_hotplug.c | 85 ++++++++++++++++++++++------------------
mm/sparse.c | 6 ++-
4 files changed, 65 insertions(+), 43 deletions(-)

diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 3d719d48f11d..843512f492e0 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -650,6 +650,17 @@ int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
unsigned long nr_pages = size >> PAGE_SHIFT;
int ret;

+ /*
+ * Only allow partial section hotplug for ZONE_DEVICE ranges,
+ * since register_new_memory() requires section alignment, and
+ * CONFIG_SPARSEMEM_VMEMMAP=n requires sections to be fully
+ * populated.
+ */
+ if ((!IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP) || !for_device)
+ && ((start & ~PA_SECTION_MASK)
+ || (size & ~PA_SECTION_MASK)))
+ return -EINVAL;
+
init_memory_mapping(start, start + size);

ret = __add_pages(nid, zone, start_pfn, nr_pages);
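
The guard added above rejects any span whose start or size is not
section-aligned unless the range is a ZONE_DEVICE (for_device) mapping
on a vmemmap build. A standalone sketch of the same alignment test,
assuming x86_64's 128 MiB sections (the constants here are
illustrative, not pulled from the kernel's headers):

	#include <stdbool.h>
	#include <stdint.h>

	/* illustrative: x86_64 sparsemem uses 2^27-byte (128 MiB) sections */
	#define PA_SECTION_SHIFT	27
	#define PA_SECTION_MASK		(~((1ULL << PA_SECTION_SHIFT) - 1))

	/* mirrors the check added to arch_add_memory() above */
	static bool span_is_section_aligned(uint64_t start, uint64_t size)
	{
		/* any low bits in start or size mean a partial section */
		return !((start | size) & ~PA_SECTION_MASK);
	}

For example, span_is_section_aligned(0x8000000, 0x8000000) holds (one
full 128 MiB section), while a 4 KiB offset in either argument makes
it false and, for non-ZONE_DEVICE memory, fails the hot-add.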
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 01033fadea47..a6ac3c975d5d 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -279,8 +279,10 @@ extern int arch_add_memory(int nid, u64 start, u64 size, bool for_device);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern bool is_memblock_offlined(struct memory_block *mem);
extern void remove_memory(int nid, u64 start, u64 size);
-extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn);
-extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
+extern int sparse_add_section(struct zone *zone, unsigned long pfn,
+ unsigned long nr_pages);
+extern void sparse_remove_section(struct zone *zone, struct mem_section *ms,
+ unsigned long pfn, unsigned long nr_pages,
unsigned long map_offset);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
unsigned long pnum);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 684addad9708..a203a795a46c 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -466,10 +466,10 @@ static void __meminit grow_pgdat_span(struct pglist_data *pgdat, unsigned long s
pgdat->node_start_pfn;
}

-static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
+static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn,
+ unsigned long nr_pages)
{
struct pglist_data *pgdat = zone->zone_pgdat;
- int nr_pages = PAGES_PER_SECTION;
int nid = pgdat->node_id;
int zone_type;
unsigned long flags, pfn;
@@ -499,24 +499,21 @@ static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
}

static int __meminit __add_section(int nid, struct zone *zone,
- unsigned long phys_start_pfn)
+ unsigned long pfn, unsigned long nr_pages)
{
int ret;

- if (pfn_valid(phys_start_pfn))
- return -EEXIST;
-
- ret = sparse_add_one_section(zone, phys_start_pfn);
+ ret = sparse_add_section(zone, pfn, nr_pages);

if (ret < 0)
return ret;

- ret = __add_zone(zone, phys_start_pfn);
+ ret = __add_zone(zone, pfn, nr_pages);

if (ret < 0)
return ret;

- return register_new_memory(zone, nid, __pfn_to_section(phys_start_pfn));
+ return register_new_memory(zone, nid, __pfn_to_section(pfn));
}

/*
@@ -525,7 +522,7 @@ static int __meminit __add_section(int nid, struct zone *zone,
* call this function after deciding the zone to which to
* add the new pages.
*/
-int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
+int __ref __add_pages(int nid, struct zone *zone, unsigned long pfn,
unsigned long nr_pages)
{
int err = 0, i, start_sec, end_sec;
@@ -533,16 +530,12 @@ int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,

clear_zone_contiguous(zone);

- /* during initialize mem_map, align hot-added range to section */
- start_sec = pfn_to_section_nr(phys_start_pfn);
- end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);
-
- altmap = to_vmem_altmap((unsigned long) pfn_to_page(phys_start_pfn));
+ altmap = to_vmem_altmap((unsigned long) pfn_to_page(pfn));
if (altmap) {
/*
* Validate altmap is within bounds of the total request
*/
- if (altmap->base_pfn != phys_start_pfn
+ if (altmap->base_pfn != pfn
|| vmem_altmap_offset(altmap) > nr_pages) {
pr_warn_once("memory add fail, invalid altmap\n");
err = -EINVAL;
@@ -551,8 +544,16 @@ int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
altmap->alloc = 0;
}

+ start_sec = pfn_to_section_nr(pfn);
+ end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
for (i = start_sec; i <= end_sec; i++) {
- err = __add_section(nid, zone, section_nr_to_pfn(i));
+ unsigned long pfns;
+
+ pfns = min(nr_pages, PAGES_PER_SECTION
+ - (pfn & ~PAGE_SECTION_MASK));
+ err = __add_section(nid, zone, pfn, pfns);
+ pfn += pfns;
+ nr_pages -= pfns;

/*
* EEXIST is finally dealt with by ioresource collision
@@ -758,10 +759,10 @@ static void shrink_pgdat_span(struct pglist_data *pgdat,
pgdat->node_spanned_pages = 0;
}

-static void __remove_zone(struct zone *zone, unsigned long start_pfn)
+static void __remove_zone(struct zone *zone, unsigned long start_pfn,
+ unsigned long nr_pages)
{
struct pglist_data *pgdat = zone->zone_pgdat;
- int nr_pages = PAGES_PER_SECTION;
int zone_type;
unsigned long flags;

@@ -773,11 +774,10 @@ static void __remove_zone(struct zone *zone, unsigned long start_pfn)
pgdat_resize_unlock(zone->zone_pgdat, &flags);
}

-static int __remove_section(struct zone *zone, struct mem_section *ms,
- unsigned long map_offset)
+static int __remove_section(struct zone *zone, unsigned long pfn,
+ unsigned long nr_pages, unsigned long map_offset)
{
- unsigned long start_pfn;
- int scn_nr;
+ struct mem_section *ms = __nr_to_section(pfn_to_section_nr(pfn));
int ret = -EINVAL;

if (!valid_section(ms))
@@ -787,11 +787,9 @@ static int __remove_section(struct zone *zone, struct mem_section *ms,
if (ret)
return ret;

- scn_nr = __section_nr(ms);
- start_pfn = section_nr_to_pfn(scn_nr);
- __remove_zone(zone, start_pfn);
+ __remove_zone(zone, pfn, nr_pages);

- sparse_remove_one_section(zone, ms, map_offset);
+ sparse_remove_section(zone, ms, pfn, nr_pages, map_offset);
return 0;
}

@@ -806,16 +804,15 @@ static int __remove_section(struct zone *zone, struct mem_section *ms,
sure that pages are marked reserved and zones are adjusted properly by
* calling offline_pages().
*/
-int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
+int __remove_pages(struct zone *zone, unsigned long pfn,
unsigned long nr_pages)
{
- unsigned long i;
unsigned long map_offset = 0;
- int sections_to_remove, ret = 0;
+ int i, start_sec, end_sec, ret = 0;

/* In the ZONE_DEVICE case device driver owns the memory region */
if (is_dev_zone(zone)) {
- struct page *page = pfn_to_page(phys_start_pfn);
+ struct page *page = pfn_to_page(pfn);
struct vmem_altmap *altmap;

altmap = to_vmem_altmap((unsigned long) page);
@@ -824,7 +821,7 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
} else {
resource_size_t start, size;

- start = phys_start_pfn << PAGE_SHIFT;
+ start = pfn << PAGE_SHIFT;
size = nr_pages * PAGE_SIZE;

ret = release_mem_region_adjustable(&iomem_resource, start,
@@ -840,16 +837,26 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
clear_zone_contiguous(zone);

/*
- * We can only remove entire sections
+ * Only ZONE_DEVICE memory may be removed in section-unaligned
+ * ranges; see register_new_memory(), which assumes section
+ * alignment and is skipped for ZONE_DEVICE ranges.
*/
- BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
- BUG_ON(nr_pages % PAGES_PER_SECTION);
+ if (!is_dev_zone(zone) && ((pfn | nr_pages) & ~PAGE_SECTION_MASK)) {
+ WARN(1, "section unaligned removal not supported\n");
+ return -EINVAL;
+ }

- sections_to_remove = nr_pages / PAGES_PER_SECTION;
- for (i = 0; i < sections_to_remove; i++) {
- unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
+ start_sec = pfn_to_section_nr(pfn);
+ end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
+ for (i = start_sec; i <= end_sec; i++) {
+ unsigned long pfns;

- ret = __remove_section(zone, __pfn_to_section(pfn), map_offset);
+ pfns = min(nr_pages, PAGES_PER_SECTION
+ - (pfn & ~PAGE_SECTION_MASK));
+ ret = __remove_section(zone, pfn, pfns, map_offset);
+ pfn += pfns;
+ nr_pages -= pfns;
map_offset = 0;
if (ret)
break;
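
Both the add and remove loops above carve a request into at most
section-sized spans with the same min() arithmetic. A userspace sketch
of that split, assuming x86_64's 32768 pages per section (the request
values are made up for illustration):

	#include <stdio.h>

	/* illustrative: x86_64 has 2^15 pages per 128 MiB section */
	#define PAGES_PER_SECTION	32768UL
	#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION - 1))
	#define min(a, b)		((a) < (b) ? (a) : (b))

	int main(void)
	{
		/* hypothetical request straddling a section boundary */
		unsigned long pfn = 33792, nr_pages = 40960;

		while (nr_pages) {
			/* never cross the end of the current section */
			unsigned long pfns = min(nr_pages, PAGES_PER_SECTION
					- (pfn & ~PAGE_SECTION_MASK));

			printf("span: pfn %lu, %lu pages\n", pfn, pfns);
			pfn += pfns;
			nr_pages -= pfns;
		}
		return 0;
	}

This prints a 31744-page span that finishes the first section,
followed by a 9216-page span at the start of the next one, which is
exactly how __add_section()/__remove_section() see a sub-section
request.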
diff --git a/mm/sparse.c b/mm/sparse.c
index 97f91770e3d0..a8358d15a90d 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -753,7 +753,8 @@ static void free_map_bootmem(struct page *memmap)
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

-int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn)
+int __meminit sparse_add_section(struct zone *zone, unsigned long start_pfn,
+ unsigned long nr_pages)
{
unsigned long section_nr = pfn_to_section_nr(start_pfn);
struct pglist_data *pgdat = zone->zone_pgdat;
@@ -855,7 +856,8 @@ static void free_section_usage(struct page *memmap,
free_map_bootmem(memmap);
}

-void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
+void sparse_remove_section(struct zone *zone, struct mem_section *ms,
+ unsigned long pfn, unsigned long nr_pages,
unsigned long map_offset)
{
unsigned long flags;