[RFC PATCH 8/8] memory-hotplug: implement arch_remove_memory()

From: Wen Congyang
Date: Fri Jul 20 2012 - 03:09:16 EST


Clear the page table entries that map the removed memory range. If only part
of the memory covered by a large-page entry is removed, split the large page
into smaller pages first, then clear the entries for the removed portion.

CC: David Rientjes <rientjes@xxxxxxxxxx>
CC: Jiang Liu <liuj97@xxxxxxxxx>
CC: Len Brown <len.brown@xxxxxxxxx>
CC: Benjamin Herrenschmidt <benh@xxxxxxxxxxxxxxxxxxx>
CC: Paul Mackerras <paulus@xxxxxxxxx>
CC: Christoph Lameter <cl@xxxxxxxxx>
Cc: Minchan Kim <minchan.kim@xxxxxxxxx>
CC: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
CC: KOSAKI Motohiro <kosaki.motohiro@xxxxxxxxxxxxxx>
CC: Yasuaki Ishimatsu <isimatu.yasuaki@xxxxxxxxxxxxxx>
Signed-off-by: Wen Congyang <wency@xxxxxxxxxxxxxx>
---
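As a stand-alone illustration of the splitting decision described above (not
part of the patch): the small userspace demo below mimics the test made for
each 2M PMD entry in phys_pmd_remove() in the patch that follows -- if the
removal range covers the whole 2M entry it is cleared directly, otherwise the
large page would be split into 4K pages and only the covered PTEs cleared.
The constants, the pmd_remove_decision() helper and the example range are
made up for the demo; only the alignment/coverage test mirrors the patch.

/*
 * Stand-alone demo (userspace, not kernel code): which 2M entries can be
 * cleared whole and which must be split first.  Constants mirror x86-64.
 */
#include <stdio.h>

#define PAGE_SIZE	0x1000UL		/* 4K */
#define PMD_SIZE	0x200000UL		/* 2M */
#define PMD_MASK	(~(PMD_SIZE - 1))

/* Same coverage test as the 2M case in phys_pmd_remove() below. */
static void pmd_remove_decision(unsigned long addr, unsigned long end)
{
	unsigned long next = (addr & PMD_MASK) + PMD_SIZE;

	if ((addr & ~PMD_MASK) == 0 && next <= end)
		printf("0x%06lx: 2M entry fully covered -> clear whole PMD\n",
		       addr);
	else
		printf("0x%06lx: partial coverage -> split to 4K, clear %lu PTEs\n",
		       addr, ((end < next ? end : next) - addr) / PAGE_SIZE);
}

int main(void)
{
	/* Made-up removal range: 3M starting at the 1M mark. */
	unsigned long start = 0x100000UL, end = 0x400000UL;
	unsigned long addr, next;

	for (addr = start; addr < end; addr = next) {
		next = (addr & PMD_MASK) + PMD_SIZE;
		pmd_remove_decision(addr, end);
	}
	return 0;
}

For that made-up range it prints one "split" decision for the unaligned 1M-2M
head and one whole-PMD clear for the aligned 2M-4M entry.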
arch/x86/mm/init_64.c | 159 ++++++++++++++++++++++++++++++++++++++++++++++++-
1 files changed, 156 insertions(+), 3 deletions(-)

diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 78b94bc..d78f352 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -675,11 +675,164 @@ int arch_add_memory(int nid, u64 start, u64 size)
}
EXPORT_SYMBOL_GPL(arch_add_memory);

+static void __meminit
+phys_pte_remove(pte_t *pte_page, unsigned long addr, unsigned long end)
+{
+	unsigned long pages = 0;
+	int i = pte_index(addr);
+
+	pte_t *pte = pte_page + pte_index(addr);
+
+	for (; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE, pte++) {
+
+		if (addr >= end)
+			break;
+
+		if (!pte_present(*pte))
+			continue;
+
+		pages++;
+		set_pte(pte, __pte(0));
+	}
+
+	update_page_count(PG_LEVEL_4K, -pages);
+}
+
+static void __meminit
+phys_pmd_remove(pmd_t *pmd_page, unsigned long addr, unsigned long end)
+{
+	unsigned long pages = 0, next;
+	int i = pmd_index(addr);
+
+	for (; i < PTRS_PER_PMD; i++, addr = next) {
+		unsigned long pte_phys;
+		pmd_t *pmd = pmd_page + pmd_index(addr);
+		pte_t *pte;
+
+		if (addr >= end)
+			break;
+
+		next = (addr & PMD_MASK) + PMD_SIZE;
+
+		if (!pmd_present(*pmd))
+			continue;
+
+		if (pmd_large(*pmd)) {
+			if ((addr & ~PMD_MASK) == 0 && next <= end) {
+				set_pmd(pmd, __pmd(0));
+				pages++;
+				continue;
+			}
+
+			/*
+			 * This entry maps a 2M page but only part of it is
+			 * being removed, so split the 2M page into 4K pages.
+			 */
+			pte = alloc_low_page(&pte_phys);
+			__split_large_page((pte_t *)pmd, addr, pte);
+
+			spin_lock(&init_mm.page_table_lock);
+			pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
+			spin_unlock(&init_mm.page_table_lock);
+		}
+
+		spin_lock(&init_mm.page_table_lock);
+		pte = map_low_page((pte_t *)pmd_page_vaddr(*pmd));
+		phys_pte_remove(pte, addr, end);
+		unmap_low_page(pte);
+		spin_unlock(&init_mm.page_table_lock);
+	}
+	update_page_count(PG_LEVEL_2M, -pages);
+}
+
+static void __meminit
+phys_pud_remove(pud_t *pud_page, unsigned long addr, unsigned long end)
+{
+	unsigned long pages = 0, next;
+	int i = pud_index(addr);
+
+	for (; i < PTRS_PER_PUD; i++, addr = next) {
+		unsigned long pmd_phys;
+		pud_t *pud = pud_page + pud_index(addr);
+		pmd_t *pmd;
+
+		if (addr >= end)
+			break;
+
+		next = (addr & PUD_MASK) + PUD_SIZE;
+
+		if (!pud_present(*pud))
+			continue;
+
+		if (pud_large(*pud)) {
+			if ((addr & ~PUD_MASK) == 0 && next <= end) {
+				set_pud(pud, __pud(0));
+				pages++;
+				continue;
+			}
+
+			/*
+			 * This entry maps a 1G page but only part of it is
+			 * being removed, so split the 1G page into 2M pages.
+			 */
+			pmd = alloc_low_page(&pmd_phys);
+			__split_large_page((pte_t *)pud, addr, (pte_t *)pmd);
+
+			spin_lock(&init_mm.page_table_lock);
+			pud_populate(&init_mm, pud, __va(pmd_phys));
+			spin_unlock(&init_mm.page_table_lock);
+		}
+
+		pmd = map_low_page(pmd_offset(pud, 0));
+		phys_pmd_remove(pmd, addr, end);
+		unmap_low_page(pmd);
+		__flush_tlb_all();
+	}
+	__flush_tlb_all();
+
+	update_page_count(PG_LEVEL_1G, -pages);
+}
+
+void __meminit
+kernel_physical_mapping_remove(unsigned long start, unsigned long end)
+{
+	unsigned long next;
+
+	start = (unsigned long)__va(start);
+	end = (unsigned long)__va(end);
+
+	for (; start < end; start = next) {
+		pgd_t *pgd = pgd_offset_k(start);
+		pud_t *pud;
+
+		next = (start + PGDIR_SIZE) & PGDIR_MASK;
+		if (next > end)
+			next = end;
+
+		if (!pgd_present(*pgd))
+			continue;
+
+		pud = map_low_page((pud_t *)pgd_page_vaddr(*pgd));
+		phys_pud_remove(pud, __pa(start), __pa(end));
+		unmap_low_page(pud);
+	}
+
+	__flush_tlb_all();
+}
+
#ifdef CONFIG_MEMORY_HOTREMOVE
-int arch_remove_memory(unsigned long start, unsigned long size)
+int __ref arch_remove_memory(unsigned long start, unsigned long size)
{
-	/* TODO */
-	return -EBUSY;
+	unsigned long start_pfn = start >> PAGE_SHIFT;
+	unsigned long nr_pages = size >> PAGE_SHIFT;
+	int ret;
+
+	ret = __remove_pages(start_pfn, nr_pages);
+	WARN_ON_ONCE(ret);
+
+	kernel_physical_mapping_remove(start, start + size);
+
+	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */
--
1.7.1
