[PATCH v4 1/3] x86: Introduce a new constant KERNEL_MAPPING_SIZE

From: Baoquan He
Date: Thu Feb 02 2017 - 07:55:03 EST


On x86, KERNEL_IMAGE_SIZE limits the size of the running kernel image, but
it also represents the size of the kernel image mapping area. That is fine
as long as the kernel virtual address stays inside a fixed 512M area and
the kernel image is not bigger than 512M.

With the addition of KASLR, the kernel mapping area on x86_64 is extended
by another 512M, up to 1G. It is no longer appropriate to let
KERNEL_IMAGE_SIZE alone keep playing both roles.

So introduce a new constant, KERNEL_MAPPING_SIZE, to represent the size of
the kernel mapping area, and let KERNEL_IMAGE_SIZE mean only what its name
says. On x86_32 the kernel image size is the same as the kernel mapping
size, but KERNEL_MAPPING_SIZE is introduced there as well so that kaslr.c
can handle both architectures generically.

This patch just adds KERNEL_MAPPING_SIZE and replaces KERNEL_IMAGE_SIZE
with it in the relevant places. No functional change.
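
For reference, a condensed sketch of where the two constants end up after
this patch (paraphrased from the hunks below, not literal file contents):

  /* arch/x86/include/asm/page_64_types.h, after this patch (condensed) */
  #define KERNEL_IMAGE_SIZE	(512 * 1024 * 1024)
  #if defined(CONFIG_RANDOMIZE_BASE)
  #define KERNEL_MAPPING_SIZE	(1024 * 1024 * 1024)
  #else
  #define KERNEL_MAPPING_SIZE	(512 * 1024 * 1024)
  #endif

  /* arch/x86/include/asm/page_32_types.h: mapping area equals image size */
  #define KERNEL_MAPPING_SIZE	KERNEL_IMAGE_SIZE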

Signed-off-by: Baoquan He <bhe@xxxxxxxxxx>
---
arch/x86/boot/compressed/kaslr.c | 10 +++++-----
arch/x86/include/asm/page_32_types.h | 6 ++++++
arch/x86/include/asm/page_64_types.h | 12 +++++++++---
arch/x86/include/asm/pgtable_64_types.h | 2 +-
arch/x86/kernel/head64.c | 4 ++--
arch/x86/kernel/head_64.S | 2 +-
arch/x86/kernel/machine_kexec_64.c | 2 +-
arch/x86/mm/init_64.c | 2 +-
arch/x86/mm/physaddr.c | 6 +++---
9 files changed, 29 insertions(+), 17 deletions(-)

diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index a66854d..6d2424e 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -311,7 +311,7 @@ static void process_e820_entry(struct e820entry *entry,
return;

/* On 32-bit, ignore entries entirely above our maximum. */
- if (IS_ENABLED(CONFIG_X86_32) && entry->addr >= KERNEL_IMAGE_SIZE)
+ if (IS_ENABLED(CONFIG_X86_32) && entry->addr >= KERNEL_MAPPING_SIZE)
return;

/* Ignore entries entirely below our minimum. */
@@ -341,8 +341,8 @@ static void process_e820_entry(struct e820entry *entry,

/* On 32-bit, reduce region size to fit within max size. */
if (IS_ENABLED(CONFIG_X86_32) &&
- region.start + region.size > KERNEL_IMAGE_SIZE)
- region.size = KERNEL_IMAGE_SIZE - region.start;
+ region.start + region.size > KERNEL_MAPPING_SIZE)
+ region.size = KERNEL_MAPPING_SIZE - region.start;

/* Return if region can't contain decompressed kernel */
if (region.size < image_size)
@@ -408,9 +408,9 @@ static unsigned long find_random_virt_addr(unsigned long minimum,
/*
* There are how many CONFIG_PHYSICAL_ALIGN-sized slots
* that can hold image_size within the range of minimum to
- * KERNEL_IMAGE_SIZE?
+ * KERNEL_MAPPING_SIZE?
*/
- slots = (KERNEL_IMAGE_SIZE - minimum - image_size) /
+ slots = (KERNEL_MAPPING_SIZE - minimum - image_size) /
CONFIG_PHYSICAL_ALIGN + 1;

random_addr = kaslr_get_random_long("Virtual") % slots;
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
index 3bae496..e93de86 100644
--- a/arch/x86/include/asm/page_32_types.h
+++ b/arch/x86/include/asm/page_32_types.h
@@ -42,6 +42,12 @@
*/
#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)

+/*
+ * The kernel mapping size is limited to 512 MB, which is equal to the
+ * kernel image size.
+ */
+#define KERNEL_MAPPING_SIZE KERNEL_IMAGE_SIZE
+
#ifndef __ASSEMBLY__

/*
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 9215e05..24c9098 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -50,16 +50,22 @@
#define __VIRTUAL_MASK_SHIFT 47

/*
- * Kernel image size is limited to 1GiB due to the fixmap living in the
+ * Kernel image size is limited to 512 MB. The kernel code+data+bss
+ * must not be bigger than that.
+ */
+#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
+
+/*
+ * Kernel mapping size is limited to 1GiB due to the fixmap living in the
* next 1GiB (see level2_kernel_pgt in arch/x86/kernel/head_64.S). Use
* 512MiB by default, leaving 1.5GiB for modules once the page tables
* are fully set up. If kernel ASLR is configured, it can extend the
* kernel page table mapping, reducing the size of the modules area.
*/
#if defined(CONFIG_RANDOMIZE_BASE)
-#define KERNEL_IMAGE_SIZE (1024 * 1024 * 1024)
+#define KERNEL_MAPPING_SIZE (1024 * 1024 * 1024)
#else
-#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
+#define KERNEL_MAPPING_SIZE (512 * 1024 * 1024)
#endif

#endif /* _ASM_X86_PAGE_64_DEFS_H */
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 3a26420..a357050 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -66,7 +66,7 @@ typedef struct { pteval_t pte; } pte_t;
#define VMEMMAP_START __VMEMMAP_BASE
#endif /* CONFIG_RANDOMIZE_MEMORY */
#define VMALLOC_END (VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL))
-#define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
+#define MODULES_VADDR (__START_KERNEL_map + KERNEL_MAPPING_SIZE)
#define MODULES_END _AC(0xffffffffff000000, UL)
#define MODULES_LEN (MODULES_END - MODULES_VADDR)
#define ESPFIX_PGD_ENTRY _AC(-2, UL)
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 54a2372..7484d86 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -139,8 +139,8 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
* area mappings. (these are purely build-time and produce no code)
*/
BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
- BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
- BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
+ BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_MAPPING_SIZE);
+ BUILD_BUG_ON(MODULES_LEN + KERNEL_MAPPING_SIZE > 2*PUD_SIZE);
BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index b467b14..cdfe4dc 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -468,7 +468,7 @@ NEXT_PAGE(level2_kernel_pgt)
* too.)
*/
PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
- KERNEL_IMAGE_SIZE/PMD_SIZE)
+ KERNEL_MAPPING_SIZE/PMD_SIZE)

NEXT_PAGE(level2_fixmap_pgt)
.fill 506,8,0
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 307b1f4..817e342 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -337,7 +337,7 @@ void arch_crash_save_vmcoreinfo(void)
#endif
vmcoreinfo_append_str("KERNELOFFSET=%lx\n",
kaslr_offset());
- VMCOREINFO_NUMBER(KERNEL_IMAGE_SIZE);
+ VMCOREINFO_NUMBER(KERNEL_MAPPING_SIZE);
}

/* arch-dependent functionality related to kexec file-based syscall */
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index af85b68..57fdea5 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -297,7 +297,7 @@ void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
void __init cleanup_highmap(void)
{
unsigned long vaddr = __START_KERNEL_map;
- unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
+ unsigned long vaddr_end = __START_KERNEL_map + KERNEL_MAPPING_SIZE;
unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
pmd_t *pmd = level2_kernel_pgt;

diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
index cfc3b91..c0b70fc 100644
--- a/arch/x86/mm/physaddr.c
+++ b/arch/x86/mm/physaddr.c
@@ -18,7 +18,7 @@ unsigned long __phys_addr(unsigned long x)
if (unlikely(x > y)) {
x = y + phys_base;

- VIRTUAL_BUG_ON(y >= KERNEL_IMAGE_SIZE);
+ VIRTUAL_BUG_ON(y >= KERNEL_MAPPING_SIZE);
} else {
x = y + (__START_KERNEL_map - PAGE_OFFSET);

@@ -35,7 +35,7 @@ unsigned long __phys_addr_symbol(unsigned long x)
unsigned long y = x - __START_KERNEL_map;

/* only check upper bounds since lower bounds will trigger carry */
- VIRTUAL_BUG_ON(y >= KERNEL_IMAGE_SIZE);
+ VIRTUAL_BUG_ON(y >= KERNEL_MAPPING_SIZE);

return y + phys_base;
}
@@ -50,7 +50,7 @@ bool __virt_addr_valid(unsigned long x)
if (unlikely(x > y)) {
x = y + phys_base;

- if (y >= KERNEL_IMAGE_SIZE)
+ if (y >= KERNEL_MAPPING_SIZE)
return false;
} else {
x = y + (__START_KERNEL_map - PAGE_OFFSET);
--
2.5.5