[PATCH v2 2/3] arm64: Support huge vmalloc mappings

From: Kefeng Wang
Date: Mon Dec 27 2021 - 09:49:52 EST


Select HAVE_ARCH_HUGE_VMALLOC to let arm64 support huge vmalloc
mappings. Module allocations keep using base pages by passing
VM_NO_HUGE_VMAP to __vmalloc_node_range() in module_alloc().

Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Signed-off-by: Kefeng Wang <wangkefeng.wang@xxxxxxxxxx>
---
Documentation/admin-guide/kernel-parameters.txt | 4 ++--
arch/arm64/Kconfig | 1 +
arch/arm64/kernel/module.c | 5 +++--
3 files changed, 6 insertions(+), 4 deletions(-)
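
For illustration only (not part of the diff below): with HAVE_ARCH_HUGE_VMALLOC
selected, a sufficiently large vmalloc() allocation may be backed by PMD-sized
mappings (2MB with 4K pages), while callers that later change protections at
page granularity opt out with VM_NO_HUGE_VMAP, as module_alloc() does below.
A minimal sketch, with made-up helper names:

	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	static void *example_huge_ok(unsigned long size)
	{
		/*
		 * May be backed by huge (PMD) mappings once
		 * HAVE_ARCH_HUGE_VMALLOC is selected and huge vmalloc
		 * is not disabled on the kernel command line.
		 */
		return vmalloc(size);
	}

	static void *example_base_pages_only(unsigned long size)
	{
		/*
		 * Opt out of huge mappings for memory whose protections
		 * are changed at page granularity later on.
		 */
		return __vmalloc_node_range(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, GFP_KERNEL,
				PAGE_KERNEL, VM_NO_HUGE_VMAP, NUMA_NO_NODE,
				__builtin_return_address(0));
	}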

diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 7b2f900fd243..e3f9fd7ec106 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1639,7 +1639,7 @@
precedence over memory_hotplug.memmap_on_memory.


- hugevmalloc= [PPC] Requires CONFIG_HAVE_ARCH_HUGE_VMALLOC
+ hugevmalloc= [KNL,PPC,ARM64] Requires CONFIG_HAVE_ARCH_HUGE_VMALLOC
Format: { on | off }
Default set by CONFIG_HUGE_VMALLOC_DEFAULT_ENABLED.

@@ -3424,7 +3424,7 @@

nohugeiomap [KNL,X86,PPC,ARM64] Disable kernel huge I/O mappings.

- nohugevmalloc [PPC] Disable kernel huge vmalloc mappings.
+ nohugevmalloc [KNL,PPC,ARM64] Disable kernel huge vmalloc mappings.

nosmt [KNL,S390] Disable symmetric multithreading (SMT).
Equivalent to smt=1.
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 3bb0b67292b5..c34bbb4482b0 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -142,6 +142,7 @@ config ARM64
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_BITREVERSE
select HAVE_ARCH_COMPILER_H
+ select HAVE_ARCH_HUGE_VMALLOC
select HAVE_ARCH_HUGE_VMAP
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 309a27553c87..af7b4cbace2b 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -36,7 +36,8 @@ void *module_alloc(unsigned long size)
module_alloc_end = MODULES_END;

p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
- module_alloc_end, gfp_mask, PAGE_KERNEL, VM_DEFER_KMEMLEAK,
+ module_alloc_end, gfp_mask, PAGE_KERNEL,
+ VM_DEFER_KMEMLEAK | VM_NO_HUGE_VMAP,
NUMA_NO_NODE, __builtin_return_address(0));

if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
@@ -55,7 +56,7 @@ void *module_alloc(unsigned long size)
*/
p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
module_alloc_base + SZ_2G, GFP_KERNEL,
- PAGE_KERNEL, 0, NUMA_NO_NODE,
+ PAGE_KERNEL, VM_NO_HUGE_VMAP, NUMA_NO_NODE,
__builtin_return_address(0));

if (p && (kasan_module_alloc(p, size, gfp_mask) < 0)) {
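
Note: as on powerpc, huge vmalloc mappings can still be disabled at boot
time via the parameters documented above, e.g.:

	nohugevmalloc
or
	hugevmalloc=off

with the default set by CONFIG_HUGE_VMALLOC_DEFAULT_ENABLED.
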
--
2.26.2