[PATCH v10 08/13] x86/um/vdso: nommu: vdso memory update

From: Hajime Tazaki
Date: Sun Jun 22 2025 - 17:34:52 EST


On !MMU mode, the address of the vdso is directly accessible from
userspace. This commit implements the entry point by pointing it at the
address of the allocated vdso page.

This commit also configures the memory permissions of the vdso page so
that it is executable.

Signed-off-by: Hajime Tazaki <thehajime@xxxxxxxxx>
Signed-off-by: Ricardo Koller <ricarkol@xxxxxxxxxx>
---
arch/x86/um/vdso/vma.c | 17 +++++++++++++++--
1 file changed, 15 insertions(+), 2 deletions(-)

diff --git a/arch/x86/um/vdso/vma.c b/arch/x86/um/vdso/vma.c
index dc8dfb2abd80..1c8c39f87681 100644
--- a/arch/x86/um/vdso/vma.c
+++ b/arch/x86/um/vdso/vma.c
@@ -9,6 +9,7 @@
#include <asm/page.h>
#include <asm/elf.h>
#include <linux/init.h>
+#include <os.h>

static unsigned int __read_mostly vdso_enabled = 1;
unsigned long um_vdso_addr;
@@ -21,14 +22,24 @@ static int __init init_vdso(void)
{
BUG_ON(vdso_end - vdso_start > PAGE_SIZE);

- um_vdso_addr = task_size - PAGE_SIZE;
-
um_vdso = alloc_page(GFP_KERNEL);
if (!um_vdso)
goto oom;

copy_page(page_address(um_vdso), vdso_start);

+#ifdef CONFIG_MMU
+ um_vdso_addr = task_size - PAGE_SIZE;
+#else
+ /* this is fine with NOMMU as everything is accessible */
+ um_vdso_addr = (unsigned long)page_address(um_vdso);
+ os_protect_memory((void *)um_vdso_addr, vdso_end - vdso_start, 1, 0, 1);
+#endif
+
+ pr_info("vdso_start=%lx um_vdso_addr=%lx pg_um_vdso=%lx",
+ (unsigned long)vdso_start, um_vdso_addr,
+ (unsigned long)page_address(um_vdso));
+
return 0;

oom:
@@ -39,6 +50,7 @@ static int __init init_vdso(void)
}
subsys_initcall(init_vdso);

+#ifdef CONFIG_MMU
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
struct vm_area_struct *vma;
@@ -63,3 +75,4 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)

return IS_ERR(vma) ? PTR_ERR(vma) : 0;
}
+#endif
--
2.43.0