[PATCH v1 RFC Zisslpcfi 08/20] riscv: ELF header parsing in GNU property for riscv zisslpcfi

From: Deepak Gupta
Date: Sun Feb 12 2023 - 23:54:51 EST


Binaries enabled for Zisslpcfi will contain new instructions that may fault
on RISC-V CPUs which don't implement Zimops or Zicfi. This change adds

- support for parsing the new backward and forward cfi flags in
  PT_GNU_PROPERTY (see the note layout sketch below)
- setting cfi state when the cfi flags are recognized in the ELF
- enabling backward cfi and forward cfi in sstatus
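
For reference (not part of this patch), a rough sketch of the
NT_GNU_PROPERTY_TYPE_0 note layout that the new arch_parse_elf_property()
hook consumes. The GNU_PROPERTY_RISCV_FEATURE_1_* macros are defined
elsewhere in this series; their use here is illustrative only:

#include <stdint.h>

struct riscv_cfi_property_note {
	uint32_t n_namesz;	/* 4: strlen("GNU") + 1 */
	uint32_t n_descsz;	/* 16: size of the property array below */
	uint32_t n_type;	/* NT_GNU_PROPERTY_TYPE_0 (5) */
	char	 n_name[4];	/* "GNU" */
	/* one property entry, 8-byte aligned for ELF64 */
	uint32_t pr_type;	/* GNU_PROPERTY_RISCV_FEATURE_1_AND */
	uint32_t pr_datasz;	/* 4 */
	uint32_t pr_data;	/* FEATURE_1_FCFI and/or FEATURE_1_BCFI bits */
	uint32_t pr_pad;	/* pad pr_data out to 8 bytes */
};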

Signed-off-by: Deepak Gupta <debug@xxxxxxxxxxxx>
---
arch/riscv/include/asm/elf.h | 54 +++++++++++++++++++++++++++++
arch/riscv/kernel/process.c | 67 ++++++++++++++++++++++++++++++++++++
2 files changed, 121 insertions(+)
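
Note (not part of the patch): a minimal user-space mirror of the kernel-side
property walk, handy for eyeballing toolchain output. The property type and
the FCFI/BCFI bit values are assumptions taken from this series' headers and
the RISC-V psABI draft, not defined by this patch:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Assumed values; the authoritative definitions live in the series' headers. */
#define GNU_PROPERTY_RISCV_FEATURE_1_AND	0xc0000000u
#define GNU_PROPERTY_RISCV_FEATURE_1_FCFI	(1u << 0)
#define GNU_PROPERTY_RISCV_FEATURE_1_BCFI	(1u << 1)

/* Walk the desc of an NT_GNU_PROPERTY_TYPE_0 note, return the FEATURE_1 word. */
static uint32_t riscv_feature_1(const uint8_t *desc, size_t descsz)
{
	size_t off = 0;

	while (off + 8 <= descsz) {
		uint32_t pr_type, pr_datasz, val = 0;

		memcpy(&pr_type, desc + off, 4);
		memcpy(&pr_datasz, desc + off + 4, 4);
		off += 8;
		if (off + pr_datasz > descsz)
			break;
		if (pr_type == GNU_PROPERTY_RISCV_FEATURE_1_AND && pr_datasz == 4) {
			memcpy(&val, desc + off, 4);
			return val;
		}
		off += (pr_datasz + 7) & ~(size_t)7;	/* 8-byte padded on ELF64 */
	}
	return 0;
}

int main(void)
{
	/* Hand-built desc: one FEATURE_1_AND property advertising both CFI flags. */
	uint8_t desc[16] = { 0 };
	uint32_t type = GNU_PROPERTY_RISCV_FEATURE_1_AND, sz = 4;
	uint32_t flags = GNU_PROPERTY_RISCV_FEATURE_1_FCFI |
			 GNU_PROPERTY_RISCV_FEATURE_1_BCFI;

	memcpy(desc, &type, 4);
	memcpy(desc + 4, &sz, 4);
	memcpy(desc + 8, &flags, 4);
	printf("feature_1 = 0x%x\n", riscv_feature_1(desc, sizeof(desc)));
	return 0;
}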

diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h
index e7acffdf21d2..60ac2d2390ee 100644
--- a/arch/riscv/include/asm/elf.h
+++ b/arch/riscv/include/asm/elf.h
@@ -14,6 +14,7 @@
#include <asm/auxvec.h>
#include <asm/byteorder.h>
#include <asm/cacheinfo.h>
+#include <linux/processor.h>

/*
* These are used to set parameters in the core dumps.
@@ -140,4 +141,57 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
compat_arch_setup_additional_pages

#endif /* CONFIG_COMPAT */
+
+#define RISCV_ELF_FCFI (1 << 0)
+#define RISCV_ELF_BCFI (1 << 1)
+
+#ifdef CONFIG_ARCH_BINFMT_ELF_STATE
+struct arch_elf_state {
+ int flags;
+};
+
+#define INIT_ARCH_ELF_STATE { \
+ .flags = 0, \
+}
+#endif
+
+#ifdef CONFIG_ARCH_USE_GNU_PROPERTY
+static inline int arch_parse_elf_property(u32 type, const void *data,
+ size_t datasz, bool compat,
+ struct arch_elf_state *arch)
+{
+ /*
+ * TODO: Do we want to support this for 32-bit/compat?
+ * Return 0 (no CFI) for now.
+ */
+ if (IS_ENABLED(CONFIG_COMPAT) && compat)
+ return 0;
+ if (type == GNU_PROPERTY_RISCV_FEATURE_1_AND) {
+ const u32 *p = data;
+
+ if (datasz != sizeof(*p))
+ return -ENOEXEC;
+ if (arch_supports_indirect_br_lp_instr() &&
+ (*p & GNU_PROPERTY_RISCV_FEATURE_1_FCFI))
+ arch->flags |= RISCV_ELF_FCFI;
+ if (arch_supports_shadow_stack() && (*p & GNU_PROPERTY_RISCV_FEATURE_1_BCFI))
+ arch->flags |= RISCV_ELF_BCFI;
+ }
+ return 0;
+}
+
+static inline int arch_elf_pt_proc(void *ehdr, void *phdr,
+ struct file *f, bool is_interp,
+ struct arch_elf_state *state)
+{
+ return 0;
+}
+
+static inline int arch_check_elf(void *ehdr, bool has_interp,
+ void *interp_ehdr,
+ struct arch_elf_state *state)
+{
+ return 0;
+}
+#endif
#endif /* _ASM_RISCV_ELF_H */
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index 8955f2432c2d..db676262e61e 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -24,6 +24,7 @@
#include <asm/switch_to.h>
#include <asm/thread_info.h>
#include <asm/cpuidle.h>
+#include <linux/mman.h>

register unsigned long gp_in_global __asm__("gp");

@@ -135,6 +136,14 @@ void start_thread(struct pt_regs *regs, unsigned long pc,
else
regs->status |= SR_UXL_64;
#endif
+#ifdef CONFIG_USER_SHADOW_STACK
+ if (current_thread_info()->user_cfi_state.ubcfi_en)
+ regs->status |= SR_UBCFIEN;
+#endif
+#ifdef CONFIG_USER_INDIRECT_BR_LP
+ if (current_thread_info()->user_cfi_state.ufcfi_en)
+ regs->status |= SR_UFCFIEN;
+#endif
}

void flush_thread(void)
@@ -189,3 +198,61 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
p->thread.sp = (unsigned long)childregs; /* kernel sp */
return 0;
}
+
+
+int allocate_shadow_stack(unsigned long *shadow_stack_base, unsigned long *shdw_size)
+{
+ int flags = MAP_ANONYMOUS | MAP_PRIVATE;
+ struct mm_struct *mm = current->mm;
+ unsigned long addr, populate, size;
+ *shadow_stack_base = 0;
+
+ if (!shdw_size)
+ return -EINVAL;
+
+ size = *shdw_size;
+
+ /* If size is 0, pick a default based on RLIMIT_STACK (capped at 4G) */
+ if (size == 0)
+ size = round_up(min_t(unsigned long long, rlimit(RLIMIT_STACK), SZ_4G), PAGE_SIZE);
+ mmap_write_lock(mm);
+ addr = do_mmap(NULL, 0, size, PROT_SHADOWSTACK, flags, 0,
+ &populate, NULL);
+ mmap_write_unlock(mm);
+ if (IS_ERR_VALUE(addr))
+ return PTR_ERR((void *)addr);
+ *shadow_stack_base = addr;
+ *shdw_size = size;
+ return 0;
+}
+
+#if defined(CONFIG_USER_SHADOW_STACK) || defined(CONFIG_USER_INDIRECT_BR_LP)
+/* Called from load_elf_binary(). Sets up shadow stack and forward cfi enablement. */
+int arch_elf_setup_cfi_state(const struct arch_elf_state *state)
+{
+ int ret = 0;
+ unsigned long shadow_stack_base = 0;
+ unsigned long shadow_stk_size = 0;
+ struct thread_info *info = NULL;
+
+ info = current_thread_info();
+ /* setup back cfi state */
+ /* setup cfi state only if implementation supports it */
+ if (arch_supports_shadow_stack() && (state->flags & RISCV_ELF_BCFI)) {
+ info->user_cfi_state.ubcfi_en = 1;
+ ret = allocate_shadow_stack(&shadow_stack_base, &shadow_stk_size);
+ if (ret)
+ return ret;
+
+ info->user_cfi_state.user_shdw_stk = (shadow_stack_base + shadow_stk_size);
+ info->user_cfi_state.shdw_stk_base = shadow_stack_base;
+ }
+ /* setup forward cfi state */
+ if (arch_supports_indirect_br_lp_instr() && (state->flags & RISCV_ELF_FCFI)) {
+ info->user_cfi_state.ufcfi_en = 1;
+ info->user_cfi_state.lp_label = 0;
+ }
+
+ return ret;
+}
+#endif
\ No newline at end of file
--
2.25.1