[RFC PATCH -next V3 5/6] arm64: add {get, put}_user to machine check safe

From: Tong Tiangen
Date: Tue Apr 12 2022 - 04:11:10 EST


Add machine check safe support for {get, put}_user().

If get/put_user() fails due to a hardware memory error, only the relevant
process is affected, so killing the user process and isolating the user
page with hardware memory errors is a more reasonable choice than a
kernel panic.

Add a new extable type, EX_TYPE_UACCESS_MC_ERR_ZERO, which can be used for
uaccess that can be recovered from hardware memory errors. It differs from
EX_TYPE_UACCESS_MC in that it also encodes two target registers: one that
receives the error code and one that is zeroed on failure.
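
No new fixup handler is needed for this type: the err/zero fixup is shared
with EX_TYPE_UACCESS_ERR_ZERO. For illustration only, a rough sketch of that
existing handler (names as in arch/arm64/mm/extable.c) shows how the two
encoded registers are consumed:

static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex,
					struct pt_regs *regs)
{
	/* Decode the target GPRs recorded in the extable data field. */
	int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
	int reg_zero = FIELD_GET(EX_DATA_REG_ZERO, ex->data);

	/* Report -EFAULT to the caller and zero the destination register. */
	pt_regs_write_reg(regs, reg_err, -EFAULT);
	pt_regs_write_reg(regs, reg_zero, 0);

	/* Continue at the fixup label ("2:" in the uaccess asm). */
	regs->pc = get_ex_fixup(ex);
	return true;
}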

Signed-off-by: Tong Tiangen <tongtiangen@xxxxxxxxxx>
---
arch/arm64/include/asm/asm-extable.h | 14 ++++++++++++++
arch/arm64/include/asm/uaccess.h     |  4 ++--
arch/arm64/mm/extable.c              |  3 +++
3 files changed, 19 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/include/asm/asm-extable.h b/arch/arm64/include/asm/asm-extable.h
index 8af4e7cc9578..62eafb651773 100644
--- a/arch/arm64/include/asm/asm-extable.h
+++ b/arch/arm64/include/asm/asm-extable.h
@@ -10,6 +10,7 @@

/* _MC indicates that can fixup from machine check errors */
#define EX_TYPE_UACCESS_MC 5
+#define EX_TYPE_UACCESS_MC_ERR_ZERO 6

#ifdef __ASSEMBLY__

@@ -75,6 +76,15 @@
#define EX_DATA_REG(reg, gpr) \
"((.L__gpr_num_" #gpr ") << " __stringify(EX_DATA_REG_##reg##_SHIFT) ")"

+#define _ASM_EXTABLE_UACCESS_MC_ERR_ZERO(insn, fixup, err, zero)	\
+	__DEFINE_ASM_GPR_NUMS						\
+	__ASM_EXTABLE_RAW(#insn, #fixup,				\
+			  __stringify(EX_TYPE_UACCESS_MC_ERR_ZERO),	\
+			  "("						\
+			  EX_DATA_REG(ERR, err) " | "			\
+			  EX_DATA_REG(ZERO, zero)			\
+			  ")")
+
#define _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero) \
__DEFINE_ASM_GPR_NUMS \
__ASM_EXTABLE_RAW(#insn, #fixup, \
@@ -87,6 +97,10 @@
#define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err) \
_ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, wzr)

+
+#define _ASM_EXTABLE_UACCESS_MC_ERR(insn, fixup, err) \
+ _ASM_EXTABLE_UACCESS_MC_ERR_ZERO(insn, fixup, err, wzr)
+
#define EX_DATA_REG_DATA_SHIFT 0
#define EX_DATA_REG_DATA GENMASK(4, 0)
#define EX_DATA_REG_ADDR_SHIFT 5
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index e8dce0cc5eaa..e41b47df48b0 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -236,7 +236,7 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
asm volatile( \
"1: " load " " reg "1, [%2]\n" \
"2:\n" \
- _ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %w0, %w1) \
+ _ASM_EXTABLE_UACCESS_MC_ERR_ZERO(1b, 2b, %w0, %w1) \
: "+r" (err), "=&r" (x) \
: "r" (addr))

@@ -325,7 +325,7 @@ do { \
asm volatile( \
"1: " store " " reg "1, [%2]\n" \
"2:\n" \
- _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0) \
+ _ASM_EXTABLE_UACCESS_MC_ERR(1b, 2b, %w0) \
: "+r" (err) \
: "r" (x), "r" (addr))

diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c
index 5de256a25464..ca7388f3923b 100644
--- a/arch/arm64/mm/extable.c
+++ b/arch/arm64/mm/extable.c
@@ -79,6 +79,7 @@ bool fixup_exception(struct pt_regs *regs)
case EX_TYPE_BPF:
return ex_handler_bpf(ex, regs);
case EX_TYPE_UACCESS_ERR_ZERO:
+ case EX_TYPE_UACCESS_MC_ERR_ZERO:
return ex_handler_uaccess_err_zero(ex, regs);
case EX_TYPE_LOAD_UNALIGNED_ZEROPAD:
return ex_handler_load_unaligned_zeropad(ex, regs);
@@ -98,6 +99,8 @@ bool fixup_exception_mc(struct pt_regs *regs)
switch (ex->type) {
case EX_TYPE_UACCESS_MC:
return ex_handler_fixup(ex, regs);
+ case EX_TYPE_UACCESS_MC_ERR_ZERO:
+ return ex_handler_uaccess_err_zero(ex, regs);
}

return false;
--
2.18.0.huawei.25