[PATCH v3 3/7] arm64: kpti: move check for non-vulnerable CPUs to a function

From: Jeremy Linton
Date: Wed Jan 09 2019 - 18:56:30 EST


From: Mian Yousaf Kaukab <ykaukab@xxxxxxx>

Add is_cpu_meltdown_safe(), which checks the current CPU against a whitelist of cores known not to be vulnerable to Meltdown, and use it in unmap_kernel_at_el0().

Signed-off-by: Mian Yousaf Kaukab <ykaukab@xxxxxxx>
[Moved location of function]
Signed-off-by: Jeremy Linton <jeremy.linton@xxxxxxx>
---
arch/arm64/kernel/cpufeature.c | 15 +++++++++++----
1 file changed, 11 insertions(+), 4 deletions(-)
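
Reviewer note, not intended for the commit message: splitting the whitelist check out of unmap_kernel_at_el0() makes the Meltdown status usable independently of the KPTI decision. As a purely hypothetical sketch (not part of this patch), the helper could later back the generic cpu_show_meltdown() hook declared in include/linux/cpu.h along these lines; a real implementation would need to record the state for every CPU at boot rather than only the CPU that happens to run the handler:

/*
 * Hypothetical illustration only, not part of this patch: reuse the
 * helper for the generic vulnerabilities sysfs hook.  This sketch only
 * reflects the CPU executing the handler; recording the result for all
 * CPUs during boot would be needed in practice.
 */
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	if (is_cpu_meltdown_safe())
		return sprintf(buf, "Not affected\n");

	return sprintf(buf, "Vulnerable\n");
}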

diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 4f272399de89..ab784d7a0083 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -947,8 +947,7 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
 
-static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
-				int scope)
+static bool is_cpu_meltdown_safe(void)
 {
 	/* List of CPUs that are not vulnerable and don't need KPTI */
 	static const struct midr_range kpti_safe_list[] = {
@@ -962,6 +961,15 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
 		{ /* sentinel */ }
 	};
+	if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
+		return true;
+
+	return false;
+}
+
+static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
+				int scope)
+{
 	char const *str = "command line option";
 
 	/*
@@ -985,8 +993,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
 		return true;
 
-	/* Don't force KPTI for CPUs that are not vulnerable */
-	if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
+	if (is_cpu_meltdown_safe())
 		return false;
 
 	/* Defer to CPU feature registers */
--
2.17.2