[PATCH 1/6] arm64: kpti: move check for non-vulnerable CPUs to a function

From: Jeremy Linton
Date: Thu Dec 06 2018 - 18:44:32 EST


From: Mian Yousaf Kaukab <ykaukab@xxxxxxx>

Add is_cpu_meltdown_safe() which checks a whitelist of known safe cores.

Signed-off-by: Mian Yousaf Kaukab <ykaukab@xxxxxxx>
[Moved location of function]
Signed-off-by: Jeremy Linton <jeremy.linton@xxxxxxx>
---
arch/arm64/kernel/cpufeature.c | 16 ++++++++++++----
1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index aec5ecb85737..242898395f68 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -908,8 +908,7 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */

-static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
- int scope)
+static bool is_cpu_meltdown_safe(void)
{
/* List of CPUs that are not vulnerable and don't need KPTI */
static const struct midr_range kpti_safe_list[] = {
@@ -917,6 +916,16 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
{ /* sentinel */ }
};
+ /* Don't force KPTI for CPUs that are not vulnerable */
+ if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
+ return true;
+
+ return false;
+}
+
+static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
char const *str = "command line option";

/*
@@ -940,8 +949,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
return true;

- /* Don't force KPTI for CPUs that are not vulnerable */
- if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
+ if (is_cpu_meltdown_safe())
return false;

/* Defer to CPU feature registers */
--
2.17.2