[PATCH v4 11/16] x86/mtrr: let cache_aps_delayed_init replace mtrr_aps_delayed_init

From: Juergen Gross
Date: Tue Oct 04 2022 - 04:13:04 EST


In order to prepare for decoupling MTRR and PAT, replace the
MTRR-specific mtrr_aps_delayed_init flag with a more generic
cache_aps_delayed_init one.
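
As a rough sketch, the generic replacement boils down to the following
(condensed from the cacheinfo.c and smpboot.c hunks below):

  bool cache_aps_delayed_init;

  void set_cache_aps_delayed_init(void)
  {
  	cache_aps_delayed_init = true;
  }

Callers in smpboot.c then use set_cache_aps_delayed_init() instead of
set_mtrr_aps_delayed_init().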

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
V2:
- new patch
V4:
- reestablish function to set cache_aps_delayed_init (Borislav Petkov)
---
arch/x86/include/asm/cacheinfo.h | 3 +++
arch/x86/include/asm/mtrr.h | 2 --
arch/x86/kernel/cpu/cacheinfo.c | 7 +++++++
arch/x86/kernel/cpu/mtrr/mtrr.c | 17 ++++-------------
arch/x86/kernel/smpboot.c | 5 +++--
5 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/arch/x86/include/asm/cacheinfo.h b/arch/x86/include/asm/cacheinfo.h
index 978bac70fd49..1339bf0908dc 100644
--- a/arch/x86/include/asm/cacheinfo.h
+++ b/arch/x86/include/asm/cacheinfo.h
@@ -7,11 +7,14 @@ extern unsigned int memory_caching_control;
#define CACHE_MTRR 0x01
#define CACHE_PAT 0x02

+extern bool cache_aps_delayed_init;
+
void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu);
void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu);

void cache_disable(void);
void cache_enable(void);
void cache_cpu_init(void);
+void set_cache_aps_delayed_init(void);

#endif /* _ASM_X86_CACHEINFO_H */
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index 986249a2b9b6..5d31219c8529 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -43,7 +43,6 @@ extern int mtrr_del(int reg, unsigned long base, unsigned long size);
extern int mtrr_del_page(int reg, unsigned long base, unsigned long size);
extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
extern void mtrr_ap_init(void);
-extern void set_mtrr_aps_delayed_init(void);
extern void mtrr_aps_init(void);
extern void mtrr_bp_restore(void);
extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
@@ -87,7 +86,6 @@ static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
{
}
#define mtrr_ap_init() do {} while (0)
-#define set_mtrr_aps_delayed_init() do {} while (0)
#define mtrr_aps_init() do {} while (0)
#define mtrr_bp_restore() do {} while (0)
#define mtrr_disable() do {} while (0)
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index 49b60a427fc9..330aa412be63 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -1137,3 +1137,10 @@ void cache_cpu_init(void)
	cache_enable();
	local_irq_restore(flags);
}
+
+bool cache_aps_delayed_init;
+
+void set_cache_aps_delayed_init(void)
+{
+	cache_aps_delayed_init = true;
+}
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
index 7eed5387e828..68376c924b75 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -64,7 +64,6 @@ unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
static DEFINE_MUTEX(mtrr_mutex);

u64 size_or_mask, size_and_mask;
-static bool mtrr_aps_delayed_init;

static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __ro_after_init;

@@ -171,7 +170,7 @@ static int mtrr_rendezvous_handler(void *info)
	if (data->smp_reg != ~0U) {
		mtrr_if->set(data->smp_reg, data->smp_base,
			     data->smp_size, data->smp_type);
-	} else if (mtrr_aps_delayed_init || !cpu_online(smp_processor_id())) {
+	} else if (cache_aps_delayed_init || !cpu_online(smp_processor_id())) {
		cache_cpu_init();
	}
	return 0;
@@ -778,7 +777,7 @@ void __init mtrr_bp_init(void)

void mtrr_ap_init(void)
{
-	if (!memory_caching_control || mtrr_aps_delayed_init)
+	if (!memory_caching_control || cache_aps_delayed_init)
		return;

	/*
@@ -812,14 +811,6 @@ void mtrr_save_state(void)
	smp_call_function_single(first_cpu, mtrr_save_fixed_ranges, NULL, 1);
}

-void set_mtrr_aps_delayed_init(void)
-{
-	if (!memory_caching_control)
-		return;
-
-	mtrr_aps_delayed_init = true;
-}
-
/*
 * Delayed MTRR initialization for all AP's
 */
@@ -833,11 +824,11 @@ void mtrr_aps_init(void)
	 * by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
	 * then we are done.
	 */
-	if (!mtrr_aps_delayed_init)
+	if (!cache_aps_delayed_init)
		return;

	set_mtrr(~0U, 0, 0, 0);
-	mtrr_aps_delayed_init = false;
+	cache_aps_delayed_init = false;
}

void mtrr_bp_restore(void)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index f24227bc3220..de2082c21e8b 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -58,6 +58,7 @@
#include <linux/overflow.h>

#include <asm/acpi.h>
+#include <asm/cacheinfo.h>
#include <asm/desc.h>
#include <asm/nmi.h>
#include <asm/irq.h>
@@ -1428,7 +1429,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)

	uv_system_init();

-	set_mtrr_aps_delayed_init();
+	set_cache_aps_delayed_init();

	smp_quirk_init_udelay();

@@ -1439,7 +1440,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)

void arch_thaw_secondary_cpus_begin(void)
{
-	set_mtrr_aps_delayed_init();
+	set_cache_aps_delayed_init();
}

void arch_thaw_secondary_cpus_end(void)
--
2.35.3