[PATCH v3 07/18] x86/intel_rdt: Add Haswell feature discovery

From: Fenghua Yu
Date: Fri Oct 07 2016 - 19:45:44 EST


From: Fenghua Yu <fenghua.yu@xxxxxxxxx>

Some Haswell generation CPUs support RDT, but they don't enumerate this
using CPUID. Use rdmsr_safe() and wrmsr_safe() to probe the MSRs on CPU
model 63 (INTEL_FAM6_HASWELL_X).

Signed-off-by: Fenghua Yu <fenghua.yu@xxxxxxxxx>
Signed-off-by: Tony Luck <tony.luck@xxxxxxxxx>
---
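For illustration only (not part of the patch): a minimal sketch of the
generic write-and-read-back MSR probe that the change relies on, in case
the pattern is unfamiliar. msr_sticks() is a made-up name used here for
brevity; rdmsrl_safe()/wrmsrl_safe()/rdmsrl()/wrmsrl() are the existing
64-bit accessors from <asm/msr.h>. The actual patch below uses the
32-bit accessors on MSR_IA32_PQR_ASSOC and hardcodes the Haswell server
limits.

#include <asm/msr.h>

static bool msr_sticks(unsigned int msr, u64 test_val)
{
	u64 old_val, new_val;

	if (rdmsrl_safe(msr, &old_val))		/* MSR may not exist */
		return false;
	if (wrmsrl_safe(msr, test_val))		/* write may #GP */
		return false;
	rdmsrl(msr, new_val);			/* read the value back */
	wrmsrl(msr, old_val);			/* restore the old value */

	return new_val == test_val;		/* feature present if the bits stuck */
}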
 arch/x86/events/intel/cqm.c             |  2 +-
 arch/x86/include/asm/intel_rdt_common.h |  6 ++++++
 arch/x86/kernel/cpu/intel_rdt.c         | 38 +++++++++++++++++++++++++++++++++
 3 files changed, 45 insertions(+), 1 deletion(-)
 create mode 100644 arch/x86/include/asm/intel_rdt_common.h

diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
index 8f82b02..df86874 100644
--- a/arch/x86/events/intel/cqm.c
+++ b/arch/x86/events/intel/cqm.c
@@ -7,9 +7,9 @@
 #include <linux/perf_event.h>
 #include <linux/slab.h>
 #include <asm/cpu_device_id.h>
+#include <asm/intel_rdt_common.h>
 #include "../perf_event.h"
 
-#define MSR_IA32_PQR_ASSOC	0x0c8f
 #define MSR_IA32_QM_CTR		0x0c8e
 #define MSR_IA32_QM_EVTSEL	0x0c8d
 
diff --git a/arch/x86/include/asm/intel_rdt_common.h b/arch/x86/include/asm/intel_rdt_common.h
new file mode 100644
index 0000000..e6e15cf
--- /dev/null
+++ b/arch/x86/include/asm/intel_rdt_common.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_X86_INTEL_RDT_COMMON_H
+#define _ASM_X86_INTEL_RDT_COMMON_H
+
+#define MSR_IA32_PQR_ASSOC 0x0c8f
+
+#endif /* _ASM_X86_INTEL_RDT_COMMON_H */
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index ebe8dae..bc7f10b 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -26,11 +26,49 @@
 
 #include <linux/slab.h>
 #include <linux/err.h>
+#include <asm/intel_rdt_common.h>
+#include <asm/intel-family.h>
+
+/*
+ * cache_alloc_hsw_probe() - Probe for Intel Haswell server CPUs, which
+ * do not have CPUID enumeration support for Cache Allocation.
+ *
+ * Probe by writing to the high 32 bits (CLOSid) of the IA32_PQR_ASSOC
+ * MSR and testing whether the bits stick. Max CLOSids is always 4 and
+ * max cbm length is always 20 on HSW server parts. The minimum cache
+ * bitmask length allowed for HSW server is always 2 bits. Hardcode them.
+ */
+static inline bool cache_alloc_hsw_probe(void)
+{
+	u32 l, h_old, h_new, h_tmp;
+
+	if (rdmsr_safe(MSR_IA32_PQR_ASSOC, &l, &h_old))
+		return false;
+
+	/*
+	 * The CLOSid (high bits) defaults to 0 when the feature is present.
+	 */
+	h_tmp = h_old ^ 0x1U;
+	if (wrmsr_safe(MSR_IA32_PQR_ASSOC, l, h_tmp))
+		return false;
+	rdmsr(MSR_IA32_PQR_ASSOC, l, h_new);
+
+	if (h_tmp != h_new)
+		return false;
+
+	wrmsr(MSR_IA32_PQR_ASSOC, l, h_old);
+
+	return true;
+}

 static inline bool get_rdt_resources(struct cpuinfo_x86 *c)
 {
 	bool ret = false;
 
+	if (c->x86_vendor == X86_VENDOR_INTEL && c->x86 == 6 &&
+	    c->x86_model == INTEL_FAM6_HASWELL_X)
+		return cache_alloc_hsw_probe();
+
 	if (!cpu_has(c, X86_FEATURE_RDT_A))
 		return false;
 	if (cpu_has(c, X86_FEATURE_CAT_L3))
--
2.5.0