[PATCH V2 09/23] perf/x86/intel: Check Arch LBR MSRs

From: kan.liang
Date: Fri Jun 26 2020 - 14:24:44 EST


From: Kan Liang <kan.liang@xxxxxxxxxxxxxxx>

KVM may not support the Architecture LBR MSRs. Accessing those MSRs
from the guest may trigger a #GP fault and crash the guest.

The MSRs have to be checked at guest boot time.

Checking MSR_ARCH_LBR_DEPTH with the maximum supported Architecture LBR
depth should be good enough. The maximum depth can be calculated as
8 * the position of the last set bit of the LBR_DEPTH value enumerated
by CPUID.
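
For illustration only (not part of the patch), a minimal user-space
sketch of that calculation is below; the helper names and the sample
bitmask are hypothetical, and the open-coded loop stands in for the
kernel's fls(). check_msr() then writes the resulting value to
MSR_ARCH_LBR_DEPTH and reads it back to verify the MSR is accessible.

#include <stdio.h>

/*
 * Hypothetical stand-in for the kernel's fls(): returns the 1-based
 * position of the most significant set bit, or 0 if no bits are set.
 */
static int fls_sketch(unsigned int x)
{
	int pos = 0;

	while (x) {
		pos++;
		x >>= 1;
	}
	return pos;
}

/*
 * Mirrors x86_pmu_get_max_arch_lbr_nr() from the patch: the maximum
 * Arch LBR depth is 8 * the position of the last set bit of the
 * CPUID-enumerated LBR_DEPTH bitmask.
 */
static int max_arch_lbr_nr(unsigned int lbr_depth_mask)
{
	return fls_sketch(lbr_depth_mask) * 8;
}

int main(void)
{
	/* Example: bits 0-3 set -> depths 8/16/24/32 supported -> max 32. */
	printf("max depth: %d\n", max_arch_lbr_nr(0xf));
	return 0;
}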

Co-developed-by: Like Xu <like.xu@xxxxxxxxxxxxxxx>
Signed-off-by: Like Xu <like.xu@xxxxxxxxxxxxxxx>
Signed-off-by: Kan Liang <kan.liang@xxxxxxxxxxxxxxx>
---
arch/x86/events/intel/core.c | 24 ++++++++++++++++++++++--
arch/x86/events/perf_event.h | 5 +++++
2 files changed, 27 insertions(+), 2 deletions(-)

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 117307a..eb17068 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4141,6 +4141,8 @@ static bool check_msr(unsigned long msr, u64 mask)

if (is_lbr_from(msr))
val_tmp = lbr_from_signext_quirk_wr(val_tmp);
+ else if (msr == MSR_ARCH_LBR_DEPTH)
+ val_tmp = x86_pmu_get_max_arch_lbr_nr();

if (wrmsrl_safe(msr, val_tmp) ||
rdmsrl_safe(msr, &val_new))
@@ -5184,8 +5186,23 @@ __init int intel_pmu_init(void)
* Check all LBT MSR here.
* Disable LBR access if any LBR MSRs can not be accessed.
*/
- if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
- x86_pmu.lbr_nr = 0;
+ if (x86_pmu.lbr_nr) {
+ if (x86_pmu.arch_lbr) {
+ u64 mask = 1;
+
+ if (x86_pmu.lbr_ebx.split.lbr_cpl)
+ mask |= ARCH_LBR_CTL_CPL;
+ if (x86_pmu.lbr_ebx.split.lbr_filter)
+ mask |= ARCH_LBR_CTL_FILTER;
+ if (x86_pmu.lbr_ebx.split.lbr_call_stack)
+ mask |= ARCH_LBR_CTL_STACK;
+ if (!check_msr(MSR_ARCH_LBR_CTL, mask))
+ x86_pmu.lbr_nr = 0;
+ if (!check_msr(MSR_ARCH_LBR_DEPTH, 0))
+ x86_pmu.lbr_nr = 0;
+ } else if (!check_msr(x86_pmu.lbr_tos, 0x3UL))
+ x86_pmu.lbr_nr = 0;
+ }
for (i = 0; i < x86_pmu.lbr_nr; i++) {
if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
@@ -5202,6 +5219,9 @@ __init int intel_pmu_init(void)
*/
if (x86_pmu.extra_regs) {
for (er = x86_pmu.extra_regs; er->msr; er++) {
+ /* Skip Arch LBR which is already verified */
+ if (x86_pmu.arch_lbr && (er->idx == EXTRA_REG_LBR))
+ continue;
er->extra_msr_access = check_msr(er->msr, 0x11UL);
/* Disable LBR select mapping */
if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 9b0e533..f333c83 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -813,6 +813,11 @@ static inline bool x86_pmu_has_lbr_callstack(void)
x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
}

+static inline int x86_pmu_get_max_arch_lbr_nr(void)
+{
+ return fls(x86_pmu.lbr_eax.split.lbr_depth_mask) * 8;
+}
+
DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

int x86_perf_event_set_period(struct perf_event *event);
--
2.7.4