RE: [tip:perf/core] perf/x86: Add an MSR PMU driver

From: Liang, Kan
Date: Tue Aug 04 2015 - 16:50:23 EST

> On Tue, Aug 4, 2015 at 11:11 AM, Liang, Kan <kan.liang@xxxxxxxxx> wrote:
> >
> >
> >>
> >> On Tue, Aug 4, 2015 at 8:03 AM, Liang, Kan <kan.liang@xxxxxxxxx> wrote:
> >> >
> >> >> > +
> >> >> > +enum perf_msr_id {
> >> >> > + PERF_MSR_TSC = 0,
> >> >> > + PERF_MSR_APERF = 1,
> >> >> > + PERF_MSR_MPERF = 2,
> >> >> > + PERF_MSR_PPERF = 3,
> >> >> > + PERF_MSR_SMI = 4,
> >> >> > +
> >> >> > + PERF_MSR_EVENT_MAX,
> >> >> > +};
> >> >> > +
> >> >> > +struct perf_msr {
> >> >> > + int id;
> >> >> > + u64 msr;
> >> >> > +};
> >> >> > +
> >> >> > +static struct perf_msr msr[] = {
> >> >> > + { PERF_MSR_TSC, 0 },
> >> >> > + { PERF_MSR_APERF, MSR_IA32_APERF },
> >> >> > + { PERF_MSR_MPERF, MSR_IA32_MPERF },
> >> >> > + { PERF_MSR_PPERF, MSR_PPERF },
> >> >> > + { PERF_MSR_SMI, MSR_SMI_COUNT },
> >> >> > +};
> >> >>
> >> >> I think this could be easier to work with if it were
> >> >> [PERF_MSR_TSC] = {...}, etc. No big deal, though, until the list
> >> >> gets long. However, it might make fixing the apparent issue below
> >> >> easier...
> >> >>
> >> >> > +static int msr_event_init(struct perf_event *event)
> >> >> > +{
> >> >> > + u64 cfg = event->attr.config;
> >> >>
> >> >> ...
> >> >>
> >> >> > + event->hw.event_base = msr[cfg].msr;
> >> >>
> >> >> Shouldn't this verify that the fancy enumeration code actually
> >> >> believes that msr[cfg] exists on this system? Otherwise we might
> >> >> have a very short wait until the perf fuzzer oopses this thing :)
> >> >>
> >> >
> >> > I think we already did the check before using msr[cfg].
> >>
> >> Where? All I see is:
> >>
> >> + if (cfg >= PERF_MSR_EVENT_MAX)
> >> + return -EINVAL;
> >
> > Yes, we check cfg here. So msr[cfg] should always be available.
> >
>
> PERF_MSR_EVENT_MAX is a constant. If I run this thing on an AMD CPU
> that supports TSC, APERF, MPERF, and nothing else, and someone asks for
> PPERF, then the check will succeed and we'll oops, right?
>

Right, it could be a problem.
How about the patch below?
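
As an aside, picking up your earlier suggestion, designated
initializers would keep the enum and the table from drifting apart.
A minimal sketch (same fields as the patch, just a different
initializer style):

static struct perf_msr msr[] = {
	[PERF_MSR_TSC]   = { PERF_MSR_TSC,   0,              true  },
	[PERF_MSR_APERF] = { PERF_MSR_APERF, MSR_IA32_APERF, false },
	[PERF_MSR_MPERF] = { PERF_MSR_MPERF, MSR_IA32_MPERF, false },
	[PERF_MSR_PPERF] = { PERF_MSR_PPERF, MSR_PPERF,      false },
	[PERF_MSR_SMI]   = { PERF_MSR_SMI,   MSR_SMI_COUNT,  false },
};

The patch keeps the positional form for now.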

---
From 0217ffc9a0d2fac6417552b9f66fafc538ef9068 Mon Sep 17 00:00:00 2001
Date: Tue, 4 Aug 2015 08:27:19 -0400
Subject: [PATCH 1/1] perf/x86/msr: Fix access to unsupported MSR events

Most platforms support only a subset of the MSR events. If an
unsupported MSR event is accessed, it may cause an oops.
Introduce an .available flag to mark the MSR events that the platform
supports, and check it at event init.

Reported-by: Andy Lutomirski <luto@xxxxxxxxxx>
Signed-off-by: Kan Liang <kan.liang@xxxxxxxxx>
---
arch/x86/kernel/cpu/perf_event_msr.c | 21 ++++++++++++++++-----
1 file changed, 16 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kernel/cpu/perf_event_msr.c b/arch/x86/kernel/cpu/perf_event_msr.c
index af216e9..c174242 100644
--- a/arch/x86/kernel/cpu/perf_event_msr.c
+++ b/arch/x86/kernel/cpu/perf_event_msr.c
@@ -13,14 +13,15 @@ enum perf_msr_id {
 struct perf_msr {
 	int id;
 	u64 msr;
+	bool available;
 };
 
 static struct perf_msr msr[] = {
-	{ PERF_MSR_TSC, 0 },
-	{ PERF_MSR_APERF, MSR_IA32_APERF },
-	{ PERF_MSR_MPERF, MSR_IA32_MPERF },
-	{ PERF_MSR_PPERF, MSR_PPERF },
-	{ PERF_MSR_SMI, MSR_SMI_COUNT },
+	{ PERF_MSR_TSC, 0, true },
+	{ PERF_MSR_APERF, MSR_IA32_APERF, false },
+	{ PERF_MSR_MPERF, MSR_IA32_MPERF, false },
+	{ PERF_MSR_PPERF, MSR_PPERF, false },
+	{ PERF_MSR_SMI, MSR_SMI_COUNT, false },
 };
 
 PMU_EVENT_ATTR_STRING(tsc, evattr_tsc, "event=0x00");
@@ -74,6 +75,10 @@ static int msr_event_init(struct perf_event *event)
 	    event->attr.sample_period) /* no sampling */
 		return -EINVAL;
 
+	/* The platform may not support all events */
+	if (!msr[cfg].available)
+		return -EINVAL;
+
 	event->hw.idx = -1;
 	event->hw.event_base = msr[cfg].msr;
 	event->hw.config = cfg;
@@ -181,18 +186,22 @@ static int __init intel_msr_init(int idx)
 	case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
 	case 79: /* 14nm Broadwell Server */
 		events_attrs[idx++] = &evattr_smi.attr.attr;
+		msr[PERF_MSR_SMI].available = true;
 		break;
 
 	case 78: /* 14nm Skylake Mobile */
 	case 94: /* 14nm Skylake Desktop */
 		events_attrs[idx++] = &evattr_pperf.attr.attr;
 		events_attrs[idx++] = &evattr_smi.attr.attr;
+		msr[PERF_MSR_PPERF].available = true;
+		msr[PERF_MSR_SMI].available = true;
 		break;
 
 	case 55: /* 22nm Atom "Silvermont" */
 	case 76: /* 14nm Atom "Airmont" */
 	case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
 		events_attrs[idx++] = &evattr_smi.attr.attr;
+		msr[PERF_MSR_SMI].available = true;
 		break;
 	}
 
@@ -215,6 +224,8 @@ static int __init msr_init(void)
 		events_attrs[idx++] = &evattr_aperf.attr.attr;
 		events_attrs[idx++] = &evattr_mperf.attr.attr;
 		events_attrs[idx] = NULL;
+		msr[PERF_MSR_APERF].available = true;
+		msr[PERF_MSR_MPERF].available = true;
 	}
 
 	switch (boot_cpu_data.x86_vendor) {
--
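
A possible follow-up (not part of this patch) would be to probe each
MSR once at init time instead of growing the per-model switch.
rdmsrl_safe() returns non-zero if the RDMSR faults, so something like
the rough, untested sketch below (helper name invented here) could set
.available generically:

static void __init msr_probe_available(void)
{
	u64 val;
	int i;

	/* Skip PERF_MSR_TSC: its msr field is 0 and TSC is read via RDTSC */
	for (i = PERF_MSR_APERF; i < PERF_MSR_EVENT_MAX; i++) {
		/* rdmsrl_safe() returns non-zero if the read faults */
		if (!rdmsrl_safe(msr[i].msr, &val))
			msr[i].available = true;
	}
}

The catch is that a readable MSR does not guarantee the documented
semantics on every model (which is why the explicit model list exists),
and events_attrs would still need to be kept in sync with .available.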