Re: [PATCH 1/3] perf-events: Add support for supplementary event registers

From: Stephane Eranian
Date: Thu Nov 11 2010 - 13:07:04 EST


Andi,

Thanks for creating this patch. It was on my TODO list for a while.
OFFCORE_RESPONSE is indeed a very useful event.

One thing I noticed in your patch is that you don't special-case
the configuration where HT is off. In that case, the
sharing problem goes away entirely, so I think you could override
the percore constraints either way during init.
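Something like this at the end of intel_pmu_init() might be
enough (untested sketch; it assumes smp_num_siblings already
reflects the final SMT configuration at that point):

	/*
	 * Sketch only: with HT off each core runs a single thread,
	 * so the OFFCORE_RESPONSE MSR is private to that thread and
	 * the cross-thread scheduling constraint can be dropped.
	 */
	if (smp_num_siblings == 1)
		x86_pmu.percore_constraints = NULL;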

Some more tidbits:
- OFFCORE_RESPONSE_0 is 0x01b7
- OFFCORE_RESPONSE_1 is 0x01bb

The umask is not zero but 1. I don't know if you get
something meaningful if you pass a umask of zero,
but it's the user's responsibility to set this right.
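For example, from user space the full request would look
something like this with your new field (illustration only:
the 0xff response mask is made up, and this assumes the
patched perf_event.h is installed):

	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <string.h>
	#include <unistd.h>

	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size        = sizeof(attr);
	attr.type        = PERF_TYPE_RAW;
	attr.config      = 0x01b7;	/* umask 1, event code 0xb7 */
	attr.event_extra = 0xff;	/* kernel writes this to MSR 0x1a6 */

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);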

An alternative approach could have been to stash the
extra MSR value in the upper 32 bits of the config;
the extra mask is only 16 bits wide today. OFFCORE_RESPONSE
is a model-specific event, and there is no guarantee it will
be there in future CPUs, so it would be safe to do that as well.
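That is, something along these lines (hypothetical layout, not
what your patch does; offcore_mask is a made-up variable):

	/* event select and umask in the low bits, extra MSR mask above */
	#define OFFCORE_RSP_SHIFT	32

	attr.config = 0x01b7 | ((__u64)offcore_mask << OFFCORE_RSP_SHIFT);

	/* the kernel side would then extract: */
	hwc->extra_config = (event->attr.config >> OFFCORE_RSP_SHIFT) & 0xffff;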

On Thu, Nov 11, 2010 at 5:15 PM, Andi Kleen <andi@xxxxxxxxxxxxxx> wrote:
> From: Andi Kleen <ak@xxxxxxxxxxxxxxx>
>
> Intel Nehalem/Westmere have a special OFFCORE_RESPONSE event
> that can be used to monitor any offcore accesses from a core.
> This is a very useful event for various tunings, and it's
> also needed to implement the generic LLC-* events correctly.
>
> Unfortunately this event requires programming a mask in a separate
> register. And worse, this separate register is per core, not per
> CPU thread.
>
> This patch:
> - Teaches perf_events that OFFCORE_RESPONSE needs extra parameters.
> - Adds a new field to the user interface to pass the extra mask.
>   This reuses one of the unused fields for perf events. The change
>   is ABI neutral because no one is likely to have used OFFCORE_RESPONSE
>   before (with a zero mask it wouldn't count anything).
> - Adds support to the Intel perf_event core to schedule the per-core
>   resource. I tried to add generic infrastructure for this that could
>   also be used for other core resources.
>   The basic code is patterned after the similar AMD northbridge
>   constraints code.
>
> Thanks to Stephane Eranian, who pointed out some problems
> in the original version and suggested improvements.
>
> Cc: eranian@xxxxxxxxxx
> Signed-off-by: Andi Kleen <ak@xxxxxxxxxxxxxxx>
> ---
>  arch/x86/kernel/cpu/perf_event.c       |   56 +++++++++++++++
>  arch/x86/kernel/cpu/perf_event_intel.c |  120 ++++++++++++++++++++++++++++++++
>  include/linux/perf_event.h             |    7 ++-
>  3 files changed, 182 insertions(+), 1 deletions(-)
>
> diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
> index ed63101..97133ec 100644
> --- a/arch/x86/kernel/cpu/perf_event.c
> +++ b/arch/x86/kernel/cpu/perf_event.c
> @@ -93,6 +93,8 @@ struct amd_nb {
>  	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
>  };
>
> +struct intel_percore;
> +
>  #define MAX_LBR_ENTRIES		16
>
>  struct cpu_hw_events {
> @@ -126,6 +128,8 @@ struct cpu_hw_events {
>  	void				*lbr_context;
>  	struct perf_branch_stack	lbr_stack;
>  	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
> +	int				percore_used;
> +	struct intel_percore		*per_core;
>
>  	/*
>  	 * AMD specific bits
> @@ -175,6 +179,24 @@ struct cpu_hw_events {
>  #define for_each_event_constraint(e, c)	\
>  	for ((e) = (c); (e)->weight; (e)++)
>
> +/*
> + * Extra registers for specific events.
> + * Some events need large masks and require external MSRs.
> + * Define a mapping to these extra registers.
> + */
> +
> +struct extra_reg {
> +	unsigned event;
> +	unsigned msr;
> +	u64 config_mask;
> +	u64 valid_mask;
> +};
> +
> +#define EVENT_EXTRA_REG(event, msr, m, vm) { event, msr, m, vm }
> +#define INTEL_EVENT_EXTRA_REG(event, msr, vm) \
> +	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm)
> +#define EVENT_EXTRA_END {}
> +
>  union perf_capabilities {
>  	struct {
>  		u64	lbr_format	: 6;
> @@ -219,6 +241,7 @@ struct x86_pmu {
>  	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
>  						 struct perf_event *event);
>  	struct event_constraint *event_constraints;
> +	struct event_constraint *percore_constraints;
>  	void		(*quirks)(void);
>  	int		perfctr_second_write;
>
> @@ -247,6 +270,11 @@ struct x86_pmu {
>  	 */
>  	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs       */
>  	int		lbr_nr;			   /* hardware stack size */
> +
> +	/*
> +	 * Extra registers for events
> +	 */
> +	struct extra_reg *extra_regs;
>  };
>
>  static struct x86_pmu x86_pmu __read_mostly;
> @@ -530,6 +558,28 @@ static int x86_pmu_hw_config(struct perf_event *event)
>  }
>
>  /*
> + * Find and validate any extra registers to set up.
> + */
> +static int x86_pmu_extra_regs(struct perf_event *event)
> +{
> +	struct extra_reg *er;
> +
> +	if (!x86_pmu.extra_regs)
> +		return 0;
> +
> +	for (er = x86_pmu.extra_regs; er->msr; er++) {
> +		if (er->event != (event->attr.config & er->config_mask))
> +			continue;
> +		if (event->attr.event_extra & ~er->valid_mask)
> +			return -EINVAL;
> +		event->hw.extra_reg = er->msr;
> +		event->hw.extra_config = event->attr.event_extra;
> +		break;
> +	}
> +	return 0;
> +}
> +
> +/*
>   * Setup the hardware configuration for a given attr_type
>   */
>  static int __x86_pmu_event_init(struct perf_event *event)
> @@ -561,6 +611,10 @@ static int __x86_pmu_event_init(struct perf_event *event)
>  	event->hw.last_cpu = -1;
>  	event->hw.last_tag = ~0ULL;
>
> +	err = x86_pmu_extra_regs(event);
> +	if (err)
> +		return err;
> +
>  	return x86_pmu.hw_config(event);
>  }
>
> @@ -876,6 +930,8 @@ static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
>  					  u64 enable_mask)
>  {
>  	wrmsrl(hwc->config_base + hwc->idx, hwc->config | enable_mask);
> +	if (hwc->extra_reg)
> +		wrmsrl(hwc->extra_reg, hwc->extra_config);
>  }
>
>  static inline void x86_pmu_disable_event(struct perf_event *event)
> diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
> index c8f5c08..bbe7fba 100644
> --- a/arch/x86/kernel/cpu/perf_event_intel.c
> +++ b/arch/x86/kernel/cpu/perf_event_intel.c
> @@ -1,5 +1,14 @@
>  #ifdef CONFIG_CPU_SUP_INTEL
>
> +struct intel_percore {
> +	raw_spinlock_t lock;
> +	int ref;
> +	u64 config;
> +	unsigned extra_reg;
> +	u64 extra_config;
> +};
> +static DEFINE_PER_CPU(struct intel_percore, intel_percore);
> +
>  /*
>   * Intel PerfMon, used on Core and later.
>   */
> @@ -64,6 +73,18 @@ static struct event_constraint intel_nehalem_event_constraints[] =
>  	EVENT_CONSTRAINT_END
>  };
>
> +static struct extra_reg intel_nehalem_extra_regs[] =
> +{
> +	INTEL_EVENT_EXTRA_REG(0xb7, 0x1a6, 0xffff), /* OFFCORE_RESPONSE1 */
> +	EVENT_EXTRA_END
> +};
> +
> +static struct event_constraint intel_nehalem_percore_constraints[] =
> +{
> +	INTEL_EVENT_CONSTRAINT(0xb7, 0),
> +	EVENT_CONSTRAINT_END
> +};
> +
>  static struct event_constraint intel_westmere_event_constraints[] =
>  {
>  	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
> @@ -76,6 +97,20 @@ static struct event_constraint intel_westmere_event_constraints[] =
>  	EVENT_CONSTRAINT_END
>  };
>
> +static struct extra_reg intel_westmere_extra_regs[] =
> +{
> +	INTEL_EVENT_EXTRA_REG(0xb7, 0x1a6, 0xffff), /* OFFCORE_RESPONSE1 */
> +	INTEL_EVENT_EXTRA_REG(0xbb, 0x1a7, 0xffff), /* OFFCORE_RESPONSE2 */
> +	EVENT_EXTRA_END
> +};
> +
> +static struct event_constraint intel_westmere_percore_constraints[] =
> +{
> +	INTEL_EVENT_CONSTRAINT(0xb7, 0),
> +	INTEL_EVENT_CONSTRAINT(0xbb, 0),
> +	EVENT_CONSTRAINT_END
> +};
> +
>  static struct event_constraint intel_gen_event_constraints[] =
>  {
>  	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
> @@ -794,6 +829,56 @@ intel_bts_constraints(struct perf_event *event)
>  }
>
>  static struct event_constraint *
> +intel_percore_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
> +{
> +	struct hw_perf_event *hwc = &event->hw;
> +	unsigned e = hwc->config & ARCH_PERFMON_EVENTSEL_EVENT;
> +	struct event_constraint *c;
> +	struct intel_percore *pc;
> +
> +	if (!x86_pmu.percore_constraints)
> +		return NULL;
> +
> +	for (c = x86_pmu.percore_constraints; c->cmask; c++) {
> +		if (e != c->code)
> +			continue;
> +
> +		c = NULL;
> +
> +		/*
> +		 * Allocate resource per core.
> +		 * Currently only one such per core resource can be allocated.
> +		 */
> +		pc = cpuc->per_core;
> +		if (!pc)
> +			break;
> +		raw_spin_lock(&pc->lock);
> +		if (pc->ref > 0) {
> +			/* Allow identical settings */
> +			if (hwc->config == pc->config &&
> +			    hwc->extra_reg == pc->extra_reg &&
> +			    hwc->extra_config == pc->extra_config) {
> +				pc->ref++;
> +				cpuc->percore_used = 1;
> +			} else {
> +				/* Deny due to conflict */
> +				c = &emptyconstraint;
> +			}
> +		} else {
> +			pc->config = hwc->config;
> +			pc->extra_reg = hwc->extra_reg;
> +			pc->extra_config = hwc->extra_config;
> +			pc->ref = 1;
> +			cpuc->percore_used = 1;
> +		}
> +		raw_spin_unlock(&pc->lock);
> +		return c;
> +	}
> +
> +	return NULL;
> +}
> +
> +static struct event_constraint *
>  intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
>  {
>  	struct event_constraint *c;
> @@ -806,9 +891,29 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event
>  	if (c)
>  		return c;
>
> +	c = intel_percore_constraints(cpuc, event);
> +	if (c)
> +		return c;
> +
>  	return x86_get_event_constraints(cpuc, event);
>  }
>
> +static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
> +					struct perf_event *event)
> +{
> +	struct intel_percore *pc;
> +
> +	if (!cpuc->percore_used)
> +		return;
> +
> +	pc = cpuc->per_core;
> +	raw_spin_lock(&pc->lock);
> +	pc->ref--;
> +	BUG_ON(pc->ref < 0);
> +	raw_spin_unlock(&pc->lock);
> +	cpuc->percore_used = 0;
> +}
> +
>  static int intel_pmu_hw_config(struct perf_event *event)
>  {
>  	int ret = x86_pmu_hw_config(event);
> @@ -854,6 +959,7 @@ static __initconst const struct x86_pmu core_pmu = {
>  	 */
>  	.max_period		= (1ULL << 31) - 1,
>  	.get_event_constraints	= intel_get_event_constraints,
> +	.put_event_constraints	= intel_put_event_constraints,
>  	.event_constraints	= intel_core_event_constraints,
>  };
>
> @@ -929,6 +1035,7 @@ static __init int intel_pmu_init(void)
>  	union cpuid10_eax eax;
>  	unsigned int unused;
>  	unsigned int ebx;
> +	int cpu;
>  	int version;
>
>  	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
> @@ -1010,7 +1117,10 @@ static __init int intel_pmu_init(void)
>  		intel_pmu_lbr_init_nhm();
>
>  		x86_pmu.event_constraints = intel_nehalem_event_constraints;
> +		x86_pmu.percore_constraints =
> +			intel_nehalem_percore_constraints;
>  		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
> +		x86_pmu.extra_regs = intel_nehalem_extra_regs;
>  		pr_cont("Nehalem events, ");
>  		break;
>
> @@ -1032,7 +1142,10 @@ static __init int intel_pmu_init(void)
>  		intel_pmu_lbr_init_nhm();
>
>  		x86_pmu.event_constraints = intel_westmere_event_constraints;
> +		x86_pmu.percore_constraints =
> +			intel_westmere_percore_constraints;
>  		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
> +		x86_pmu.extra_regs = intel_westmere_extra_regs;
>  		pr_cont("Westmere events, ");
>  		break;
>
> @@ -1043,6 +1156,13 @@ static __init int intel_pmu_init(void)
>  		x86_pmu.event_constraints = intel_gen_event_constraints;
>  		pr_cont("generic architected perfmon, ");
>  	}
> +
> +	for_each_possible_cpu(cpu) {
> +		raw_spin_lock_init(&per_cpu(intel_percore, cpu).lock);
> +		per_cpu(cpu_hw_events, cpu).per_core =
> +			&per_cpu(intel_percore,
> +				 cpumask_first(topology_core_cpumask(cpu)));
> +	}
>  	return 0;
>  }
>
> diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
> index 057bf22..a353594 100644
> --- a/include/linux/perf_event.h
> +++ b/include/linux/perf_event.h
> @@ -224,7 +224,10 @@ struct perf_event_attr {
>  	};
>
>  	__u32			bp_type;
> -	__u64			bp_addr;
> +	union {
> +		__u64		bp_addr;
> +		__u64		event_extra; /* Extra for some events */
> +	};
>  	__u64			bp_len;
>  };
>
> @@ -529,6 +532,8 @@ struct hw_perf_event {
>  			unsigned long	event_base;
>  			int		idx;
>  			int		last_cpu;
> +			unsigned	extra_reg;
> +			u64		extra_config;
>  		};
>  		struct { /* software */
>  			struct hrtimer	hrtimer;
> --
> 1.7.1
>
>