Re: [PATCH 0/3] perf/core, x86: unify perfctr bitmasks

From: Lin Ming
Date: Thu Apr 01 2010 - 03:05:36 EST


On Thu, 2010-04-01 at 10:14 +0800, Lin Ming wrote:
> On Thu, 2010-04-01 at 01:05 +0800, Cyrill Gorcunov wrote:
> > On Wed, Mar 31, 2010 at 08:26:47PM +0400, Cyrill Gorcunov wrote:
> > > On Wed, Mar 31, 2010 at 08:15:23PM +0400, Cyrill Gorcunov wrote:
> > > > On Tue, Mar 30, 2010 at 09:04:00PM +0200, Peter Zijlstra wrote:
> > > > > On Tue, 2010-03-30 at 22:29 +0400, Cyrill Gorcunov wrote:
> > > > [...]
> > > > > >
> > > [...]
> > > > +static inline bool p4_is_odd_cpl(u32 escr)
> > > > +{
> > > > + unsigned int t0 = (escr & P4_ESCR_T0_ANY) << 0;
> > > > + unsigned int t1 = (escr & P4_ESCR_T1_ANY) << 2;
> > > > +
> > > > + if ((t0 ^ t1) != t0)
> > > > + return true;
> > >
> > > /me in shame: This is bogus, Peter don't take it yet.
> > >
> >
> > Updated
> >
> > -- Cyrill
> > ---
> > x86, perf: P4 PMU -- check for permission granted on ANY event v2
> >
> > If a caller (user) asks us to count events with some weird
> > mask, we should check whether this privilege has been granted,
> > since this could be a mix of bitmasks we don't like but still
> > allow if the caller insists.
> >
> > By the "ANY" event term we mean the combination of the USR/OS
> > bits in the ESCR register.
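
As an aside, here is a minimal standalone sketch of which ESCR cpl bit
combinations the check above treats as "ANY". It is illustration only,
not kernel code; the T1 masks are quoted in the patch, and the T0 masks
are taken as their counterparts two bits higher, as perf_event_p4.h
defines them.

/*
 * Standalone illustration (not kernel code) of the "ANY" cpl check:
 * cpl bits enabled for both threads at once need extra privilege.
 */
#include <stdbool.h>
#include <stdio.h>

#define P4_ESCR_T0_OS	0x00000008U
#define P4_ESCR_T0_USR	0x00000004U
#define P4_ESCR_T1_OS	0x00000002U
#define P4_ESCR_T1_USR	0x00000001U

#define P4_ESCR_T0_ANY	(P4_ESCR_T0_OS | P4_ESCR_T0_USR)
#define P4_ESCR_T1_ANY	(P4_ESCR_T1_OS | P4_ESCR_T1_USR)

static bool p4_is_any_cpl(unsigned int escr)
{
	/* "ANY": cpl bits set for both thread 0 and thread 1 */
	return (escr & P4_ESCR_T0_ANY) && (escr & P4_ESCR_T1_ANY);
}

int main(void)
{
	/* thread 0 only: allowed without extra privilege */
	printf("%d\n", p4_is_any_cpl(P4_ESCR_T0_USR));
	/* both threads: needs CAP_SYS_ADMIN when perf is paranoid */
	printf("%d\n", p4_is_any_cpl(P4_ESCR_T0_OS | P4_ESCR_T1_USR));
	return 0;
}
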
>
> I'll test this patch.
> Does it need to be applied on top of Robert's patch?

I tested this patch on top of Peter's and Robert's patches.
It works well on P4.

There were only some build errors, fixed by the patch below.

In file included from arch/x86/kernel/cpu/perf_event.c:1326:
arch/x86/kernel/cpu/perf_event_p6.c:94: error: ‘x86_hw_config’ undeclared here (not in a function)
arch/x86/kernel/cpu/perf_event_p6.c:99: error: unknown field ‘raw_event’ specified in initializer
arch/x86/kernel/cpu/perf_event_p6.c:99: error: ‘x86_pmu_raw_event’ undeclared here (not in a function)
make[3]: *** [arch/x86/kernel/cpu/perf_event.o] Error 1

diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index 626abc0..4ec9680 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -91,12 +91,11 @@ static __initconst struct x86_pmu p6_pmu = {
.enable_all = p6_pmu_enable_all,
.enable = p6_pmu_enable_event,
.disable = p6_pmu_disable_event,
- .hw_config = x86_hw_config,
+ .hw_config = x86_pmu_hw_config,
.schedule_events = x86_schedule_events,
.eventsel = MSR_P6_EVNTSEL0,
.perfctr = MSR_P6_PERFCTR0,
.event_map = p6_pmu_event_map,
- .raw_event = x86_pmu_raw_event,
.max_events = ARRAY_SIZE(p6_perfmon_event_map),
.apic = 1,
.max_period = (1ULL << 31) - 1,

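The errors above come from perf_event_p6.c still naming the old helpers
after the generic x86 code was reworked in Peter's/Robert's changes,
which apparently renamed x86_hw_config to x86_pmu_hw_config and dropped
the .raw_event callback from struct x86_pmu. A tiny standalone C sketch,
purely for illustration, of why a stale designated initializer breaks
the build this way:

/*
 * Standalone illustration (not kernel code): a designated initializer
 * that still names a removed or renamed callback field stops compiling,
 * matching the "unknown field"/"undeclared" errors shown above.
 */
#include <stdio.h>

struct pmu_ops {
	int (*hw_config)(int cfg);	/* renamed; the old name is gone */
	/* int (*raw_event)(int cfg);	   field removed from the struct */
};

static int generic_hw_config(int cfg)
{
	return cfg & 0xff;
}

static struct pmu_ops p6_ops = {
	.hw_config = generic_hw_config,
	/* .raw_event = ...,  <- would now be an "unknown field" error */
};

int main(void)
{
	printf("%d\n", p6_ops.hw_config(0x1ff));	/* prints 255 */
	return 0;
}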

Lin Ming

>
> Lin Ming
>
> >
> > CC: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
> > Signed-off-by: Cyrill Gorcunov <gorcunov@xxxxxxxxxx>
> > ---
> > arch/x86/include/asm/perf_event_p4.h | 17 +++++++++++++++++
> > arch/x86/kernel/cpu/perf_event_p4.c | 24 +++++++++++++++++++++---
> > 2 files changed, 38 insertions(+), 3 deletions(-)
> >
> > Index: linux-2.6.git/arch/x86/include/asm/perf_event_p4.h
> > =====================================================================
> > --- linux-2.6.git.orig/arch/x86/include/asm/perf_event_p4.h
> > +++ linux-2.6.git/arch/x86/include/asm/perf_event_p4.h
> > @@ -33,6 +33,9 @@
> > #define P4_ESCR_T1_OS 0x00000002U
> > #define P4_ESCR_T1_USR 0x00000001U
> >
> > +#define P4_ESCR_T0_ANY (P4_ESCR_T0_OS | P4_ESCR_T0_USR)
> > +#define P4_ESCR_T1_ANY (P4_ESCR_T1_OS | P4_ESCR_T1_USR)
> > +
> > #define P4_ESCR_EVENT(v) ((v) << P4_ESCR_EVENT_SHIFT)
> > #define P4_ESCR_EMASK(v) ((v) << P4_ESCR_EVENTMASK_SHIFT)
> > #define P4_ESCR_TAG(v) ((v) << P4_ESCR_TAG_SHIFT)
> > @@ -134,6 +137,20 @@
> > #define P4_CONFIG_HT_SHIFT 63
> > #define P4_CONFIG_HT (1ULL << P4_CONFIG_HT_SHIFT)
> >
> > +/*
> > + * typically we set the USR and/or OS bits for only one of
> > + * the threads at a time; any other combination is treated
> > + * as "any"
> > + */
> > +static inline bool p4_is_any_cpl(u32 escr)
> > +{
> > + if ((escr & P4_ESCR_T0_ANY) &&
> > + (escr & P4_ESCR_T1_ANY))
> > + return true;
> > +
> > + return false;
> > +}
> > +
> > static inline bool p4_is_event_cascaded(u64 config)
> > {
> > u32 cccr = p4_config_unpack_cccr(config);
> > Index: linux-2.6.git/arch/x86/kernel/cpu/perf_event_p4.c
> > =====================================================================
> > --- linux-2.6.git.orig/arch/x86/kernel/cpu/perf_event_p4.c
> > +++ linux-2.6.git/arch/x86/kernel/cpu/perf_event_p4.c
> > @@ -443,13 +443,18 @@ static int p4_hw_config(struct perf_even
> > return 0;
> >
> > /*
> > + * a caller may ask for something definitely weird and
> > + * screwed, sigh...
> > + */
> > + escr = p4_config_unpack_escr(event->attr.config);
> > + if (p4_is_any_cpl(escr) && perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
> > + return -EACCES;
> > +
> > + /*
> > * We don't control raw events so it's up to the caller
> > * to pass sane values (and we don't count the thread number
> > * on HT machine but allow HT-compatible specifics to be
> > * passed on)
> > - *
> > - * XXX: HT wide things should check perf_paranoid_cpu() &&
> > - * CAP_SYS_ADMIN
> > */
> > event->hw.config |= event->attr.config &
> > (p4_config_pack_escr(P4_ESCR_MASK_HT) |
> > @@ -630,6 +635,19 @@ static void p4_pmu_swap_config_ts(struct
> > escr = p4_config_unpack_escr(hwc->config);
> > cccr = p4_config_unpack_cccr(hwc->config);
> >
> > + /*
> > + * for non-standard configs we don't clobber the cpl
> > + * related bits, so it's preferred that the caller not
> > + * use this mode
> > + */
> > + if (unlikely(p4_is_any_cpl(escr))) {
> > + if (p4_ht_thread(cpu))
> > + hwc->config |= P4_CONFIG_HT;
> > + else
> > + hwc->config &= ~P4_CONFIG_HT;
> > + return;
> > + }
> > +
> > if (p4_ht_thread(cpu)) {
> > cccr &= ~P4_CCCR_OVF_PMI_T0;
> > cccr |= P4_CCCR_OVF_PMI_T1;

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/