Re: [PATCH V2 5/6] x86/intel_rdt: Use perf infrastructure for measurements

From: Peter Zijlstra
Date: Thu Sep 06 2018 - 17:39:16 EST


On Thu, Sep 06, 2018 at 01:37:14PM -0700, Reinette Chatre wrote:
> On 9/6/2018 1:29 PM, Peter Zijlstra wrote:
> > On Thu, Sep 06, 2018 at 01:05:05PM -0700, Reinette Chatre wrote:
> >> When I separate the above into the two functions it just becomes either:
> >> rdpmcl(l2_hit_pmcnum, l2_hits_after);
> >> rdpmcl(l2_miss_pmcnum, l2_miss_after);
> >> or:
> >> rdpmcl(l3_hit_pmcnum, l3_hits_after);
> >> rdpmcl(l3_miss_pmcnum, l3_miss_after);
> >>
> >
> > Right, which is the exact _same_ code, so you only need a single
> > function.
> >
>
> From my understanding it is not this code specifically that is causing
> the cache misses but instead the code and variables used to decide
> whether to run them or not. These would still be needed when I extract
> the above into inline functions.

Oh, seriously, use your brain.. This is trivial stuff. Compare the two
functions l2/l3.

They are _identical_ except for some silly bits before/after and
some spurious differences because apparently you cannot copy/paste.
I thought there would be some differences in the loop, but not even
that. They really are identical.

The below should work I think.

---

struct residency_counts {
	u64 miss_before, hits_before;
	u64 miss_after, hits_after;
};

static int measure_residency_fn(struct perf_event_attr *miss_attr,
				struct perf_event_attr *hit_attr,
				struct pseudo_lock_region *plr,
				struct residency_counts *counts)
{
	u64 hits_before = 0, hits_after = 0, miss_before = 0, miss_after = 0;
	struct perf_event *miss_event, *hit_event;
	int hit_pmcnum, miss_pmcnum;
	unsigned int line_size;
	unsigned int size;
	unsigned long i;
	void *mem_r;
	u64 tmp;

	miss_event = perf_event_create_kernel_counter(miss_attr, plr->cpu,
						      NULL, NULL, NULL);
	if (IS_ERR(miss_event))
		goto out;

	hit_event = perf_event_create_kernel_counter(hit_attr, plr->cpu,
						     NULL, NULL, NULL);
	if (IS_ERR(hit_event))
		goto out_miss;

	local_irq_disable();
	/*
	 * Check for any possible error state of the events by performing
	 * one local read of each.
	 */
	if (perf_event_read_local(miss_event, &tmp, NULL, NULL)) {
		local_irq_enable();
		goto out_hit;
	}
	if (perf_event_read_local(hit_event, &tmp, NULL, NULL)) {
		local_irq_enable();
		goto out_hit;
	}

	/*
	 * Disable hardware prefetchers.
	 *
	 * Call wrmsr directly to prevent the local register variables from
	 * being overwritten due to reordering of their assignment with
	 * the wrmsr calls.
	 */
	__wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);

	/* Initialize rest of local variables */
	/*
	 * Performance event has been validated right before this with
	 * interrupts disabled - it is thus safe to read the counter index.
	 */
	miss_pmcnum = x86_perf_rdpmc_index(miss_event);
	hit_pmcnum = x86_perf_rdpmc_index(hit_event);
	line_size = READ_ONCE(plr->line_size);
	mem_r = READ_ONCE(plr->kmem);
	size = READ_ONCE(plr->size);

	/*
	 * Read the counters twice - the first reads load the instructions
	 * used into the L1 cache, the second reads capture accurate values
	 * that do not include the cache misses incurred by the instruction
	 * loads themselves.
	 */
	rdpmcl(hit_pmcnum, hits_before);
	rdpmcl(miss_pmcnum, miss_before);
	/* Make sure the reads above have retired before reading again */
	rmb();
	rdpmcl(hit_pmcnum, hits_before);
	rdpmcl(miss_pmcnum, miss_before);
	/* Make sure the baseline reads have retired before the loop */
	rmb();
	for (i = 0; i < size; i += line_size) {
		/*
		 * Add a barrier to prevent speculative execution of this
		 * loop reading beyond the end of the buffer.
		 */
		rmb();
		asm volatile("mov (%0,%1,1), %%eax\n\t"
			     :
			     : "r" (mem_r), "r" (i)
			     : "%eax", "memory");
	}
	rmb();
	rdpmcl(hit_pmcnum, hits_after);
	rdpmcl(miss_pmcnum, miss_after);
	rmb();
	/* Re-enable hardware prefetchers */
	wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
	local_irq_enable();
out_hit:
	perf_event_release_kernel(hit_event);
out_miss:
	perf_event_release_kernel(miss_event);
out:
	counts->miss_before = miss_before;
	counts->hits_before = hits_before;
	counts->miss_after = miss_after;
	counts->hits_after = hits_after;
	return 0;
}
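
Not shown above: the sketch assumes perf_miss_attr and perf_hit_attr exist
as file-scope attributes whose .config is filled in per model before the
helper is called. Something along these lines (raw, pinned, kernel-only
counting events) should do; treat it as a sketch, not the final patch:

static struct perf_event_attr perf_miss_attr = {
	.type		= PERF_TYPE_RAW,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 0,
	.exclude_user	= 1,
};

static struct perf_event_attr perf_hit_attr = {
	.type		= PERF_TYPE_RAW,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 0,
	.exclude_user	= 1,
};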

static int measure_l2_residency(void *_plr)
{
	struct pseudo_lock_region *plr = _plr;
	struct residency_counts counts;

	switch (boot_cpu_data.x86_model) {
	case INTEL_FAM6_ATOM_GOLDMONT:
	case INTEL_FAM6_ATOM_GEMINI_LAKE:
		perf_miss_attr.config = X86_CONFIG(.event = 0xd1,
						   .umask = 0x10);
		perf_hit_attr.config = X86_CONFIG(.event = 0xd1,
						  .umask = 0x2);
		break;
	default:
		goto out;
	}

	measure_residency_fn(&perf_miss_attr, &perf_hit_attr, plr, &counts);

	trace_pseudo_lock_l2(counts.hits_after - counts.hits_before,
			     counts.miss_after - counts.miss_before);
out:
	plr->thread_done = 1;
	wake_up_interruptible(&plr->lock_thread_wq);
	return 0;
}

static int measure_l3_residency(void *_plr)
{
	struct pseudo_lock_region *plr = _plr;
	struct residency_counts counts;

	switch (boot_cpu_data.x86_model) {
	case INTEL_FAM6_BROADWELL_X:
		/* On BDW the hit event counts references, not hits */
		perf_hit_attr.config = X86_CONFIG(.event = 0x2e,
						  .umask = 0x4f);
		perf_miss_attr.config = X86_CONFIG(.event = 0x2e,
						   .umask = 0x41);
		break;
	default:
		goto out;
	}

	measure_residency_fn(&perf_miss_attr, &perf_hit_attr, plr, &counts);

	counts.miss_after -= counts.miss_before;
	if (boot_cpu_data.x86_model == INTEL_FAM6_BROADWELL_X) {
		/*
		 * On BDW references and misses are counted, need to adjust.
		 * Sometimes the "hits" counter is a bit more than the
		 * references, for example, x references but x + 1 hits.
		 * To not report invalid hit values in this case we treat
		 * that as misses equal to references.
		 */
		/* First compute the number of cache references measured */
		counts.hits_after -= counts.hits_before;
		/* Next convert references to cache hits */
		counts.hits_after -= counts.miss_after > counts.hits_after ?
				     counts.hits_after : counts.miss_after;
	} else {
		counts.hits_after -= counts.hits_before;
	}

	trace_pseudo_lock_l3(counts.hits_after, counts.miss_after);
out:
	plr->thread_done = 1;
	wake_up_interruptible(&plr->lock_thread_wq);
	return 0;
}
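
The caller side is not in the sketch either. Roughly (the wrapper name here
is illustrative, not from the patch): create a kthread bound to plr->cpu,
run one of the measure_*_residency() functions in it, and sleep on
lock_thread_wq until thread_done is set at their "out:" label:

static int pseudo_lock_measure_l2(struct pseudo_lock_region *plr)
{
	struct task_struct *thread;

	plr->thread_done = 0;

	thread = kthread_create_on_node(measure_l2_residency, plr,
					cpu_to_node(plr->cpu),
					"pseudo_lock_measure/%u", plr->cpu);
	if (IS_ERR(thread))
		return PTR_ERR(thread);

	kthread_bind(thread, plr->cpu);
	wake_up_process(thread);

	return wait_event_interruptible(plr->lock_thread_wq,
					plr->thread_done == 1);
}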