[PATCH 6/6] x86/intel_rdt: Support dcache and icache masks for Code and Data Prioritization

From: Vikas Shivappa
Date: Sun Aug 23 2015 - 18:46:53 EST


Add support to read and write the per-cgroup data and instruction cache
masks.
- When a new cgroup directory is created it inherits the parent's dcache
and icache masks, so a mkdir never fails.
- When the user changes a mask, we look for an existing CLOSid with the
same (dcache, icache) pair to reuse, or else allocate a new one. When we
run out of CLOSids, we simply return -ENOSPC; the two sketches below
illustrate the flow and the resulting cgroup interface.
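
A minimal, standalone sketch of that reuse-or-allocate flow (not the
kernel code; the real logic is cdp_mask_search()/cdp_mask_write() in the
diff below, and MAX_CLOSID, clos_map and the helpers here are simplified
stand-ins):

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_CLOSID 16			/* stand-in for x86_cache_max_closid */

struct clos_entry {
	uint64_t dcache_mask;
	uint64_t icache_mask;
	unsigned int refcnt;
};

/* closid 0 is the root group: all ways for both code and data */
static struct clos_entry clos_map[MAX_CLOSID] = {
	[0] = { .dcache_mask = 0xfffff, .icache_mask = 0xfffff, .refcnt = 1 },
};

/* Reuse an existing CLOSid whose (dcache, icache) pair already matches. */
static bool clos_search(uint64_t dmask, uint64_t imask, unsigned int *closid)
{
	for (unsigned int i = 0; i < MAX_CLOSID; i++) {
		if (clos_map[i].refcnt &&
		    clos_map[i].dcache_mask == dmask &&
		    clos_map[i].icache_mask == imask) {
			*closid = i;
			return true;
		}
	}
	return false;
}

/* Otherwise take a free CLOSid; -ENOSPC once the hardware pool is exhausted. */
static int clos_alloc(unsigned int *closid)
{
	for (unsigned int i = 0; i < MAX_CLOSID; i++) {
		if (!clos_map[i].refcnt) {
			*closid = i;
			return 0;
		}
	}
	return -ENOSPC;
}

static int set_masks(unsigned int *cur_closid, uint64_t dmask, uint64_t imask)
{
	unsigned int closid;
	int err;

	/* Drop the current reference first so its own slot can be reused. */
	clos_map[*cur_closid].refcnt--;

	if (clos_search(dmask, imask, &closid)) {
		clos_map[closid].refcnt++;	/* share an existing CLOSid */
	} else {
		err = clos_alloc(&closid);
		if (err) {
			clos_map[*cur_closid].refcnt++;	/* restore, report failure */
			return err;
		}
		clos_map[closid].dcache_mask = dmask;
		clos_map[closid].icache_mask = imask;
		clos_map[closid].refcnt = 1;
		/* the kernel would also program the MSR pair for this closid here */
	}
	*cur_closid = closid;
	return 0;
}

int main(void)
{
	/* a new cgroup starts on its parent's CLOSid, so mkdir cannot fail */
	unsigned int closid = 0;
	int err;

	clos_map[closid].refcnt++;
	err = set_masks(&closid, 0xf0, 0x0f);
	printf("set 0xf0/0x0f -> %d, now on closid %u\n", err, closid);
	return 0;
}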
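
For illustration only, a hypothetical userspace snippet poking the two
new files; the mount point, the intel_rdt.* file name prefix and the
base-0 parsing of the written value follow the usual cgroup v1
conventions and are assumptions, not something this patch defines:

#include <stdio.h>

int main(void)
{
	/* illustrative paths; they depend on where the hierarchy is mounted */
	const char *dpath = "/sys/fs/cgroup/intel_rdt/grp1/intel_rdt.dcache_mask";
	const char *ipath = "/sys/fs/cgroup/intel_rdt/grp1/intel_rdt.icache_mask";
	unsigned long mask;
	FILE *f;

	/* give grp1 the low four ways for data ... */
	f = fopen(dpath, "w");
	if (!f)
		return 1;
	fprintf(f, "0xf\n");
	fclose(f);

	/* ... and read back the code mask it currently holds */
	f = fopen(ipath, "r");
	if (!f)
		return 1;
	if (fscanf(f, "%lx", &mask) == 1)
		printf("icache_mask: %08lx\n", mask);
	fclose(f);
	return 0;
}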
---
arch/x86/kernel/cpu/intel_rdt.c | 177 ++++++++++++++++++++++++++++++++++++----
1 file changed, 163 insertions(+), 14 deletions(-)

diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index 285db1e..d0eaf04 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -428,6 +428,22 @@ static int intel_cache_alloc_cbm_read(struct seq_file *m, void *v)
return 0;
}

+static int cdp_dcache_read(struct seq_file *m, void *v)
+{
+ struct intel_rdt *ir = css_rdt(seq_css(m));
+
+ seq_printf(m, "%08lx\n", cdp_cm_map[ir->closid].dcache_mask);
+ return 0;
+}
+
+static int cdp_icache_read(struct seq_file *m, void *v)
+{
+ struct intel_rdt *ir = css_rdt(seq_css(m));
+
+ seq_printf(m, "%08lx\n", cdp_cm_map[ir->closid].icache_mask);
+ return 0;
+}
+
static inline bool cbm_is_contiguous(unsigned long var)
{
unsigned long maxcbm = MAX_CBM_LENGTH;
@@ -445,13 +461,24 @@ static inline bool cbm_is_contiguous(unsigned long var)
return true;
}

-static int cbm_validate(struct intel_rdt *ir, unsigned long cbmvalue)
+static int cache_mask_validate(struct intel_rdt *ir, unsigned long cbmvalue)
{
+ u32 max_cbm = boot_cpu_data.x86_cache_max_cbm_len;
struct cgroup_subsys_state *css;
struct intel_rdt *par, *c;
unsigned long *cbm_tmp;
+ u64 max_mask;
int err = 0;

+ if (ir == &rdt_root_group)
+ return -EPERM;
+
+ max_mask = (1ULL << max_cbm) - 1;
+ if (cbmvalue & ~max_mask) {
+ err = -EINVAL;
+ goto out_err;
+ }
+
if (!cbm_is_contiguous(cbmvalue)) {
pr_err("bitmask should have >=%d bits and be contiguous\n",
min_bitmask_len);
@@ -486,10 +513,12 @@ out_err:
static bool cbm_search(unsigned long cbm, u32 *closid)
{
u32 maxid = boot_cpu_data.x86_cache_max_closid;
+ unsigned long cache_mask;
u32 i;

for (i = 0; i < maxid; i++) {
- if (bitmap_equal(&cbm, &cat_cm_map[i].cache_mask, MAX_CBM_LENGTH)) {
+ cache_mask = cat_cm_map[i].cache_mask;
+ if (bitmap_equal(&cbm, &cache_mask, MAX_CBM_LENGTH)) {
*closid = i;
return true;
}
@@ -511,30 +540,19 @@ static bool cbm_search(unsigned long cbm, u32 *closid)
static int intel_cache_alloc_cbm_write(struct cgroup_subsys_state *css,
struct cftype *cft, u64 cbmvalue)
{
- u32 max_cbm = boot_cpu_data.x86_cache_max_cbm_len;
struct intel_rdt *ir = css_rdt(css);
ssize_t err = 0;
- u64 max_mask;
u32 closid;

- if (ir == &rdt_root_group)
- return -EPERM;
-
/*
* Need global mutex as cbm write may allocate a closid.
*/
mutex_lock(&rdt_group_mutex);

- max_mask = (1ULL << max_cbm) - 1;
- if (cbmvalue & ~max_mask) {
- err = -EINVAL;
- goto out;
- }
-
if (cbmvalue == cat_cm_map[ir->closid].cache_mask)
goto out;

- err = cbm_validate(ir, cbmvalue);
+ err = cache_mask_validate(ir, cbmvalue);
if (err)
goto out;

@@ -567,6 +585,133 @@ out:
return err;
}

+static void cdp_clos_map_dump(void)
+{
+ u32 i;
+
+ pr_debug("CBMMAP\n");
+ for (i = 0; i < boot_cpu_data.x86_cache_max_closid; i++) {
+ pr_debug("dcache_mask: 0x%x, icache_mask=0x%x,clos_refcnt:%u\n",
+ (unsigned int)cdp_cm_map[i].dcache_mask,
+ (unsigned int)cdp_cm_map[i].icache_mask,
+ cdp_cm_map[i].clos_refcnt);
+ }
+}
+
+static bool cdp_mask_search(unsigned long dcache_mask,
+ unsigned long icache_mask, u32 *closid)
+{
+ u32 maxid = boot_cpu_data.x86_cache_max_closid;
+ unsigned long dcm, icm;
+ u32 i;
+
+ for (i = 0; i < maxid; i++) {
+ dcm = cdp_cm_map[i].dcache_mask;
+ icm = cdp_cm_map[i].icache_mask;
+ if (bitmap_equal(&dcache_mask, &dcm, MAX_CBM_LENGTH)
+ && bitmap_equal(&icache_mask, &icm, MAX_CBM_LENGTH)) {
+ *closid = i;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int cdp_mask_write(struct intel_rdt *ir, u64 dcache_mask,
+ u64 icache_mask)
+{
+ int err = 0;
+ u32 closid;
+
+ /*
+ * Try to get a reference for a different CLOSid and release the
+ * reference to the current CLOSid.
+ * We must put the reference down before searching and take it back if
+ * allocation fails: otherwise, when this group holds the only reference,
+ * the closid that is about to become free could never be reused.
+ */
+ closid_put(ir->closid);
+ if (cdp_mask_search(dcache_mask, icache_mask, &closid)) {
+ ir->closid = closid;
+ closid_get(closid);
+ } else {
+ err = closid_alloc(ir);
+ if (err) {
+ closid_get(ir->closid);
+ goto out;
+ }
+ closid = ir->closid;
+ cdp_cm_map[closid].dcache_mask = dcache_mask;
+ cdp_cm_map[closid].icache_mask = icache_mask;
+ msr_update_all(DCACHE_MASK_INDEX(closid), dcache_mask);
+ msr_update_all(ICACHE_MASK_INDEX(closid), icache_mask);
+ }
+out:
+
+ return err;
+}
+
+static int cdp_dcache_write(struct cgroup_subsys_state *css,
+ struct cftype *cft, u64 new_dcache_mask)
+{
+ unsigned long curr_icache_mask, curr_dcache_mask;
+ struct intel_rdt *ir = css_rdt(css);
+ int err = 0;
+
+ /*
+ * Need global mutex as cache mask write may allocate a closid.
+ */
+ mutex_lock(&rdt_group_mutex);
+
+ curr_dcache_mask = cdp_cm_map[ir->closid].dcache_mask;
+ curr_icache_mask = cdp_cm_map[ir->closid].icache_mask;
+
+ if (new_dcache_mask == curr_dcache_mask)
+ goto out;
+
+ err = cache_mask_validate(ir, new_dcache_mask);
+ if (err)
+ goto out;
+
+ err = cdp_mask_write(ir, new_dcache_mask, curr_icache_mask);
+ cdp_clos_map_dump();
+out:
+ mutex_unlock(&rdt_group_mutex);
+
+ return err;
+}
+
+static int cdp_icache_write(struct cgroup_subsys_state *css,
+ struct cftype *cft, u64 new_icache_mask)
+{
+ unsigned long curr_icache_mask, curr_dcache_mask;
+ struct intel_rdt *ir = css_rdt(css);
+ int err = 0;
+
+ /*
+ * Need global mutex as cache mask write may allocate a closid.
+ */
+ mutex_lock(&rdt_group_mutex);
+
+ curr_dcache_mask = cdp_cm_map[ir->closid].dcache_mask;
+ curr_icache_mask = cdp_cm_map[ir->closid].icache_mask;
+
+ if (new_icache_mask == curr_icache_mask)
+ goto out;
+
+ err = cache_mask_validate(ir, new_icache_mask);
+ if (err)
+ goto out;
+
+ err = cdp_mask_write(ir, curr_dcache_mask, new_icache_mask);
+ cdp_clos_map_dump();
+out:
+ mutex_unlock(&rdt_group_mutex);
+
+ return err;
+}
+
static inline bool rdt_cpumask_update(int cpu)
{
static cpumask_t tmp;
@@ -722,10 +867,14 @@ static struct cftype rdt_files[] = {
{
.name = "icache_mask",
.flags = CFTYPE_ONLY_ON_ROOT | CFTYPE_NOT_ON_ROOT,
+ .seq_show = cdp_icache_read,
+ .write_u64 = cdp_icache_write,
},
{
.name = "dcache_mask",
.flags = CFTYPE_ONLY_ON_ROOT | CFTYPE_NOT_ON_ROOT,
+ .seq_show = cdp_dcache_read,
+ .write_u64 = cdp_dcache_write,
},
{ } /* terminate */
};
--
1.9.1
