[PATCH 2/8] x86/intel_rdt/mba: Generalize the naming to get ready for MBA

From: Vikas Shivappa
Date: Fri Feb 17 2017 - 15:00:12 EST


Preparatory patch to generalize the RDT naming so that other resource
controls like MBA (Memory Bandwidth Allocation) can be applied on top.

The RDT resource cbm (cache bit mask) values are renamed to ctrl_val,
a generic control value which can hold either a cache bit mask or a
memory b/w throttle value. max_cbm is renamed to default_ctrl, the
default value which provides no control: all cbm bits set in the case
of CAT and 100% b/w in the case of MBA. tmp_cbms/num_tmp_cbms become
tmp_ctrl/num_tmp_ctrl, and the domain structure likewise stores
ctrl_val instead of cbm.

The APIs are also generalized; a usage example follows the list:
- rdt_get_config() is renamed to rdt_get_cache_config() to keep the
  cache probing separate from upcoming memory specific APIs.
- The MSR update API rdt_cbm_update() is renamed to rdt_ctrl_update().
- The info file handler rdt_cbm_mask_show() is renamed to
  rdt_default_ctrl_show() to reflect that it reports the generic
  default_ctrl rather than a cbm.
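
With the rename the info file keeps its "cbm_mask" name but is backed
by rdt_default_ctrl_show(). A quick check on a CAT capable system
(output is illustrative, assuming cbm_len = 20):

	# mount -t resctrl resctrl /sys/fs/resctrl
	# cat /sys/fs/resctrl/info/L3/cbm_mask
	fffff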

Signed-off-by: Vikas Shivappa <vikas.shivappa@xxxxxxxxxxxxxxx>
---
arch/x86/include/asm/intel_rdt.h | 22 +++++++++++-----------
arch/x86/kernel/cpu/intel_rdt.c | 28 ++++++++++++++--------------
arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 12 ++++++------
arch/x86/kernel/cpu/intel_rdt_schemata.c | 26 +++++++++++++-------------
4 files changed, 44 insertions(+), 44 deletions(-)

diff --git a/arch/x86/include/asm/intel_rdt.h b/arch/x86/include/asm/intel_rdt.h
index 95ce5c8..326df9e 100644
--- a/arch/x86/include/asm/intel_rdt.h
+++ b/arch/x86/include/asm/intel_rdt.h
@@ -71,14 +71,14 @@ struct rftype {
* @capable: Is this feature available on this machine
* @name: Name to use in "schemata" file
* @num_closid: Number of CLOSIDs available
- * @max_cbm: Largest Cache Bit Mask allowed
+ * @default_ctrl: Specifies default cache cbm or mem b/w percent.
* @min_cbm_bits: Minimum number of consecutive bits to be set
* in a cache bit mask
* @domains: All domains for this resource
* @num_domains: Number of domains active
* @msr_base: Base MSR address for CBMs
- * @tmp_cbms: Scratch space when updating schemata
- * @num_tmp_cbms: Number of CBMs in tmp_cbms
+ * @tmp_ctrl: Scratch space when updating schemata
+ * @num_tmp_ctrl: Number of control values in tmp_ctrl
* @cache_level: Which cache level defines scope of this domain
* @cbm_idx_multi: Multiplier of CBM index
* @cbm_idx_offset: Offset of CBM index. CBM index is computed by:
@@ -91,12 +91,12 @@ struct rdt_resource {
int num_closid;
int cbm_len;
int min_cbm_bits;
- u32 max_cbm;
+ u32 default_ctrl;
struct list_head domains;
int num_domains;
int msr_base;
- u32 *tmp_cbms;
- int num_tmp_cbms;
+ u32 *tmp_ctrl;
+ int num_tmp_ctrl;
int cache_level;
int cbm_idx_multi;
int cbm_idx_offset;
@@ -107,13 +107,13 @@ struct rdt_resource {
* @list: all instances of this resource
* @id: unique id for this instance
* @cpu_mask: which cpus share this resource
- * @cbm: array of cache bit masks (indexed by CLOSID)
+ * @ctrl_val: array of cache or mem ctrl values (indexed by CLOSID)
*/
struct rdt_domain {
struct list_head list;
int id;
struct cpumask cpu_mask;
- u32 *cbm;
+ u32 *ctrl_val;
};

/**
@@ -164,8 +164,8 @@ enum {
unsigned int full;
};

-/* CPUID.(EAX=10H, ECX=ResID=1).EDX */
-union cpuid_0x10_1_edx {
+/* CPUID.(EAX=10H, ECX=ResID).EDX */
+union cpuid_0x10_x_edx {
struct {
unsigned int cos_max:16;
} split;
@@ -174,7 +174,7 @@ enum {

DECLARE_PER_CPU_READ_MOSTLY(int, cpu_closid);

-void rdt_cbm_update(void *arg);
+void rdt_ctrl_update(void *arg);
struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn);
void rdtgroup_kn_unlock(struct kernfs_node *kn);
ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index c8af5d9..5d8c1be 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -119,7 +119,7 @@ static inline bool cache_alloc_hsw_probe(void)

r->num_closid = 4;
r->cbm_len = 20;
- r->max_cbm = max_cbm;
+ r->default_ctrl = max_cbm;
r->min_cbm_bits = 2;
r->capable = true;
r->enabled = true;
@@ -130,16 +130,16 @@ static inline bool cache_alloc_hsw_probe(void)
return false;
}

-static void rdt_get_config(int idx, struct rdt_resource *r)
+static void rdt_get_cache_config(int idx, struct rdt_resource *r)
{
union cpuid_0x10_1_eax eax;
- union cpuid_0x10_1_edx edx;
+ union cpuid_0x10_x_edx edx;
u32 ebx, ecx;

cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full);
r->num_closid = edx.split.cos_max + 1;
r->cbm_len = eax.split.cbm_len + 1;
- r->max_cbm = BIT_MASK(eax.split.cbm_len + 1) - 1;
+ r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;
r->capable = true;
r->enabled = true;
}
@@ -151,7 +151,7 @@ static void rdt_get_cdp_l3_config(int type)

r->num_closid = r_l3->num_closid / 2;
r->cbm_len = r_l3->cbm_len;
- r->max_cbm = r_l3->max_cbm;
+ r->default_ctrl = r_l3->default_ctrl;
r->capable = true;
/*
* By default, CDP is disabled. CDP can be enabled by mount parameter
@@ -171,7 +171,7 @@ static inline bool get_rdt_resources(void)
return false;

if (boot_cpu_has(X86_FEATURE_CAT_L3)) {
- rdt_get_config(1, &rdt_resources_all[RDT_RESOURCE_L3]);
+ rdt_get_cache_config(1, &rdt_resources_all[RDT_RESOURCE_L3]);
if (boot_cpu_has(X86_FEATURE_CDP_L3)) {
rdt_get_cdp_l3_config(RDT_RESOURCE_L3DATA);
rdt_get_cdp_l3_config(RDT_RESOURCE_L3CODE);
@@ -180,7 +180,7 @@ static inline bool get_rdt_resources(void)
}
if (boot_cpu_has(X86_FEATURE_CAT_L2)) {
/* CPUID 0x10.2 fields are same format as 0x10.1 */
- rdt_get_config(2, &rdt_resources_all[RDT_RESOURCE_L2]);
+ rdt_get_cache_config(2, &rdt_resources_all[RDT_RESOURCE_L2]);
ret = true;
}

@@ -200,7 +200,7 @@ static int get_cache_id(int cpu, int level)
return -1;
}

-void rdt_cbm_update(void *arg)
+void rdt_ctrl_update(void *arg)
{
struct msr_param *m = (struct msr_param *)arg;
struct rdt_resource *r = m->res;
@@ -221,7 +221,7 @@ void rdt_cbm_update(void *arg)
for (i = m->low; i < m->high; i++) {
int idx = cbm_idx(r, i);

- wrmsrl(r->msr_base + idx, d->cbm[i]);
+ wrmsrl(r->msr_base + idx, d->ctrl_val[i]);
}
}

@@ -294,8 +294,8 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)

d->id = id;

- d->cbm = kmalloc_array(r->num_closid, sizeof(*d->cbm), GFP_KERNEL);
- if (!d->cbm) {
+ d->ctrl_val = kmalloc_array(r->num_closid, sizeof(*d->ctrl_val), GFP_KERNEL);
+ if (!d->ctrl_val) {
kfree(d);
return;
}
@@ -303,8 +303,8 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
for (i = 0; i < r->num_closid; i++) {
int idx = cbm_idx(r, i);

- d->cbm[i] = r->max_cbm;
- wrmsrl(r->msr_base + idx, d->cbm[i]);
+ d->ctrl_val[i] = r->default_ctrl;
+ wrmsrl(r->msr_base + idx, d->ctrl_val[i]);
}

cpumask_set_cpu(cpu, &d->cpu_mask);
@@ -326,7 +326,7 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
cpumask_clear_cpu(cpu, &d->cpu_mask);
if (cpumask_empty(&d->cpu_mask)) {
r->num_domains--;
- kfree(d->cbm);
+ kfree(d->ctrl_val);
list_del(&d->list);
kfree(d);
}
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index 9b9565f..0d8fa61 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -498,12 +498,12 @@ static int rdt_num_closids_show(struct kernfs_open_file *of,
return 0;
}

-static int rdt_cbm_mask_show(struct kernfs_open_file *of,
+static int rdt_default_ctrl_show(struct kernfs_open_file *of,
struct seq_file *seq, void *v)
{
struct rdt_resource *r = of->kn->parent->priv;

- seq_printf(seq, "%x\n", r->max_cbm);
+ seq_printf(seq, "%x\n", r->default_ctrl);

return 0;
}
@@ -530,7 +530,7 @@ static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
.name = "cbm_mask",
.mode = 0444,
.kf_ops = &rdtgroup_kf_single_ops,
- .seq_show = rdt_cbm_mask_show,
+ .seq_show = rdt_default_ctrl_show,
},
{
.name = "min_cbm_bits",
@@ -803,14 +803,14 @@ static int reset_all_ctrls(struct rdt_resource *r, u32 sclosid, u32 eclosid)
cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);

for (i = sclosid; i < eclosid; i++)
- d->cbm[i] = r->max_cbm;
+ d->ctrl_val[i] = r->default_ctrl;
}
cpu = get_cpu();
/* Update CBM on this cpu if it's in cpu_mask. */
if (cpumask_test_cpu(cpu, cpu_mask))
- rdt_cbm_update(&msr_param);
+ rdt_ctrl_update(&msr_param);
/* Update CBM on all other cpus in cpu_mask. */
- smp_call_function_many(cpu_mask, rdt_cbm_update, &msr_param, 1);
+ smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
put_cpu();

free_cpumask_var(cpu_mask);
diff --git a/arch/x86/kernel/cpu/intel_rdt_schemata.c b/arch/x86/kernel/cpu/intel_rdt_schemata.c
index 527d042..3cde1e8 100644
--- a/arch/x86/kernel/cpu/intel_rdt_schemata.c
+++ b/arch/x86/kernel/cpu/intel_rdt_schemata.c
@@ -38,7 +38,7 @@ static bool cbm_validate(unsigned long var, struct rdt_resource *r)
{
unsigned long first_bit, zero_bit;

- if (var == 0 || var > r->max_cbm)
+ if (var == 0 || var > r->default_ctrl)
return false;

first_bit = find_first_bit(&var, r->cbm_len);
@@ -66,7 +66,7 @@ static int parse_cbm(char *buf, struct rdt_resource *r)
return ret;
if (!cbm_validate(data, r))
return -EINVAL;
- r->tmp_cbms[r->num_tmp_cbms++] = data;
+ r->tmp_ctrl[r->num_tmp_ctrl++] = data;

return 0;
}
@@ -95,7 +95,7 @@ static int parse_line(char *line, struct rdt_resource *r)
}

/* Incorrect number of domains in the line */
- if (r->num_tmp_cbms != r->num_domains)
+ if (r->num_tmp_ctrl != r->num_domains)
return -EINVAL;

/* Any garbage at the end of the line? */
@@ -123,18 +123,18 @@ static int update_domains(struct rdt_resource *r, int closid)
* Thereby avoiding unnecessary IPIs.
*/
list_for_each_entry(d, &r->domains, list) {
- if (d->cbm[msr_param.low] != r->tmp_cbms[idx]) {
+ if (d->ctrl_val[msr_param.low] != r->tmp_ctrl[idx]) {
cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
- d->cbm[msr_param.low] = r->tmp_cbms[idx];
+ d->ctrl_val[msr_param.low] = r->tmp_ctrl[idx];
}
idx++;
}
cpu = get_cpu();
/* Update CBM on this cpu if it's in cpu_mask. */
if (cpumask_test_cpu(cpu, cpu_mask))
- rdt_cbm_update(&msr_param);
+ rdt_ctrl_update(&msr_param);
/* Update CBM on other cpus. */
- smp_call_function_many(cpu_mask, rdt_cbm_update, &msr_param, 1);
+ smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
put_cpu();

free_cpumask_var(cpu_mask);
@@ -187,13 +187,13 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,

/* get scratch space to save all the masks while we validate input */
for_each_enabled_rdt_resource(r) {
- r->tmp_cbms = kcalloc(r->num_domains, sizeof(*l3_cbms),
+ r->tmp_ctrl = kcalloc(r->num_domains, sizeof(*l3_cbms),
GFP_KERNEL);
- if (!r->tmp_cbms) {
+ if (!r->tmp_ctrl) {
ret = -ENOMEM;
goto out;
}
- r->num_tmp_cbms = 0;
+ r->num_tmp_ctrl = 0;
}

r = NULL;
@@ -225,8 +225,8 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
out:
rdtgroup_kn_unlock(of->kn);
for_each_enabled_rdt_resource(r) {
- kfree(r->tmp_cbms);
- r->tmp_cbms = NULL;
+ kfree(r->tmp_ctrl);
+ r->tmp_ctrl = NULL;
}
return ret ?: nbytes;
}
@@ -240,7 +240,7 @@ static void show_doms(struct seq_file *s, struct rdt_resource *r, int closid)
list_for_each_entry(dom, &r->domains, list) {
if (sep)
seq_puts(s, ";");
- seq_printf(s, "%d=%x", dom->id, dom->cbm[closid]);
+ seq_printf(s, "%d=%x", dom->id, dom->ctrl_val[closid]);
sep = true;
}
seq_puts(s, "\n");
--
1.9.1