RE: [PATCH v2 14/18] x86/resctrl: Add helpers for system wide mon/alloc capable

From: Shaopeng Tan (Fujitsu)
Date: Wed Jan 25 2023 - 02:17:54 EST


> resctrl reads rdt_alloc_capable or rdt_mon_capable to determine whether any
> of the resources support the corresponding features.
> resctrl also uses the static-keys that affect the architecture's context-switch
> code to determine the same thing.
>
> This forces another architecture to have the same static-keys.
>
> As the static-key is enabled based on the capable flag, and none of the
> filesystem uses of these are in the scheduler path, move the capable flags
> behind helpers, and use these in the filesystem code instead of the static-key.
>
> After this change, only the architecture code manages and uses the static-keys
> to ensure __resctrl_sched_in() does not need runtime checks.
>
> This avoids multiple architectures having to define the same static-keys.
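
A note for readers following the series: after this change, a second
architecture's <asm/resctrl.h> only needs to back
resctrl_arch_alloc_capable()/resctrl_arch_mon_capable() with its own state,
and no longer has to declare the x86 static keys. A minimal sketch of what
that could look like is below; only the helper names come from this patch,
while the per-arch flags and the empty enable/disable hooks are illustrative
assumptions, not code from this series:

  /* Hypothetical <asm/resctrl.h> fragment for some other architecture. */
  extern bool arch_resctrl_alloc_supported;  /* illustrative flag name */
  extern bool arch_resctrl_mon_supported;    /* illustrative flag name */

  static inline bool resctrl_arch_alloc_capable(void)
  {
          return arch_resctrl_alloc_supported;
  }

  static inline bool resctrl_arch_mon_capable(void)
  {
          return arch_resctrl_mon_supported;
  }

  /*
   * No static keys required here: if this architecture's context-switch
   * path does not rely on them, the enable/disable hooks can stay empty.
   */
  static inline void resctrl_arch_enable_alloc(void) { }
  static inline void resctrl_arch_disable_alloc(void) { }
  static inline void resctrl_arch_enable_mon(void) { }
  static inline void resctrl_arch_disable_mon(void) { }

The x86 versions below keep writing the static keys, so __resctrl_sched_in()
stays free of runtime checks, as the commit message describes.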
>
> Tested-by: Shaopeng Tan <tan.shaopeng@xxxxxxxxxxx>
> Signed-off-by: James Morse <james.morse@xxxxxxx>
>
> ---
> Changes since v1:
> * Added missing conversion in mkdir_rdt_prepare_rmid_free()
> ---
> arch/x86/include/asm/resctrl.h | 13 +++++++++
> arch/x86/kernel/cpu/resctrl/internal.h | 2 --
> arch/x86/kernel/cpu/resctrl/monitor.c | 4 +--
> arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 6 ++--
> arch/x86/kernel/cpu/resctrl/rdtgroup.c | 34 +++++++++++------------
> 5 files changed, 35 insertions(+), 24 deletions(-)
>
> diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h
> index 5b5ae6d8a343..3364d640f791 100644
> --- a/arch/x86/include/asm/resctrl.h
> +++ b/arch/x86/include/asm/resctrl.h
> @@ -38,10 +38,18 @@ struct resctrl_pqr_state {
>
> DECLARE_PER_CPU(struct resctrl_pqr_state, pqr_state);
>
> +extern bool rdt_alloc_capable;
> +extern bool rdt_mon_capable;
> +
> DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
> DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
> DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
>
> +static inline bool resctrl_arch_alloc_capable(void)
> +{
> + return rdt_alloc_capable;
> +}
> +
> static inline void resctrl_arch_enable_alloc(void)
> {
> static_branch_enable_cpuslocked(&rdt_alloc_enable_key);
> @@ -54,6 +62,11 @@ static inline void resctrl_arch_disable_alloc(void)
> static_branch_dec_cpuslocked(&rdt_enable_key);
> }
>
> +static inline bool resctrl_arch_mon_capable(void)
> +{
> + return rdt_mon_capable;
> +}
> +
> static inline void resctrl_arch_enable_mon(void)
> {
> static_branch_enable_cpuslocked(&rdt_mon_enable_key);
> diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
> index 3997386cee89..a1bf97adee2e 100644
> --- a/arch/x86/kernel/cpu/resctrl/internal.h
> +++ b/arch/x86/kernel/cpu/resctrl/internal.h
> @@ -88,8 +88,6 @@ struct rmid_read {
> int arch_mon_ctx;
> };
>
> -extern bool rdt_alloc_capable;
> -extern bool rdt_mon_capable;
> extern unsigned int rdt_mon_features;
> extern struct list_head resctrl_schema_all;
> extern bool resctrl_mounted;
> diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
> index 4ff258b49e9c..1a214bd32ed4 100644
> --- a/arch/x86/kernel/cpu/resctrl/monitor.c
> +++ b/arch/x86/kernel/cpu/resctrl/monitor.c
> @@ -795,7 +795,7 @@ void mbm_handle_overflow(struct work_struct *work)
>
> mutex_lock(&rdtgroup_mutex);
>
> - if (!resctrl_mounted || !static_branch_likely(&rdt_mon_enable_key))
> + if (!resctrl_mounted || !resctrl_arch_mon_capable())
> goto out_unlock;
>
> r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
> @@ -823,7 +823,7 @@ void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms)
> unsigned long delay = msecs_to_jiffies(delay_ms);
> int cpu;
>
> - if (!resctrl_mounted || !static_branch_likely(&rdt_mon_enable_key))
> + if (!resctrl_mounted || !resctrl_arch_mon_capable())
> return;
>
> cpu = cpumask_any(&dom->cpu_mask);
> diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
> index 3b724a40d3a2..0b4fdb118643 100644
> --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
> +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
> @@ -567,7 +567,7 @@ static int rdtgroup_locksetup_user_restrict(struct rdtgroup *rdtgrp)
> if (ret)
> goto err_cpus;
>
> - if (rdt_mon_capable) {
> + if (resctrl_arch_mon_capable()) {
> ret = rdtgroup_kn_mode_restrict(rdtgrp, "mon_groups");
> if (ret)
> goto err_cpus_list;
> @@ -614,7 +614,7 @@ static int rdtgroup_locksetup_user_restore(struct rdtgroup *rdtgrp)
> if (ret)
> goto err_cpus;
>
> - if (rdt_mon_capable) {
> + if (resctrl_arch_mon_capable()) {
> ret = rdtgroup_kn_mode_restore(rdtgrp, "mon_groups", 0777);
> if (ret)
> goto err_cpus_list;
> @@ -762,7 +762,7 @@ int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
> {
> int ret;
>
> - if (rdt_mon_capable) {
> + if (resctrl_arch_mon_capable()) {
> ret = alloc_rmid(rdtgrp->closid);
> if (ret < 0) {
> rdt_last_cmd_puts("Out of RMIDs\n");
> diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> index 0e22f8361392..44e6d6fbab25 100644
> --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> @@ -609,13 +609,13 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
>
> static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
> {
> - return (rdt_alloc_capable && (r->type == RDTCTRL_GROUP) &&
> + return (resctrl_arch_alloc_capable() && (r->type == RDTCTRL_GROUP) &&
> resctrl_arch_match_closid(t, r->closid));
> }
>
> static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r)
> {
> - return (rdt_mon_capable && (r->type == RDTMON_GROUP) &&
> + return (resctrl_arch_mon_capable() && (r->type == RDTMON_GROUP) &&
> resctrl_arch_match_rmid(t, r->mon.parent->closid,
> r->mon.rmid));
> }
> @@ -2220,7 +2220,7 @@ static int rdt_get_tree(struct fs_context *fc)
> if (ret < 0)
> goto out_schemata_free;
>
> - if (rdt_mon_capable) {
> + if (resctrl_arch_mon_capable()) {
> ret = mongroup_create_dir(rdtgroup_default.kn,
> &rdtgroup_default, "mon_groups",
> &kn_mongrp);
> @@ -2242,12 +2242,12 @@ static int rdt_get_tree(struct fs_context *fc)
> if (ret < 0)
> goto out_psl;
>
> - if (rdt_alloc_capable)
> + if (resctrl_arch_alloc_capable())
> resctrl_arch_enable_alloc();
> - if (rdt_mon_capable)
> + if (resctrl_arch_mon_capable())
> resctrl_arch_enable_mon();
>
> - if (rdt_alloc_capable || rdt_mon_capable)
> + if (resctrl_arch_alloc_capable() || resctrl_arch_mon_capable())
> resctrl_mounted = true;
>
> if (is_mbm_enabled()) {
> @@ -2261,10 +2261,10 @@ static int rdt_get_tree(struct fs_context *fc)
> out_psl:
> rdt_pseudo_lock_release();
> out_mondata:
> - if (rdt_mon_capable)
> + if (resctrl_arch_mon_capable())
> kernfs_remove(kn_mondata);
> out_mongrp:
> - if (rdt_mon_capable)
> + if (resctrl_arch_mon_capable())
> kernfs_remove(kn_mongrp);
> out_info:
> kernfs_remove(kn_info);
> @@ -2512,9 +2512,9 @@ static void rdt_kill_sb(struct super_block *sb)
> rdt_pseudo_lock_release();
> rdtgroup_default.mode = RDT_MODE_SHAREABLE;
> schemata_list_destroy();
> - if (rdt_alloc_capable)
> + if (resctrl_arch_alloc_capable())
> resctrl_arch_disable_alloc();
> - if (rdt_mon_capable)
> + if (resctrl_arch_mon_capable())
> resctrl_arch_disable_mon();
> resctrl_mounted = false;
> kernfs_kill_sb(sb);
> @@ -2889,7 +2889,7 @@ static int mkdir_rdt_prepare_rmid_alloc(struct rdtgroup *rdtgrp)
> {
> int ret;
>
> - if (!rdt_mon_capable)
> + if (!resctrl_arch_mon_capable())
> return 0;
>
> ret = alloc_rmid(rdtgrp->closid);
> @@ -2911,7 +2911,7 @@ static int mkdir_rdt_prepare_rmid_alloc(struct rdtgroup *rdtgrp)
>
> static void mkdir_rdt_prepare_rmid_free(struct rdtgroup *rgrp)
> {
> - if (rdt_mon_capable)
> + if (resctrl_arch_mon_capable())
> free_rmid(rgrp->closid, rgrp->mon.rmid);
> }
>
> @@ -3075,7 +3075,7 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
>
> list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);
>
> - if (rdt_mon_capable) {
> + if (resctrl_arch_mon_capable()) {
> /*
> * Create an empty mon_groups directory to hold the subset
> * of tasks and cpus to monitor.
> @@ -3130,14 +3130,14 @@ static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
> * allocation is supported, add a control and monitoring
> * subdirectory
> */
> - if (rdt_alloc_capable && parent_kn == rdtgroup_default.kn)
> + if (resctrl_arch_alloc_capable() && parent_kn == rdtgroup_default.kn)
> return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode);
>
> /*
> * If RDT monitoring is supported and the parent directory is a valid
> * "mon_groups" directory, add a monitoring subdirectory.
> */
> - if (rdt_mon_capable && is_mon_groups(parent_kn, name))
> + if (resctrl_arch_mon_capable() && is_mon_groups(parent_kn, name))
> return rdtgroup_mkdir_mon(parent_kn, name, mode);
>
> return -EPERM;
> @@ -3341,7 +3341,7 @@ void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d)
> * If resctrl is mounted, remove all the
> * per domain monitor data directories.
> */
> - if (resctrl_mounted && static_branch_unlikely(&rdt_mon_enable_key))
> + if (resctrl_mounted && resctrl_arch_mon_capable())
> rmdir_mondata_subdir_allrdtgrp(r, d->id);
>
> if (is_mbm_enabled())
> @@ -3418,7 +3418,7 @@ int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d)
> if (is_llc_occupancy_enabled())
> INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo);
>
> - if (resctrl_mounted && static_branch_unlikely(&rdt_mon_enable_key))
> + if (resctrl_mounted && resctrl_arch_mon_capable())
> mkdir_mondata_subdir_allrdtgrp(r, d);
>
> return 0;
> --
> 2.30.2

Reviewed-by: Shaopeng Tan <tan.shaopeng@xxxxxxxxxxx>