x86/resctrl: Unwind properly from rdt_enable_ctx()

rdt_enable_ctx() enables the features provided during resctrl mount.

Whenever a feature is added to rdt_enable_ctx(), the error paths of its
callers must also be updated so that every enabled feature is unwound
correctly when a later error occurs. This is error prone.

Introduce rdt_disable_ctx() to refactor the error unwinding of
rdt_enable_ctx() to simplify future additions. This also simplifies
cleanup in rdt_kill_sb().
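
For illustration only, a minimal, self-contained sketch of the unwind
idiom being adopted, using hypothetical enable_a()/enable_b()/
later_setup_step() helpers rather than the real resctrl functions: each
enable step that fails jumps to a label that undoes only the steps that
already succeeded, and a mirror-image disable helper lets a caller that
fails later undo everything with one call.

	/* Hypothetical stand-ins for the real feature toggles. */
	static int enable_a(void) { return 0; }
	static void disable_a(void) { }
	static int enable_b(void) { return 0; }
	static void disable_b(void) { }
	static int later_setup_step(void) { return 0; }

	/* Mirror of ctx_enable(): tears down every feature it enables. */
	static void ctx_disable(void)
	{
		disable_b();
		disable_a();
	}

	static int ctx_enable(void)
	{
		int ret;

		ret = enable_a();
		if (ret)
			goto out;

		ret = enable_b();
		if (ret)
			goto out_a;	/* undo only what already succeeded */

		return 0;

	out_a:
		disable_a();
	out:
		return ret;
	}

	/* A caller failing after ctx_enable() unwinds with one call. */
	static int mount_like_path(void)
	{
		int ret = ctx_enable();

		if (ret)
			return ret;

		ret = later_setup_step();
		if (ret)
			ctx_disable();

		return ret;
	}

With this shape, adding a feature to ctx_enable() only needs a new label
and a matching line in ctx_disable(), which is the simplification future
additions to rdt_enable_ctx() are meant to get.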

Suggested-by: Reinette Chatre <reinette.chatre@intel.com>
Signed-off-by: Babu Moger <babu.moger@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Peter Newman <peternewman@google.com>
Reviewed-by: Tan Shaopeng <tan.shaopeng@jp.fujitsu.com>
Reviewed-by: Fenghua Yu <fenghua.yu@intel.com>
Reviewed-by: Reinette Chatre <reinette.chatre@intel.com>
Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
Tested-by: Peter Newman <peternewman@google.com>
Tested-by: Tan Shaopeng <tan.shaopeng@jp.fujitsu.com>
Link: https://lore.kernel.org/r/20231017002308.134480-5-babu.moger@amd.com

@@ -2308,14 +2308,6 @@ int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable)
 	return 0;
 }
 
-static void cdp_disable_all(void)
-{
-	if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3))
-		resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false);
-	if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2))
-		resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false);
-}
-
 /*
  * We don't allow rdtgroup directories to be created anywhere
  * except the root directory. Thus when looking for the rdtgroup
@@ -2395,19 +2387,42 @@ static int mkdir_mondata_all(struct kernfs_node *parent_kn,
 			     struct rdtgroup *prgrp,
 			     struct kernfs_node **mon_data_kn);
 
+static void rdt_disable_ctx(void)
+{
+	resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false);
+	resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false);
+	set_mba_sc(false);
+}
+
 static int rdt_enable_ctx(struct rdt_fs_context *ctx)
 {
 	int ret = 0;
 
-	if (ctx->enable_cdpl2)
+	if (ctx->enable_cdpl2) {
 		ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, true);
+		if (ret)
+			goto out_done;
+	}
 
-	if (!ret && ctx->enable_cdpl3)
+	if (ctx->enable_cdpl3) {
 		ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, true);
+		if (ret)
+			goto out_cdpl2;
+	}
 
-	if (!ret && ctx->enable_mba_mbps)
+	if (ctx->enable_mba_mbps) {
 		ret = set_mba_sc(true);
+		if (ret)
+			goto out_cdpl3;
+	}
 
+	return 0;
+
+out_cdpl3:
+	resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false);
+out_cdpl2:
+	resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false);
+out_done:
 	return ret;
 }
 
@@ -2515,13 +2530,13 @@ static int rdt_get_tree(struct fs_context *fc)
 	}
 
 	ret = rdt_enable_ctx(ctx);
-	if (ret < 0)
-		goto out_cdp;
+	if (ret)
+		goto out;
 
 	ret = schemata_list_create();
 	if (ret) {
 		schemata_list_destroy();
-		goto out_mba;
+		goto out_ctx;
 	}
 
 	closid_init();
@@ -2580,11 +2595,8 @@ out_info:
 	kernfs_remove(kn_info);
 out_schemata_free:
 	schemata_list_destroy();
-out_mba:
-	if (ctx->enable_mba_mbps)
-		set_mba_sc(false);
-out_cdp:
-	cdp_disable_all();
+out_ctx:
+	rdt_disable_ctx();
 out:
 	rdt_last_cmd_clear();
 	mutex_unlock(&rdtgroup_mutex);
@@ -2816,12 +2828,11 @@ static void rdt_kill_sb(struct super_block *sb)
 	cpus_read_lock();
 	mutex_lock(&rdtgroup_mutex);
 
-	set_mba_sc(false);
+	rdt_disable_ctx();
 
 	/*Put everything back to default values. */
 	for_each_alloc_capable_rdt_resource(r)
 		reset_all_ctrls(r);
-	cdp_disable_all();
 	rmdir_all_sub();
 	rdt_pseudo_lock_release();
 	rdtgroup_default.mode = RDT_MODE_SHAREABLE;