x86/bugs: Restructure SRSO mitigation

Restructure the SRSO mitigation to use select/update/apply functions for
consistent vulnerability handling.  As with retbleed, the command line
options now directly select mitigations, which can later be modified.

While at it, remove a comment which no longer applies due to the changed
mitigation detection flow.

Signed-off-by: David Kaplan <david.kaplan@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
Link: https://lore.kernel.org/20250418161721.1855190-17-david.kaplan@amd.com
David Kaplan 2025-04-18 11:17:21 -05:00 committed by Borislav Petkov (AMD)
parent d43ba2dc8e
commit 1f4bb068b4
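
For context, here is a minimal standalone sketch (illustrative names and simplified state, not the actual bugs.c code) of the select/update/apply split this patch converts SRSO to: select interprets the command line and the AUTO default, update reconciles the choice with what other mitigations (retbleed here) picked, and apply commits the result.

/*
 * Minimal standalone sketch of the select/update/apply pattern this patch
 * converts SRSO to.  Names and the simplified state below are illustrative
 * assumptions, not the real arch/x86/kernel/cpu/bugs.c implementation.
 */
#include <stdbool.h>
#include <stdio.h>

enum srso_mitigation {
	SRSO_MITIGATION_NONE,
	SRSO_MITIGATION_AUTO,
	SRSO_MITIGATION_SAFE_RET,
	SRSO_MITIGATION_IBPB,
};

/* Stand-ins for the real retbleed state and CPU feature bits. */
static enum srso_mitigation srso_mitigation = SRSO_MITIGATION_AUTO;
static bool retbleed_uses_ibpb = true;
static bool cpu_has_ibpb_brtype = true;

/* Phase 1: pick a mitigation from the command line or the AUTO default. */
static void srso_select_mitigation(void)
{
	if (srso_mitigation == SRSO_MITIGATION_AUTO)
		srso_mitigation = SRSO_MITIGATION_SAFE_RET;
}

/* Phase 2: reconcile with what other mitigations selected. */
static void srso_update_mitigation(void)
{
	/* If retbleed already uses IBPB, that covers SRSO as well. */
	if (retbleed_uses_ibpb && cpu_has_ibpb_brtype)
		srso_mitigation = SRSO_MITIGATION_IBPB;
}

/* Phase 3: commit the decision (feature flags, return thunks, ...). */
static void srso_apply_mitigation(void)
{
	printf("applying SRSO mitigation %d\n", srso_mitigation);
}

int main(void)
{
	/*
	 * Mirrors the ordering in cpu_select_mitigations(): all selects run
	 * first, then all updates, then all applies.
	 */
	srso_select_mitigation();
	srso_update_mitigation();
	srso_apply_mitigation();
	return 0;
}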


@@ -84,6 +84,8 @@ static void __init srbds_select_mitigation(void);
static void __init srbds_apply_mitigation(void);
static void __init l1d_flush_select_mitigation(void);
static void __init srso_select_mitigation(void);
static void __init srso_update_mitigation(void);
static void __init srso_apply_mitigation(void);
static void __init gds_select_mitigation(void);
static void __init gds_apply_mitigation(void);
static void __init bhi_select_mitigation(void);
@@ -208,11 +210,6 @@ void __init cpu_select_mitigations(void)
rfds_select_mitigation();
srbds_select_mitigation();
l1d_flush_select_mitigation();
/*
* srso_select_mitigation() depends and must run after
* retbleed_select_mitigation().
*/
srso_select_mitigation();
gds_select_mitigation();
bhi_select_mitigation();
@@ -240,6 +237,8 @@ void __init cpu_select_mitigations(void)
mmio_update_mitigation();
rfds_update_mitigation();
bhi_update_mitigation();
/* srso_update_mitigation() depends on retbleed_update_mitigation(). */
srso_update_mitigation();
spectre_v1_apply_mitigation();
spectre_v2_apply_mitigation();
@@ -252,6 +251,7 @@ void __init cpu_select_mitigations(void)
mmio_apply_mitigation();
rfds_apply_mitigation();
srbds_apply_mitigation();
srso_apply_mitigation();
gds_apply_mitigation();
bhi_apply_mitigation();
}
@@ -2674,6 +2674,7 @@ early_param("l1tf", l1tf_cmdline);
enum srso_mitigation {
SRSO_MITIGATION_NONE,
SRSO_MITIGATION_AUTO,
SRSO_MITIGATION_UCODE_NEEDED,
SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED,
SRSO_MITIGATION_MICROCODE,
@@ -2683,14 +2684,6 @@ enum srso_mitigation {
SRSO_MITIGATION_BP_SPEC_REDUCE,
};
enum srso_mitigation_cmd {
SRSO_CMD_OFF,
SRSO_CMD_MICROCODE,
SRSO_CMD_SAFE_RET,
SRSO_CMD_IBPB,
SRSO_CMD_IBPB_ON_VMEXIT,
};
static const char * const srso_strings[] = {
[SRSO_MITIGATION_NONE] = "Vulnerable",
[SRSO_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
@@ -2702,8 +2695,7 @@ static const char * const srso_strings[] = {
[SRSO_MITIGATION_BP_SPEC_REDUCE] = "Mitigation: Reduced Speculation"
};
static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE;
static enum srso_mitigation_cmd srso_cmd __ro_after_init = SRSO_CMD_SAFE_RET;
static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_AUTO;
static int __init srso_parse_cmdline(char *str)
{
@@ -2711,15 +2703,15 @@ static int __init srso_parse_cmdline(char *str)
return -EINVAL;
if (!strcmp(str, "off"))
srso_cmd = SRSO_CMD_OFF;
srso_mitigation = SRSO_MITIGATION_NONE;
else if (!strcmp(str, "microcode"))
srso_cmd = SRSO_CMD_MICROCODE;
srso_mitigation = SRSO_MITIGATION_MICROCODE;
else if (!strcmp(str, "safe-ret"))
srso_cmd = SRSO_CMD_SAFE_RET;
srso_mitigation = SRSO_MITIGATION_SAFE_RET;
else if (!strcmp(str, "ibpb"))
srso_cmd = SRSO_CMD_IBPB;
srso_mitigation = SRSO_MITIGATION_IBPB;
else if (!strcmp(str, "ibpb-vmexit"))
srso_cmd = SRSO_CMD_IBPB_ON_VMEXIT;
srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
else
pr_err("Ignoring unknown SRSO option (%s).", str);
@@ -2731,132 +2723,83 @@ early_param("spec_rstack_overflow", srso_parse_cmdline);
static void __init srso_select_mitigation(void)
{
bool has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE);
bool has_microcode;
if (!boot_cpu_has_bug(X86_BUG_SRSO) ||
cpu_mitigations_off() ||
srso_cmd == SRSO_CMD_OFF) {
if (boot_cpu_has(X86_FEATURE_SBPB))
x86_pred_cmd = PRED_CMD_SBPB;
goto out;
}
if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off())
srso_mitigation = SRSO_MITIGATION_NONE;
if (srso_mitigation == SRSO_MITIGATION_NONE)
return;
if (srso_mitigation == SRSO_MITIGATION_AUTO)
srso_mitigation = SRSO_MITIGATION_SAFE_RET;
has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE);
if (has_microcode) {
/*
* Zen1/2 with SMT off aren't vulnerable after the right
* IBPB microcode has been applied.
*
* Zen1/2 don't have SBPB, no need to try to enable it here.
*/
if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
goto out;
}
if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
srso_mitigation = SRSO_MITIGATION_IBPB;
goto out;
srso_mitigation = SRSO_MITIGATION_NONE;
return;
}
} else {
pr_warn("IBPB-extending microcode not applied!\n");
pr_warn(SRSO_NOTICE);
/* may be overwritten by SRSO_CMD_SAFE_RET below */
srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED;
}
switch (srso_cmd) {
case SRSO_CMD_MICROCODE:
if (has_microcode) {
srso_mitigation = SRSO_MITIGATION_MICROCODE;
pr_warn(SRSO_NOTICE);
}
break;
case SRSO_CMD_SAFE_RET:
if (boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO))
switch (srso_mitigation) {
case SRSO_MITIGATION_SAFE_RET:
if (boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO)) {
srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
goto ibpb_on_vmexit;
}
if (IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
/*
* Enable the return thunk for generated code
* like ftrace, static_call, etc.
*/
setup_force_cpu_cap(X86_FEATURE_RETHUNK);
setup_force_cpu_cap(X86_FEATURE_UNRET);
if (boot_cpu_data.x86 == 0x19) {
setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
x86_return_thunk = srso_alias_return_thunk;
} else {
setup_force_cpu_cap(X86_FEATURE_SRSO);
x86_return_thunk = srso_return_thunk;
}
if (has_microcode)
srso_mitigation = SRSO_MITIGATION_SAFE_RET;
else
srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED;
} else {
if (!IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
srso_mitigation = SRSO_MITIGATION_NONE;
}
if (!has_microcode)
srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED;
break;
case SRSO_CMD_IBPB:
if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
if (has_microcode) {
setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
srso_mitigation = SRSO_MITIGATION_IBPB;
/*
* IBPB on entry already obviates the need for
* software-based untraining so clear those in case some
* other mitigation like Retbleed has selected them.
*/
setup_clear_cpu_cap(X86_FEATURE_UNRET);
setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
/*
* There is no need for RSB filling: write_ibpb() ensures
* all predictions, including the RSB, are invalidated,
* regardless of IBPB implementation.
*/
setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
}
} else {
pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
}
break;
ibpb_on_vmexit:
case SRSO_CMD_IBPB_ON_VMEXIT:
case SRSO_MITIGATION_IBPB_ON_VMEXIT:
if (boot_cpu_has(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) {
pr_notice("Reducing speculation to address VM/HV SRSO attack vector.\n");
srso_mitigation = SRSO_MITIGATION_BP_SPEC_REDUCE;
break;
}
if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
if (has_microcode) {
setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
/*
* There is no need for RSB filling: write_ibpb() ensures
* all predictions, including the RSB, are invalidated,
* regardless of IBPB implementation.
*/
setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
}
} else {
fallthrough;
case SRSO_MITIGATION_IBPB:
if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
srso_mitigation = SRSO_MITIGATION_NONE;
}
if (!has_microcode)
srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED;
break;
default:
break;
}
}
out:
static void __init srso_update_mitigation(void)
{
/* If retbleed is using IBPB, that works for SRSO as well */
if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB &&
boot_cpu_has(X86_FEATURE_IBPB_BRTYPE))
srso_mitigation = SRSO_MITIGATION_IBPB;
if (boot_cpu_has_bug(X86_BUG_SRSO) && !cpu_mitigations_off())
pr_info("%s\n", srso_strings[srso_mitigation]);
}
static void __init srso_apply_mitigation(void)
{
/*
* Clear the feature flag if this mitigation is not selected as that
* feature flag controls the BpSpecReduce MSR bit toggling in KVM.
@@ -2864,8 +2807,52 @@ out:
if (srso_mitigation != SRSO_MITIGATION_BP_SPEC_REDUCE)
setup_clear_cpu_cap(X86_FEATURE_SRSO_BP_SPEC_REDUCE);
if (srso_mitigation != SRSO_MITIGATION_NONE)
pr_info("%s\n", srso_strings[srso_mitigation]);
if (srso_mitigation == SRSO_MITIGATION_NONE) {
if (boot_cpu_has(X86_FEATURE_SBPB))
x86_pred_cmd = PRED_CMD_SBPB;
return;
}
switch (srso_mitigation) {
case SRSO_MITIGATION_SAFE_RET:
case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED:
/*
* Enable the return thunk for generated code
* like ftrace, static_call, etc.
*/
setup_force_cpu_cap(X86_FEATURE_RETHUNK);
setup_force_cpu_cap(X86_FEATURE_UNRET);
if (boot_cpu_data.x86 == 0x19) {
setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
x86_return_thunk = srso_alias_return_thunk;
} else {
setup_force_cpu_cap(X86_FEATURE_SRSO);
x86_return_thunk = srso_return_thunk;
}
break;
case SRSO_MITIGATION_IBPB:
setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
/*
* IBPB on entry already obviates the need for
* software-based untraining so clear those in case some
* other mitigation like Retbleed has selected them.
*/
setup_clear_cpu_cap(X86_FEATURE_UNRET);
setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
fallthrough;
case SRSO_MITIGATION_IBPB_ON_VMEXIT:
setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
/*
* There is no need for RSB filling: entry_ibpb() ensures
* all predictions, including the RSB, are invalidated,
* regardless of IBPB implementation.
*/
setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
break;
default:
break;
}
}
#undef pr_fmt