x86/sev: Avoid WARN()s and panic()s in early boot code

Using WARN() or panic() while executing from the early 1:1 mapping is
unlikely to do anything useful: the string literals are passed using
their kernel virtual addresses which are not even mapped yet. But even
if they were, calling into the printk() machinery from the early 1:1
mapped code is not going to get very far.

So drop the WARN()s entirely, and replace panic() with a deadloop.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Link: https://lore.kernel.org/r/20241205112804.3416920-10-ardb+git@google.com
Ard Biesheuvel, 2024-12-05 12:28:06 +01:00, committed by Ingo Molnar
parent feffde684a
commit 09d35045cd
2 changed files with 10 additions and 14 deletions
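The idea, in a nutshell: code that still runs from the early 1:1 mapping must not dereference kernel virtual addresses (such as format string literals) or call into the printk() machinery, so fatal errors can only be reported by terminating the guest through the GHCB protocol or by spinning in place. A minimal sketch of that pattern follows; early_fatal() is a hypothetical helper made up for illustration, not part of this patch:

/* Sketch only: early_fatal() is hypothetical, not kernel code. */
static void __head early_fatal(unsigned int reason)
{
	/* Ask the hypervisor to terminate the guest via the GHCB MSR protocol. */
	sev_es_terminate(SEV_TERM_SET_LINUX, reason);

	/* Should not return; if it somehow does, spin instead of panicking. */
	while (1)
		cpu_relax();
}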

@@ -777,15 +777,10 @@ early_set_pages_state(unsigned long vaddr, unsigned long paddr,
 		val = sev_es_rd_ghcb_msr();
 
-		if (WARN(GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP,
-			 "Wrong PSC response code: 0x%x\n",
-			 (unsigned int)GHCB_RESP_CODE(val)))
+		if (GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP)
 			goto e_term;
 
-		if (WARN(GHCB_MSR_PSC_RESP_VAL(val),
-			 "Failed to change page state to '%s' paddr 0x%lx error 0x%llx\n",
-			 op == SNP_PAGE_STATE_PRIVATE ? "private" : "shared",
-			 paddr, GHCB_MSR_PSC_RESP_VAL(val)))
+		if (GHCB_MSR_PSC_RESP_VAL(val))
 			goto e_term;
 
 		/* Page validation must be performed after changing to private */
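For context (the lines sit outside this hunk), the e_term label that both branches still jump to terminates the guest, so dropping the WARN()s only loses the diagnostic message, not the error handling. Roughly:

e_term:
	sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);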
@@ -821,7 +816,7 @@ void __head early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
 	early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_PRIVATE);
 }
 
-void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
+void __head early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
 					 unsigned long npages)
 {
 	/*
@@ -2361,8 +2356,8 @@ static __head void svsm_setup(struct cc_blob_sev_info *cc_info)
 	call.rax = SVSM_CORE_CALL(SVSM_CORE_REMAP_CA);
 	call.rcx = pa;
 	ret = svsm_perform_call_protocol(&call);
-	if (ret)
-		panic("Can't remap the SVSM CA, ret=%d, rax_out=0x%llx\n", ret, call.rax_out);
+	while (ret)
+		cpu_relax(); /* too early to panic */
 
 	RIP_REL_REF(boot_svsm_caa) = (struct svsm_ca *)pa;
 	RIP_REL_REF(boot_svsm_caa_pa) = pa;
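A note on the deadloop above: cpu_relax() on x86 is essentially the PAUSE instruction, so on failure the CPU just spins quietly instead of trying to panic(), which would need the whole printk() path to work. A simplified sketch of what it boils down to (see arch/x86/include/asm/processor.h for the real definition):

/* Simplified sketch of x86 cpu_relax(). */
static __always_inline void cpu_relax(void)
{
	asm volatile("rep; nop" ::: "memory");	/* aka PAUSE */
}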

@@ -1243,7 +1243,7 @@ static void svsm_pval_terminate(struct svsm_pvalidate_call *pc, int ret, u64 svsm_ret)
 	__pval_terminate(pfn, action, page_size, ret, svsm_ret);
 }
 
-static void svsm_pval_4k_page(unsigned long paddr, bool validate)
+static void __head svsm_pval_4k_page(unsigned long paddr, bool validate)
 {
 	struct svsm_pvalidate_call *pc;
 	struct svsm_call call = {};
@@ -1275,12 +1275,13 @@ static void svsm_pval_4k_page(unsigned long paddr, bool validate)
 	ret = svsm_perform_call_protocol(&call);
 	if (ret)
-		svsm_pval_terminate(pc, ret, call.rax_out);
+		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
 
 	native_local_irq_restore(flags);
 }
 
-static void pvalidate_4k_page(unsigned long vaddr, unsigned long paddr, bool validate)
+static void __head pvalidate_4k_page(unsigned long vaddr, unsigned long paddr,
+				     bool validate)
 {
 	int ret;
@@ -1293,7 +1294,7 @@ static void pvalidate_4k_page(unsigned long vaddr, unsigned long paddr, bool validate)
 	} else {
 		ret = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
 		if (ret)
-			__pval_terminate(PHYS_PFN(paddr), validate, RMP_PG_SIZE_4K, ret, 0);
+			sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
 	}
 }
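For reference, sev_es_terminate() is safe to call from the 1:1 mapping because it takes no strings and only writes the GHCB MSR before issuing VMGEXIT; it never returns. A simplified sketch of its shape (not the verbatim kernel source):

/* Simplified sketch of the termination path used above. */
static void __noreturn sev_es_terminate(unsigned int set, unsigned int reason)
{
	u64 val = GHCB_MSR_TERM_REQ;

	/* Encode the termination reason set and code into the request. */
	val |= GHCB_SEV_TERM_REASON(set, reason);

	/* Hand the request to the hypervisor via the GHCB MSR. */
	sev_es_wr_ghcb_msr(val);
	VMGEXIT();

	/* The hypervisor should not resume us; halt forever if it does. */
	while (true)
		asm volatile("hlt\n" : : : "memory");
}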