x86/sev: Split off startup code from core code

Disentangle the SEV core code from the SEV code that is called during
early boot. The latter piece will be moved into startup/ in a subsequent
patch.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: David Woodhouse <dwmw@amazon.co.uk>
Cc: Dionna Amalie Glaze <dionnaglaze@google.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Kevin Loughlin <kevinloughlin@google.com>
Cc: Len Brown <len.brown@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Link: https://lore.kernel.org/r/20250418141253.2601348-11-ardb+git@google.com
Ard Biesheuvel authored on 2025-04-18 16:12:57 +02:00, committed by Ingo Molnar
parent b66fcee157
commit 234cf67fc3
5 changed files with 1662 additions and 1611 deletions

arch/x86/boot/compressed/sev.c

@@ -141,6 +141,8 @@ u64 svsm_get_caa_pa(void)
 
 int svsm_perform_call_protocol(struct svsm_call *call);
 
+u8 snp_vmpl;
+
 /* Include code for early handlers */
 #include "../../coco/sev/shared.c"
 
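Note: the decompressor compiles shared.c textually into its own translation unit via the include above, so the new u8 snp_vmpl definition has to appear before that include. For the kernel proper, the definition removed from shared.c further down presumably reappears in the new startup.c, along these lines (assumed placement; the startup.c diff is suppressed in this rendering):

        /* assumed contents of arch/x86/coco/sev/startup.c */
        u8 snp_vmpl __ro_after_init;
        EXPORT_SYMBOL_GPL(snp_vmpl);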

arch/x86/coco/sev/Makefile

@@ -1,18 +1,18 @@
 # SPDX-License-Identifier: GPL-2.0
 
-obj-y += core.o sev-nmi.o
+obj-y += core.o sev-nmi.o startup.o
 
 # jump tables are emitted using absolute references in non-PIC code
 # so they cannot be used in the early SEV startup code
-CFLAGS_core.o += -fno-jump-tables
+CFLAGS_startup.o += -fno-jump-tables
 
 ifdef CONFIG_FUNCTION_TRACER
-CFLAGS_REMOVE_core.o = -pg
+CFLAGS_REMOVE_startup.o = -pg
 endif
 
-KASAN_SANITIZE_core.o := n
-KMSAN_SANITIZE_core.o := n
-KCOV_INSTRUMENT_core.o := n
+KASAN_SANITIZE_startup.o := n
+KMSAN_SANITIZE_startup.o := n
+KCOV_INSTRUMENT_startup.o := n
 
 # With some compiler versions the generated code results in boot hangs, caused
 # by several compilation units. To be safe, disable all instrumentation.
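Why -fno-jump-tables matters here: the startup code runs through the identity mapping before the kernel's final virtual mapping is live, so link-time absolute addresses are not yet valid. A dense switch like the hypothetical one below is exactly what a compiler may lower to an indirect jump through a table of absolute code addresses (function name and values are illustrative, not from this patch):

        /* hypothetical illustration */
        static int vc_insn_len(int opcode)
        {
                switch (opcode) {
                case 0: return 1;
                case 1: return 2;
                case 2: return 3;
                case 3: return 5;
                case 4: return 7;
                default: return 0;
                }
        }

With -fno-jump-tables the compiler emits compare-and-branch sequences instead, which are position-independent.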

arch/x86/coco/sev/core.c (file diff suppressed because it is too large)

arch/x86/coco/sev/shared.c

@@ -27,17 +27,12 @@
 
 /*
  * SVSM related information:
- *   When running under an SVSM, the VMPL that Linux is executing at must be
- *   non-zero. The VMPL is therefore used to indicate the presence of an SVSM.
- *
  *   During boot, the page tables are set up as identity mapped and later
  *   changed to use kernel virtual addresses. Maintain separate virtual and
  *   physical addresses for the CAA to allow SVSM functions to be used during
  *   early boot, both with identity mapped virtual addresses and proper kernel
  *   virtual addresses.
  */
-u8 snp_vmpl __ro_after_init;
-EXPORT_SYMBOL_GPL(snp_vmpl);
 struct svsm_ca *boot_svsm_caa __ro_after_init;
 u64 boot_svsm_caa_pa __ro_after_init;
 
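The comment above is the rationale for keeping both a virtual and a physical address for the calling area (CAA): before the switch to the kernel's page tables, only identity-mapped addresses work. A minimal sketch of accessors built on these variables, covering the early-boot case only (the names match the svsm_get_caa()/svsm_get_caa_pa() declarations visible in the decompressor hunk; any later per-CPU calling-area handling is an assumption not shown in this diff):

        static struct svsm_ca *svsm_get_caa(void)
        {
                return boot_svsm_caa;           /* identity-mapped VA during early boot */
        }

        static u64 svsm_get_caa_pa(void)
        {
                return boot_svsm_caa_pa;        /* PA handed to the SVSM call protocol */
        }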
@@ -1192,28 +1187,6 @@ static void __head setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
         }
 }
 
-static inline void __pval_terminate(u64 pfn, bool action, unsigned int page_size,
-                                    int ret, u64 svsm_ret)
-{
-        WARN(1, "PVALIDATE failure: pfn: 0x%llx, action: %u, size: %u, ret: %d, svsm_ret: 0x%llx\n",
-             pfn, action, page_size, ret, svsm_ret);
-
-        sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
-}
-
-static void svsm_pval_terminate(struct svsm_pvalidate_call *pc, int ret, u64 svsm_ret)
-{
-        unsigned int page_size;
-        bool action;
-        u64 pfn;
-
-        pfn = pc->entry[pc->cur_index].pfn;
-        action = pc->entry[pc->cur_index].action;
-        page_size = pc->entry[pc->cur_index].page_size;
-
-        __pval_terminate(pfn, action, page_size, ret, svsm_ret);
-}
-
 static void __head svsm_pval_4k_page(unsigned long paddr, bool validate)
 {
         struct svsm_pvalidate_call *pc;
@@ -1269,260 +1242,6 @@ static void __head pvalidate_4k_page(unsigned long vaddr, unsigned long paddr,
         }
 }
 
-static void pval_pages(struct snp_psc_desc *desc)
-{
-        struct psc_entry *e;
-        unsigned long vaddr;
-        unsigned int size;
-        unsigned int i;
-        bool validate;
-        u64 pfn;
-        int rc;
-
-        for (i = 0; i <= desc->hdr.end_entry; i++) {
-                e = &desc->entries[i];
-
-                pfn = e->gfn;
-                vaddr = (unsigned long)pfn_to_kaddr(pfn);
-                size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
-                validate = e->operation == SNP_PAGE_STATE_PRIVATE;
-
-                rc = pvalidate(vaddr, size, validate);
-                if (!rc)
-                        continue;
-
-                if (rc == PVALIDATE_FAIL_SIZEMISMATCH && size == RMP_PG_SIZE_2M) {
-                        unsigned long vaddr_end = vaddr + PMD_SIZE;
-
-                        for (; vaddr < vaddr_end; vaddr += PAGE_SIZE, pfn++) {
-                                rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
-                                if (rc)
-                                        __pval_terminate(pfn, validate, RMP_PG_SIZE_4K, rc, 0);
-                        }
-                } else {
-                        __pval_terminate(pfn, validate, size, rc, 0);
-                }
-        }
-}
-
-static u64 svsm_build_ca_from_pfn_range(u64 pfn, u64 pfn_end, bool action,
-                                        struct svsm_pvalidate_call *pc)
-{
-        struct svsm_pvalidate_entry *pe;
-
-        /* Nothing in the CA yet */
-        pc->num_entries = 0;
-        pc->cur_index   = 0;
-
-        pe = &pc->entry[0];
-
-        while (pfn < pfn_end) {
-                pe->page_size = RMP_PG_SIZE_4K;
-                pe->action    = action;
-                pe->ignore_cf = 0;
-                pe->pfn       = pfn;
-
-                pe++;
-                pfn++;
-
-                pc->num_entries++;
-                if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT)
-                        break;
-        }
-
-        return pfn;
-}
-
-static int svsm_build_ca_from_psc_desc(struct snp_psc_desc *desc, unsigned int desc_entry,
-                                       struct svsm_pvalidate_call *pc)
-{
-        struct svsm_pvalidate_entry *pe;
-        struct psc_entry *e;
-
-        /* Nothing in the CA yet */
-        pc->num_entries = 0;
-        pc->cur_index   = 0;
-
-        pe = &pc->entry[0];
-        e  = &desc->entries[desc_entry];
-
-        while (desc_entry <= desc->hdr.end_entry) {
-                pe->page_size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
-                pe->action    = e->operation == SNP_PAGE_STATE_PRIVATE;
-                pe->ignore_cf = 0;
-                pe->pfn       = e->gfn;
-
-                pe++;
-                e++;
-                desc_entry++;
-
-                pc->num_entries++;
-                if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT)
-                        break;
-        }
-
-        return desc_entry;
-}
-
-static void svsm_pval_pages(struct snp_psc_desc *desc)
-{
-        struct svsm_pvalidate_entry pv_4k[VMGEXIT_PSC_MAX_ENTRY];
-        unsigned int i, pv_4k_count = 0;
-        struct svsm_pvalidate_call *pc;
-        struct svsm_call call = {};
-        unsigned long flags;
-        bool action;
-        u64 pc_pa;
-        int ret;
-
-        /*
-         * This can be called very early in the boot, use native functions in
-         * order to avoid paravirt issues.
-         */
-        flags = native_local_irq_save();
-
-        /*
-         * The SVSM calling area (CA) can support processing 510 entries at a
-         * time. Loop through the Page State Change descriptor until the CA is
-         * full or the last entry in the descriptor is reached, at which time
-         * the SVSM is invoked. This repeats until all entries in the descriptor
-         * are processed.
-         */
-        call.caa = svsm_get_caa();
-
-        pc = (struct svsm_pvalidate_call *)call.caa->svsm_buffer;
-        pc_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer);
-
-        /* Protocol 0, Call ID 1 */
-        call.rax = SVSM_CORE_CALL(SVSM_CORE_PVALIDATE);
-        call.rcx = pc_pa;
-
-        for (i = 0; i <= desc->hdr.end_entry;) {
-                i = svsm_build_ca_from_psc_desc(desc, i, pc);
-
-                do {
-                        ret = svsm_perform_call_protocol(&call);
-                        if (!ret)
-                                continue;
-
-                        /*
-                         * Check if the entry failed because of an RMP mismatch (a
-                         * PVALIDATE at 2M was requested, but the page is mapped in
-                         * the RMP as 4K).
-                         */
-                        if (call.rax_out == SVSM_PVALIDATE_FAIL_SIZEMISMATCH &&
-                            pc->entry[pc->cur_index].page_size == RMP_PG_SIZE_2M) {
-                                /* Save this entry for post-processing at 4K */
-                                pv_4k[pv_4k_count++] = pc->entry[pc->cur_index];
-
-                                /* Skip to the next one unless at the end of the list */
-                                pc->cur_index++;
-                                if (pc->cur_index < pc->num_entries)
-                                        ret = -EAGAIN;
-                                else
-                                        ret = 0;
-                        }
-                } while (ret == -EAGAIN);
-
-                if (ret)
-                        svsm_pval_terminate(pc, ret, call.rax_out);
-        }
-
-        /* Process any entries that failed to be validated at 2M and validate them at 4K */
-        for (i = 0; i < pv_4k_count; i++) {
-                u64 pfn, pfn_end;
-
-                action  = pv_4k[i].action;
-                pfn     = pv_4k[i].pfn;
-                pfn_end = pfn + 512;
-
-                while (pfn < pfn_end) {
-                        pfn = svsm_build_ca_from_pfn_range(pfn, pfn_end, action, pc);
-
-                        ret = svsm_perform_call_protocol(&call);
-                        if (ret)
-                                svsm_pval_terminate(pc, ret, call.rax_out);
-                }
-        }
-
-        native_local_irq_restore(flags);
-}
-
-static void pvalidate_pages(struct snp_psc_desc *desc)
-{
-        if (snp_vmpl)
-                svsm_pval_pages(desc);
-        else
-                pval_pages(desc);
-}
-
-static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc)
-{
-        int cur_entry, end_entry, ret = 0;
-        struct snp_psc_desc *data;
-        struct es_em_ctxt ctxt;
-
-        vc_ghcb_invalidate(ghcb);
-
-        /* Copy the input desc into GHCB shared buffer */
-        data = (struct snp_psc_desc *)ghcb->shared_buffer;
-        memcpy(ghcb->shared_buffer, desc, min_t(int, GHCB_SHARED_BUF_SIZE, sizeof(*desc)));
-
-        /*
-         * As per the GHCB specification, the hypervisor can resume the guest
-         * before processing all the entries. Check whether all the entries
-         * are processed. If not, then keep retrying. Note, the hypervisor
-         * will update the data memory directly to indicate the status, so
-         * reference the data->hdr everywhere.
-         *
-         * The strategy here is to wait for the hypervisor to change the page
-         * state in the RMP table before guest accesses the memory pages. If the
-         * page state change was not successful, then later memory access will
-         * result in a crash.
-         */
-        cur_entry = data->hdr.cur_entry;
-        end_entry = data->hdr.end_entry;
-
-        while (data->hdr.cur_entry <= data->hdr.end_entry) {
-                ghcb_set_sw_scratch(ghcb, (u64)__pa(data));
-
-                /* This will advance the shared buffer data points to. */
-                ret = sev_es_ghcb_hv_call(ghcb, &ctxt, SVM_VMGEXIT_PSC, 0, 0);
-
-                /*
-                 * Page State Change VMGEXIT can pass error code through
-                 * exit_info_2.
-                 */
-                if (WARN(ret || ghcb->save.sw_exit_info_2,
-                         "SNP: PSC failed ret=%d exit_info_2=%llx\n",
-                         ret, ghcb->save.sw_exit_info_2)) {
-                        ret = 1;
-                        goto out;
-                }
-
-                /* Verify that reserved bit is not set */
-                if (WARN(data->hdr.reserved, "Reserved bit is set in the PSC header\n")) {
-                        ret = 1;
-                        goto out;
-                }
-
-                /*
-                 * Sanity check that entry processing is not going backwards.
-                 * This will happen only if hypervisor is tricking us.
-                 */
-                if (WARN(data->hdr.end_entry > end_entry || cur_entry > data->hdr.cur_entry,
-                         "SNP: PSC processing going backward, end_entry %d (got %d) cur_entry %d (got %d)\n",
-                         end_entry, data->hdr.end_entry, cur_entry, data->hdr.cur_entry)) {
-                        ret = 1;
-                        goto out;
-                }
-        }
-
-out:
-        return ret;
-}
-
 static enum es_result vc_check_opcode_bytes(struct es_em_ctxt *ctxt,
                                             unsigned long exit_code)
 {
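For reference on the removed pval_pages() above: its 2M-to-4K fallback is driven by the error code of the PVALIDATE instruction, which the pvalidate() helper wraps. A hedged sketch of such a wrapper follows; the in-tree helper lives in a header that is not part of this diff and may differ in detail:

        /*
         * Sketch: RAX = vaddr, RCX = page size, RDX = validate. EAX returns
         * the error code; CF set means the RMP entry was already in the
         * requested state.
         */
        static inline int pvalidate(unsigned long vaddr, bool rmp_psize, bool validate)
        {
                bool no_rmpupdate;
                int rc;

                asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFF\n\t"
                             "setc %[cf]"
                             : [cf] "=qm" (no_rmpupdate), "=a" (rc)
                             : "a" (vaddr), "c" (rmp_psize), "d" (validate)
                             : "memory", "cc");

                if (no_rmpupdate)
                        return 255;     /* assumed value of PVALIDATE_FAIL_NOUPDATE */

                return rc;
        }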
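The "510 entries at a time" figure in the svsm_pval_pages() comment follows from the calling-area geometry. Assuming a 4 KiB svsm_buffer, a 16-byte svsm_pvalidate_call header, and 8-byte svsm_pvalidate_entry records (sizes assumed for illustration; they are not visible in this diff):

        (4096 - 16) / 8 = 510   /* presumably SVSM_PVALIDATE_MAX_COUNT */

This is also why the 4K post-processing loop, which walks 512 pfns per failed 2M entry, needs at most two SVSM calls per entry.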
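The removed vmgexit_psc() re-reads data->hdr on every loop iteration because the hypervisor advances the header in place, directly in the GHCB shared buffer. A sketch of the assumed header layout behind the cur_entry/end_entry forward-progress checks (the real struct lives in a header not shown here):

        struct psc_hdr {
                u16 cur_entry;  /* advanced by the hypervisor as entries complete */
                u16 end_entry;  /* last valid entry; must never grow */
                u32 reserved;   /* checked to remain zero after each VMGEXIT */
        } __packed;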

arch/x86/coco/sev/startup.c (new file, 1395 lines; file diff suppressed because it is too large)