
Intel TDX doesn't allow VMM to directly access guest private memory. Any
memory that is required for communication with the VMM must be shared
explicitly. The same rule applies for any DMA to and from the TDX guest.
All DMA pages have to be marked as shared pages. A generic way to achieve
this without any changes to device drivers is to use the SWIOTLB
framework.

The previous patch ("Add support for TDX shared memory") gave TDX guests
the _ability_ to make some pages shared, but did not make any pages
shared. This actually marks SWIOTLB buffers *as* shared.

Start returning true for cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) in
TDX guests. This has several implications:

 - Allows the existing mem_encrypt_init() to be used for TDX which
   sets SWIOTLB buffers shared (aka. "decrypted").

 - Ensures that all DMA is routed via the SWIOTLB mechanism (see
   pci_swiotlb_detect())

Stop selecting DYNAMIC_PHYSICAL_MASK directly. It will get set
indirectly by selecting X86_MEM_ENCRYPT.

mem_encrypt_init() is currently under an AMD-specific #ifdef. Move it to
a generic area of the header.

Co-developed-by: Kuppuswamy Sathyanarayanan <sathyanarayanan.kuppuswamy@linux.intel.com>
Signed-off-by: Kuppuswamy Sathyanarayanan <sathyanarayanan.kuppuswamy@linux.intel.com>
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Reviewed-by: Tony Luck <tony.luck@intel.com>
Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: https://lkml.kernel.org/r/20220405232939.73860-28-kirill.shutemov@linux.intel.com
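For context, a rough sketch of the generic path this commit enables.
mem_encrypt_init() and swiotlb_update_mem_attributes() are real mainline
functions, but the body below is an illustration of the mechanism, not an
exact copy of the tree:

        /* Illustrative sketch, not verbatim kernel code. */
        void __init mem_encrypt_init(void)
        {
                if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
                        return;

                /*
                 * Mark the SWIOTLB bounce buffers decrypted, i.e. shared
                 * with the VMM.  Under TDX this relies on the shared-memory
                 * conversion support added by the previous patch.
                 */
                swiotlb_update_mem_attributes();
        }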
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Confidential Computing Platform Capability checks
 *
 * Copyright (C) 2021 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <linux/export.h>
#include <linux/cc_platform.h>

#include <asm/coco.h>
#include <asm/processor.h>

static enum cc_vendor vendor __ro_after_init;
static u64 cc_mask __ro_after_init;

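/*
 * TDX guest attributes.  Guest memory is private by default and the VMM
 * cannot access it, so both the generic and the guest-specific memory
 * encryption attributes are advertised.  Returning true for
 * CC_ATTR_GUEST_MEM_ENCRYPT routes all DMA through SWIOTLB (see
 * pci_swiotlb_detect()) and lets the generic mem_encrypt_init() mark
 * the bounce buffers shared (aka. "decrypted").
 */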
static bool intel_cc_platform_has(enum cc_attr attr)
{
        switch (attr) {
        case CC_ATTR_GUEST_UNROLL_STRING_IO:
        case CC_ATTR_HOTPLUG_DISABLED:
        case CC_ATTR_GUEST_MEM_ENCRYPT:
        case CC_ATTR_MEM_ENCRYPT:
                return true;
        default:
                return false;
        }
}

/*
 * SME and SEV are very similar but they are not the same, so there are
 * times that the kernel will need to distinguish between SME and SEV. The
 * cc_platform_has() function is used for this. When a distinction isn't
 * needed, the CC_ATTR_MEM_ENCRYPT attribute can be used.
 *
 * The trampoline code is a good example for this requirement. Before
 * paging is activated, SME will access all memory as decrypted, but SEV
 * will access all memory as encrypted. So, when APs are being brought
 * up under SME the trampoline area cannot be encrypted, whereas under SEV
 * the trampoline area must be encrypted.
 */
static bool amd_cc_platform_has(enum cc_attr attr)
{
#ifdef CONFIG_AMD_MEM_ENCRYPT
        switch (attr) {
        case CC_ATTR_MEM_ENCRYPT:
                return sme_me_mask;

        case CC_ATTR_HOST_MEM_ENCRYPT:
                return sme_me_mask && !(sev_status & MSR_AMD64_SEV_ENABLED);

        case CC_ATTR_GUEST_MEM_ENCRYPT:
                return sev_status & MSR_AMD64_SEV_ENABLED;

        case CC_ATTR_GUEST_STATE_ENCRYPT:
                return sev_status & MSR_AMD64_SEV_ES_ENABLED;

        /*
         * With SEV, the rep string I/O instructions need to be unrolled
         * but SEV-ES supports them through the #VC handler.
         */
        case CC_ATTR_GUEST_UNROLL_STRING_IO:
                return (sev_status & MSR_AMD64_SEV_ENABLED) &&
                       !(sev_status & MSR_AMD64_SEV_ES_ENABLED);

        default:
                return false;
        }
#else
        return false;
#endif
}

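/*
 * Hyper-V Isolation VMs: only guest memory encryption is relevant here;
 * the host- and SME-specific attributes do not apply to such guests.
 */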
static bool hyperv_cc_platform_has(enum cc_attr attr)
{
        return attr == CC_ATTR_GUEST_MEM_ENCRYPT;
}

bool cc_platform_has(enum cc_attr attr)
{
        switch (vendor) {
        case CC_VENDOR_AMD:
                return amd_cc_platform_has(attr);
        case CC_VENDOR_INTEL:
                return intel_cc_platform_has(attr);
        case CC_VENDOR_HYPERV:
                return hyperv_cc_platform_has(attr);
        default:
                return false;
        }
}
EXPORT_SYMBOL_GPL(cc_platform_has);

u64 cc_mkenc(u64 val)
{
        /*
         * Both AMD and Intel use a bit in the page table to indicate
         * encryption status of the page.
         *
         * - for AMD, bit *set* means the page is encrypted
         * - for Intel *clear* means encrypted.
         */
        switch (vendor) {
        case CC_VENDOR_AMD:
                return val | cc_mask;
        case CC_VENDOR_INTEL:
                return val & ~cc_mask;
        default:
                return val;
        }
}

u64 cc_mkdec(u64 val)
{
        /* See comment in cc_mkenc() */
        switch (vendor) {
        case CC_VENDOR_AMD:
                return val & ~cc_mask;
        case CC_VENDOR_INTEL:
                return val | cc_mask;
        default:
                return val;
        }
}
EXPORT_SYMBOL_GPL(cc_mkdec);

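/*
 * These helpers back the page-protection conversions used elsewhere in
 * the tree; in mainline they are wired up along the lines of:
 *
 *      #define pgprot_encrypted(prot)  __pgprot(cc_mkenc(pgprot_val(prot)))
 *      #define pgprot_decrypted(prot)  __pgprot(cc_mkdec(pgprot_val(prot)))
 *
 * cc_mkdec() is exported so that pgprot_decrypted() is usable from
 * modules.
 */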
__init void cc_set_vendor(enum cc_vendor v)
{
        vendor = v;
}

__init void cc_set_mask(u64 mask)
{
        cc_mask = mask;
}
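
For context, the vendor and mask are expected to be set exactly once by
early platform detection.  A sketch of a TDX-style caller follows; the
gpa_width variable (guest physical address width reported by the TDX
module) and the surrounding body are illustrative, not verbatim kernel
code:

        void __init tdx_early_init(void)
        {
                /* ... detect TDX and read guest info from the TDX module ... */

                cc_set_vendor(CC_VENDOR_INTEL);

                /*
                 * The shared/private distinction is carried in the top bit
                 * of the guest physical address space.
                 */
                cc_set_mask(BIT_ULL(gpa_width - 1));
        }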