2020-11-13 00:01:22 +02:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
|
|
|
/* Copyright(c) 2016-20 Intel Corporation. */
|
|
|
|
|
|
|
|
#include <linux/acpi.h>
|
|
|
|
#include <linux/miscdevice.h>
|
|
|
|
#include <linux/mman.h>
|
|
|
|
#include <linux/security.h>
|
|
|
|
#include <linux/suspend.h>
|
|
|
|
#include <asm/traps.h>
|
|
|
|
#include "driver.h"
|
|
|
|
#include "encl.h"
|
|
|
|
|
2020-11-13 00:01:25 +02:00
|
|
|
/* Bits that may never be set in SECS attributes; populated by sgx_drv_init(). */
u64 sgx_attributes_reserved_mask;

/* Reserved XFRM bits; bits 0-1 are always permitted (never reserved). */
u64 sgx_xfrm_reserved_mask = ~0x3;

/* Reserved MISCSELECT-style bits; populated by sgx_drv_init(). */
u32 sgx_misc_reserved_mask;
|
|
|
|
|
2020-11-13 00:01:22 +02:00
|
|
|
static int sgx_open(struct inode *inode, struct file *file)
|
|
|
|
{
|
|
|
|
struct sgx_encl *encl;
|
x86/sgx: Add a page reclaimer
Just like normal RAM, there is a limited amount of enclave memory available
and overcommitting it is a very valuable tool to reduce resource use.
Introduce a simple reclaim mechanism for enclave pages.
In contrast to normal page reclaim, the kernel cannot directly access
enclave memory. To get around this, the SGX architecture provides a set of
functions to help. Among other things, these functions copy enclave memory
to and from normal memory, encrypting it and protecting its integrity in
the process.
Implement a page reclaimer by using these functions. Picks victim pages in
LRU fashion from all the enclaves running in the system. A new kernel
thread (ksgxswapd) reclaims pages in the background based on watermarks,
similar to normal kswapd.
All enclave pages can be reclaimed, architecturally. But, there are some
limits to this, such as the special SECS metadata page which must be
reclaimed last. The page version array (used to mitigate replaying old
reclaimed pages) is also architecturally reclaimable, but not yet
implemented. The end result is that the vast majority of enclave pages are
currently reclaimable.
Co-developed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Jethro Beekman <jethro@fortanix.com>
Link: https://lkml.kernel.org/r/20201112220135.165028-22-jarkko@kernel.org
2020-11-13 00:01:32 +02:00
|
|
|
int ret;
|
2020-11-13 00:01:22 +02:00
|
|
|
|
|
|
|
encl = kzalloc(sizeof(*encl), GFP_KERNEL);
|
|
|
|
if (!encl)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
x86/sgx: Add a page reclaimer
Just like normal RAM, there is a limited amount of enclave memory available
and overcommitting it is a very valuable tool to reduce resource use.
Introduce a simple reclaim mechanism for enclave pages.
In contrast to normal page reclaim, the kernel cannot directly access
enclave memory. To get around this, the SGX architecture provides a set of
functions to help. Among other things, these functions copy enclave memory
to and from normal memory, encrypting it and protecting its integrity in
the process.
Implement a page reclaimer by using these functions. Picks victim pages in
LRU fashion from all the enclaves running in the system. A new kernel
thread (ksgxswapd) reclaims pages in the background based on watermarks,
similar to normal kswapd.
All enclave pages can be reclaimed, architecturally. But, there are some
limits to this, such as the special SECS metadata page which must be
reclaimed last. The page version array (used to mitigate replaying old
reclaimed pages) is also architecturally reclaimable, but not yet
implemented. The end result is that the vast majority of enclave pages are
currently reclaimable.
Co-developed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Jethro Beekman <jethro@fortanix.com>
Link: https://lkml.kernel.org/r/20201112220135.165028-22-jarkko@kernel.org
2020-11-13 00:01:32 +02:00
|
|
|
kref_init(&encl->refcount);
|
2020-11-13 00:01:22 +02:00
|
|
|
xa_init(&encl->page_array);
|
|
|
|
mutex_init(&encl->lock);
|
x86/sgx: Add a page reclaimer
Just like normal RAM, there is a limited amount of enclave memory available
and overcommitting it is a very valuable tool to reduce resource use.
Introduce a simple reclaim mechanism for enclave pages.
In contrast to normal page reclaim, the kernel cannot directly access
enclave memory. To get around this, the SGX architecture provides a set of
functions to help. Among other things, these functions copy enclave memory
to and from normal memory, encrypting it and protecting its integrity in
the process.
Implement a page reclaimer by using these functions. Picks victim pages in
LRU fashion from all the enclaves running in the system. A new kernel
thread (ksgxswapd) reclaims pages in the background based on watermarks,
similar to normal kswapd.
All enclave pages can be reclaimed, architecturally. But, there are some
limits to this, such as the special SECS metadata page which must be
reclaimed last. The page version array (used to mitigate replaying old
reclaimed pages) is also architecturally reclaimable, but not yet
implemented. The end result is that the vast majority of enclave pages are
currently reclaimable.
Co-developed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Jethro Beekman <jethro@fortanix.com>
Link: https://lkml.kernel.org/r/20201112220135.165028-22-jarkko@kernel.org
2020-11-13 00:01:32 +02:00
|
|
|
INIT_LIST_HEAD(&encl->va_pages);
|
|
|
|
INIT_LIST_HEAD(&encl->mm_list);
|
|
|
|
spin_lock_init(&encl->mm_lock);
|
|
|
|
|
|
|
|
ret = init_srcu_struct(&encl->srcu);
|
|
|
|
if (ret) {
|
|
|
|
kfree(encl);
|
|
|
|
return ret;
|
|
|
|
}
|
2020-11-13 00:01:22 +02:00
|
|
|
|
|
|
|
file->private_data = encl;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int sgx_release(struct inode *inode, struct file *file)
|
|
|
|
{
|
|
|
|
struct sgx_encl *encl = file->private_data;
|
x86/sgx: Add a page reclaimer
Just like normal RAM, there is a limited amount of enclave memory available
and overcommitting it is a very valuable tool to reduce resource use.
Introduce a simple reclaim mechanism for enclave pages.
In contrast to normal page reclaim, the kernel cannot directly access
enclave memory. To get around this, the SGX architecture provides a set of
functions to help. Among other things, these functions copy enclave memory
to and from normal memory, encrypting it and protecting its integrity in
the process.
Implement a page reclaimer by using these functions. Picks victim pages in
LRU fashion from all the enclaves running in the system. A new kernel
thread (ksgxswapd) reclaims pages in the background based on watermarks,
similar to normal kswapd.
All enclave pages can be reclaimed, architecturally. But, there are some
limits to this, such as the special SECS metadata page which must be
reclaimed last. The page version array (used to mitigate replaying old
reclaimed pages) is also architecturally reclaimable, but not yet
implemented. The end result is that the vast majority of enclave pages are
currently reclaimable.
Co-developed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Jethro Beekman <jethro@fortanix.com>
Link: https://lkml.kernel.org/r/20201112220135.165028-22-jarkko@kernel.org
2020-11-13 00:01:32 +02:00
|
|
|
struct sgx_encl_mm *encl_mm;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Drain the remaining mm_list entries. At this point the list contains
|
|
|
|
* entries for processes, which have closed the enclave file but have
|
|
|
|
* not exited yet. The processes, which have exited, are gone from the
|
|
|
|
* list by sgx_mmu_notifier_release().
|
|
|
|
*/
|
|
|
|
for ( ; ; ) {
|
|
|
|
spin_lock(&encl->mm_lock);
|
|
|
|
|
|
|
|
if (list_empty(&encl->mm_list)) {
|
|
|
|
encl_mm = NULL;
|
|
|
|
} else {
|
|
|
|
encl_mm = list_first_entry(&encl->mm_list,
|
|
|
|
struct sgx_encl_mm, list);
|
|
|
|
list_del_rcu(&encl_mm->list);
|
2020-11-13 00:01:22 +02:00
|
|
|
}
|
|
|
|
|
x86/sgx: Add a page reclaimer
Just like normal RAM, there is a limited amount of enclave memory available
and overcommitting it is a very valuable tool to reduce resource use.
Introduce a simple reclaim mechanism for enclave pages.
In contrast to normal page reclaim, the kernel cannot directly access
enclave memory. To get around this, the SGX architecture provides a set of
functions to help. Among other things, these functions copy enclave memory
to and from normal memory, encrypting it and protecting its integrity in
the process.
Implement a page reclaimer by using these functions. Picks victim pages in
LRU fashion from all the enclaves running in the system. A new kernel
thread (ksgxswapd) reclaims pages in the background based on watermarks,
similar to normal kswapd.
All enclave pages can be reclaimed, architecturally. But, there are some
limits to this, such as the special SECS metadata page which must be
reclaimed last. The page version array (used to mitigate replaying old
reclaimed pages) is also architecturally reclaimable, but not yet
implemented. The end result is that the vast majority of enclave pages are
currently reclaimable.
Co-developed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Jethro Beekman <jethro@fortanix.com>
Link: https://lkml.kernel.org/r/20201112220135.165028-22-jarkko@kernel.org
2020-11-13 00:01:32 +02:00
|
|
|
spin_unlock(&encl->mm_lock);
|
2020-11-13 00:01:22 +02:00
|
|
|
|
x86/sgx: Add a page reclaimer
Just like normal RAM, there is a limited amount of enclave memory available
and overcommitting it is a very valuable tool to reduce resource use.
Introduce a simple reclaim mechanism for enclave pages.
In contrast to normal page reclaim, the kernel cannot directly access
enclave memory. To get around this, the SGX architecture provides a set of
functions to help. Among other things, these functions copy enclave memory
to and from normal memory, encrypting it and protecting its integrity in
the process.
Implement a page reclaimer by using these functions. Picks victim pages in
LRU fashion from all the enclaves running in the system. A new kernel
thread (ksgxswapd) reclaims pages in the background based on watermarks,
similar to normal kswapd.
All enclave pages can be reclaimed, architecturally. But, there are some
limits to this, such as the special SECS metadata page which must be
reclaimed last. The page version array (used to mitigate replaying old
reclaimed pages) is also architecturally reclaimable, but not yet
implemented. The end result is that the vast majority of enclave pages are
currently reclaimable.
Co-developed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Jethro Beekman <jethro@fortanix.com>
Link: https://lkml.kernel.org/r/20201112220135.165028-22-jarkko@kernel.org
2020-11-13 00:01:32 +02:00
|
|
|
/* The enclave is no longer mapped by any mm. */
|
|
|
|
if (!encl_mm)
|
|
|
|
break;
|
2020-11-13 00:01:22 +02:00
|
|
|
|
x86/sgx: Add a page reclaimer
Just like normal RAM, there is a limited amount of enclave memory available
and overcommitting it is a very valuable tool to reduce resource use.
Introduce a simple reclaim mechanism for enclave pages.
In contrast to normal page reclaim, the kernel cannot directly access
enclave memory. To get around this, the SGX architecture provides a set of
functions to help. Among other things, these functions copy enclave memory
to and from normal memory, encrypting it and protecting its integrity in
the process.
Implement a page reclaimer by using these functions. Picks victim pages in
LRU fashion from all the enclaves running in the system. A new kernel
thread (ksgxswapd) reclaims pages in the background based on watermarks,
similar to normal kswapd.
All enclave pages can be reclaimed, architecturally. But, there are some
limits to this, such as the special SECS metadata page which must be
reclaimed last. The page version array (used to mitigate replaying old
reclaimed pages) is also architecturally reclaimable, but not yet
implemented. The end result is that the vast majority of enclave pages are
currently reclaimable.
Co-developed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Jethro Beekman <jethro@fortanix.com>
Link: https://lkml.kernel.org/r/20201112220135.165028-22-jarkko@kernel.org
2020-11-13 00:01:32 +02:00
|
|
|
synchronize_srcu(&encl->srcu);
|
|
|
|
mmu_notifier_unregister(&encl_mm->mmu_notifier, encl_mm->mm);
|
|
|
|
kfree(encl_mm);
|
2021-02-08 00:14:01 +02:00
|
|
|
|
|
|
|
/* 'encl_mm' is gone, put encl_mm->encl reference: */
|
|
|
|
kref_put(&encl->refcount, sgx_encl_release);
|
2020-11-13 00:01:22 +02:00
|
|
|
}
|
|
|
|
|
x86/sgx: Add a page reclaimer
Just like normal RAM, there is a limited amount of enclave memory available
and overcommitting it is a very valuable tool to reduce resource use.
Introduce a simple reclaim mechanism for enclave pages.
In contrast to normal page reclaim, the kernel cannot directly access
enclave memory. To get around this, the SGX architecture provides a set of
functions to help. Among other things, these functions copy enclave memory
to and from normal memory, encrypting it and protecting its integrity in
the process.
Implement a page reclaimer by using these functions. Picks victim pages in
LRU fashion from all the enclaves running in the system. A new kernel
thread (ksgxswapd) reclaims pages in the background based on watermarks,
similar to normal kswapd.
All enclave pages can be reclaimed, architecturally. But, there are some
limits to this, such as the special SECS metadata page which must be
reclaimed last. The page version array (used to mitigate replaying old
reclaimed pages) is also architecturally reclaimable, but not yet
implemented. The end result is that the vast majority of enclave pages are
currently reclaimable.
Co-developed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Jethro Beekman <jethro@fortanix.com>
Link: https://lkml.kernel.org/r/20201112220135.165028-22-jarkko@kernel.org
2020-11-13 00:01:32 +02:00
|
|
|
kref_put(&encl->refcount, sgx_encl_release);
|
2020-11-13 00:01:22 +02:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int sgx_mmap(struct file *file, struct vm_area_struct *vma)
|
|
|
|
{
|
|
|
|
struct sgx_encl *encl = file->private_data;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = sgx_encl_may_map(encl, vma->vm_start, vma->vm_end, vma->vm_flags);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
x86/sgx: Add a page reclaimer
Just like normal RAM, there is a limited amount of enclave memory available
and overcommitting it is a very valuable tool to reduce resource use.
Introduce a simple reclaim mechanism for enclave pages.
In contrast to normal page reclaim, the kernel cannot directly access
enclave memory. To get around this, the SGX architecture provides a set of
functions to help. Among other things, these functions copy enclave memory
to and from normal memory, encrypting it and protecting its integrity in
the process.
Implement a page reclaimer by using these functions. Picks victim pages in
LRU fashion from all the enclaves running in the system. A new kernel
thread (ksgxswapd) reclaims pages in the background based on watermarks,
similar to normal kswapd.
All enclave pages can be reclaimed, architecturally. But, there are some
limits to this, such as the special SECS metadata page which must be
reclaimed last. The page version array (used to mitigate replaying old
reclaimed pages) is also architecturally reclaimable, but not yet
implemented. The end result is that the vast majority of enclave pages are
currently reclaimable.
Co-developed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Jethro Beekman <jethro@fortanix.com>
Link: https://lkml.kernel.org/r/20201112220135.165028-22-jarkko@kernel.org
2020-11-13 00:01:32 +02:00
|
|
|
ret = sgx_encl_mm_add(encl, vma->vm_mm);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2020-11-13 00:01:22 +02:00
|
|
|
vma->vm_ops = &sgx_vm_ops;
|
2023-01-26 11:37:49 -08:00
|
|
|
vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
|
2020-11-13 00:01:22 +02:00
|
|
|
vma->vm_private_data = encl;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static unsigned long sgx_get_unmapped_area(struct file *file,
|
|
|
|
unsigned long addr,
|
|
|
|
unsigned long len,
|
|
|
|
unsigned long pgoff,
|
|
|
|
unsigned long flags)
|
|
|
|
{
|
|
|
|
if ((flags & MAP_TYPE) == MAP_PRIVATE)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (flags & MAP_FIXED)
|
|
|
|
return addr;
|
|
|
|
|
mm: switch mm->get_unmapped_area() to a flag
The mm_struct contains a function pointer *get_unmapped_area(), which is
set to either arch_get_unmapped_area() or arch_get_unmapped_area_topdown()
during the initialization of the mm.
Since the function pointer only ever points to two functions that are
named the same across all arch's, a function pointer is not really
required. In addition future changes will want to add versions of the
functions that take additional arguments. So to save a pointers worth of
bytes in mm_struct, and prevent adding additional function pointers to
mm_struct in future changes, remove it and keep the information about
which get_unmapped_area() to use in a flag.
Add the new flag to MMF_INIT_MASK so it doesn't get clobbered on fork by
mmf_init_flags(). Most MM flags get clobbered on fork. In the
pre-existing behavior mm->get_unmapped_area() would get copied to the new
mm in dup_mm(), so not clobbering the flag preserves the existing behavior
around inheriting the topdown-ness.
Introduce a helper, mm_get_unmapped_area(), to easily convert code that
refers to the old function pointer to instead select and call either
arch_get_unmapped_area() or arch_get_unmapped_area_topdown() based on the
flag. Then drop the mm->get_unmapped_area() function pointer. Leave the
get_unmapped_area() pointer in struct file_operations alone. The main
purpose of this change is to reorganize in preparation for future changes,
but it also converts the calls of mm->get_unmapped_area() from indirect
branches into a direct ones.
The stress-ng bigheap benchmark calls realloc a lot, which calls through
get_unmapped_area() in the kernel. On x86, the change yielded a ~1%
improvement there on a retpoline config.
In testing a few x86 configs, removing the pointer unfortunately didn't
result in any actual size reductions in the compiled layout of mm_struct.
But depending on compiler or arch alignment requirements, the change could
shrink the size of mm_struct.
Link: https://lkml.kernel.org/r/20240326021656.202649-3-rick.p.edgecombe@intel.com
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
Acked-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Reviewed-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Aneesh Kumar K.V <aneesh.kumar@kernel.org>
Cc: Borislav Petkov (AMD) <bp@alien8.de>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Deepak Gupta <debug@rivosinc.com>
Cc: Guo Ren <guoren@kernel.org>
Cc: Helge Deller <deller@gmx.de>
Cc: H. Peter Anvin (Intel) <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Mark Brown <broonie@kernel.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2024-03-25 19:16:44 -07:00
|
|
|
return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
|
2020-11-13 00:01:22 +02:00
|
|
|
}
|
|
|
|
|
2020-11-13 00:01:23 +02:00
|
|
|
#ifdef CONFIG_COMPAT
/* Forward 32-bit ioctls to the native handler unchanged. */
static long sgx_compat_ioctl(struct file *filep, unsigned int cmd,
			     unsigned long arg)
{
	return sgx_ioctl(filep, cmd, arg);
}
#endif
|
|
|
|
|
2020-11-13 00:01:22 +02:00
|
|
|
/* File operations for /dev/sgx_enclave. */
static const struct file_operations sgx_encl_fops = {
	.owner			= THIS_MODULE,
	.open			= sgx_open,
	.release		= sgx_release,
	.unlocked_ioctl		= sgx_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= sgx_compat_ioctl,
#endif
	.mmap			= sgx_mmap,
	.get_unmapped_area	= sgx_get_unmapped_area,
};
|
|
|
|
|
|
|
|
/* Misc device exposing the enclave interface as /dev/sgx_enclave. */
static struct miscdevice sgx_dev_enclave = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "sgx_enclave",
	.nodename = "sgx_enclave",
	.fops = &sgx_encl_fops,
};
|
|
|
|
|
|
|
|
/*
 * sgx_drv_init() - Register the /dev/sgx_enclave driver.
 *
 * Bail out with -ENODEV unless the CPU provides SGX launch control and
 * SGX1 instruction support. Otherwise derive the reserved-bit masks for
 * MISC, attributes and XFRM from CPUID and register the misc device.
 *
 * Return: 0 on success, -ENODEV when the CPU lacks support, or the
 * misc_register() error code.
 */
int __init sgx_drv_init(void)
{
	unsigned int eax, ebx, ecx, edx;
	u64 attr_mask;
	u64 xfrm_mask;
	int ret;

	if (!cpu_feature_enabled(X86_FEATURE_SGX_LC)) {
		pr_info("SGX disabled: SGX launch control CPU feature is not available, /dev/sgx_enclave disabled.\n");
		return -ENODEV;
	}

	/* Sub-leaf 0: EAX bit 0 advertises SGX1 instruction support. */
	cpuid_count(SGX_CPUID, 0, &eax, &ebx, &ecx, &edx);
	if (!(eax & 1)) {
		pr_info("SGX disabled: SGX1 instruction support not available, /dev/sgx_enclave disabled.\n");
		return -ENODEV;
	}

	sgx_misc_reserved_mask = ~ebx | SGX_MISC_RESERVED_MASK;

	/* Sub-leaf 1: valid attribute bits in EBX:EAX, XFRM bits in EDX:ECX. */
	cpuid_count(SGX_CPUID, 1, &eax, &ebx, &ecx, &edx);

	attr_mask = ((u64)ebx << 32) + (u64)eax;
	sgx_attributes_reserved_mask = ~attr_mask | SGX_ATTR_RESERVED_MASK;

	if (cpu_feature_enabled(X86_FEATURE_OSXSAVE)) {
		xfrm_mask = ((u64)edx << 32) + (u64)ecx;
		sgx_xfrm_reserved_mask = ~xfrm_mask;
	}

	ret = misc_register(&sgx_dev_enclave);
	if (ret) {
		pr_info("SGX disabled: Unable to register the /dev/sgx_enclave driver (%d).\n", ret);
		return ret;
	}

	return 0;
}
|