[RFC PATCH 5/9] x86/sgx: Restrict mapping without an enclave page to PROT_NONE
From: Sean Christopherson
Date: Fri May 31 2019 - 19:36:36 EST
To support LSM integration, SGX will require userspace to explicitly
specify the allowed protections for each page. The allowed protections
will be supplied to and modified by LSMs (based on their policies).
To prevent userspace from circumventing the allowed protections, do not
allow PROT_{READ,WRITE,EXEC} mappings over any enclave range that does
not have an associated enclave page (the page that will track the
allowed protections); such ranges can only be mapped PROT_NONE. Enforce
this for the initial mmap() as well as later mprotect() calls by wiring
up the driver's ->mprotect() vm_ops hook.
Signed-off-by: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
---
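A rough sketch of the intended userspace-visible behavior (illustrative
only; the enclave fd and the assumption that no pages have been added to
the mapped range are hypothetical, not part of this patch):

#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>

/*
 * Hypothetical scenario: 'encl_fd' refers to an enclave that has been
 * created but has no enclave pages added in the first 'size' bytes.
 */
static void map_unbacked_range(int encl_fd, size_t size)
{
	/* PROT_NONE carries no protection bits, so the mapping is allowed. */
	void *addr = mmap(NULL, size, PROT_NONE, MAP_SHARED, encl_fd, 0);

	if (addr == MAP_FAILED) {
		perror("mmap(PROT_NONE)");
		return;
	}

	/*
	 * Requesting PROT_READ (or WRITE/EXEC) over the same range should
	 * fail with EACCES, since no enclave page exists to track the
	 * allowed protections.
	 */
	if (mprotect(addr, size, PROT_READ) && errno == EACCES)
		printf("mprotect(PROT_READ) rejected as expected\n");

	munmap(addr, size);
}

Because the same check backs both sgx_mmap() and the ->mprotect() hook,
userspace cannot map PROT_NONE first and then widen the protections
afterwards.
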
 arch/x86/kernel/cpu/sgx/driver/main.c |  5 +++++
 arch/x86/kernel/cpu/sgx/encl.c        | 33 +++++++++++++++++++++++++++++++++
 arch/x86/kernel/cpu/sgx/encl.h        |  3 +++
 3 files changed, 41 insertions(+)

diff --git a/arch/x86/kernel/cpu/sgx/driver/main.c b/arch/x86/kernel/cpu/sgx/driver/main.c
index 129d356aff30..65a87c2fdf02 100644
--- a/arch/x86/kernel/cpu/sgx/driver/main.c
+++ b/arch/x86/kernel/cpu/sgx/driver/main.c
@@ -63,6 +63,11 @@ static long sgx_compat_ioctl(struct file *filep, unsigned int cmd,
 static int sgx_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct sgx_encl *encl = file->private_data;
+	int ret;
+
+	ret = sgx_map_allowed(encl, vma->vm_start, vma->vm_end, vma->vm_flags);
+	if (ret)
+		return ret;
 
 	vma->vm_ops = &sgx_vm_ops;
 	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
index f23ea0fbaa47..955d4f430adc 100644
--- a/arch/x86/kernel/cpu/sgx/encl.c
+++ b/arch/x86/kernel/cpu/sgx/encl.c
@@ -235,6 +235,38 @@ static void sgx_vma_close(struct vm_area_struct *vma)
 	kref_put(&encl->refcount, sgx_encl_release);
 }
 
+int sgx_map_allowed(struct sgx_encl *encl, unsigned long start,
+		    unsigned long end, unsigned long prot)
+{
+	struct sgx_encl_page *page;
+	unsigned long addr;
+	int ret = 0;
+
+	prot &= (VM_READ | VM_WRITE | VM_EXEC);
+	if (!prot || !encl)
+		return 0;
+
+	mutex_lock(&encl->lock);
+
+	for (addr = start; addr < end; addr += PAGE_SIZE) {
+		page = radix_tree_lookup(&encl->page_tree, addr >> PAGE_SHIFT);
+		if (!page) {
+			ret = -EACCES;
+			break;
+		}
+	}
+
+	mutex_unlock(&encl->lock);
+
+	return ret;
+}
+
+static int sgx_vma_mprotect(struct vm_area_struct *vma, unsigned long start,
+			    unsigned long end, unsigned long prot)
+{
+	return sgx_map_allowed(vma->vm_private_data, start, end, prot);
+}
+
 static unsigned int sgx_vma_fault(struct vm_fault *vmf)
 {
 	unsigned long addr = (unsigned long)vmf->address;
@@ -372,6 +404,7 @@ static int sgx_vma_access(struct vm_area_struct *vma, unsigned long addr,
 const struct vm_operations_struct sgx_vm_ops = {
 	.close = sgx_vma_close,
 	.open = sgx_vma_open,
+	.mprotect = sgx_vma_mprotect,
 	.fault = sgx_vma_fault,
 	.access = sgx_vma_access,
 };
diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
index c557f0374d74..6e310e3b3fff 100644
--- a/arch/x86/kernel/cpu/sgx/encl.h
+++ b/arch/x86/kernel/cpu/sgx/encl.h
@@ -106,6 +106,9 @@ static inline unsigned long sgx_pcmd_offset(pgoff_t page_index)
 	       sizeof(struct sgx_pcmd);
 }
 
+int sgx_map_allowed(struct sgx_encl *encl, unsigned long start,
+		    unsigned long end, unsigned long prot);
+
 enum sgx_encl_mm_iter {
 	SGX_ENCL_MM_ITER_DONE = 0,
 	SGX_ENCL_MM_ITER_NEXT = 1,
--
2.21.0