ENT-1189 - Update SGX driver (#163)

* Remove linux-sgx-driver; re-add subtree (currently not playing ball)

* Squashed 'sgx-jvm/linux-sgx-driver/' content from commit 03435d33d

git-subtree-dir: sgx-jvm/linux-sgx-driver
git-subtree-split: 03435d33de0bcca6c5777f23ac161249b9158f1e
Author: Tommy Lillehagen, 2017-12-14 12:36:34 +00:00 (committed by GitHub)
parent f21f8e7142
commit a260d7eb0b
14 changed files with 2786 additions and 1482 deletions
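For reference, a squashed subtree re-add like the one described in the commit message is normally a two-step operation; a minimal sketch (the exact invocation used for this commit is not recorded here, and fetching a bare SHA over HTTPS depends on the server):

```
# Drop the old copy, then re-import the upstream repo as a squashed subtree
git rm -r sgx-jvm/linux-sgx-driver
git commit -m "Remove linux-sgx-driver"
git subtree add --prefix=sgx-jvm/linux-sgx-driver \
    https://github.com/01org/linux-sgx-driver.git \
    03435d33de0bcca6c5777f23ac161249b9158f1e --squash
```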

View File

@@ -4,14 +4,15 @@ ifneq ($(KERNELRELEASE),)
 	sgx_page_cache.o \
 	sgx_ioctl.o \
 	sgx_vma.o \
-	sgx_util.o
+	sgx_util.o\
+	sgx_encl.o
 obj-m += isgx.o
 else
 KDIR := /lib/modules/$(shell uname -r)/build
 PWD := $(shell pwd)
 default:
-	$(MAKE) -C $(KDIR) SUBDIRS=$(PWD) modules
+	$(MAKE) -C $(KDIR) SUBDIRS=$(PWD) CFLAGS_MODULE="-DDEBUG -g -O0" modules
 install: default
 	$(MAKE) INSTALL_MOD_DIR=kernel/drivers/intel/sgx -C $(KDIR) M=$(PWD) modules_install
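The new default target above bakes debug flags into the module build; invoking the targets is unchanged. A minimal sketch of using them (kernel headers for the running kernel must be installed; note that newer kernels dropped `SUBDIRS=` in favour of `M=`, so this Makefile targets kernels of that era):

```
$ make                # builds isgx.ko, now with -DDEBUG -g -O0 via CFLAGS_MODULE
$ sudo make install   # installs under kernel/drivers/intel/sgx via modules_install
```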

View File

@@ -11,6 +11,13 @@ The Linux SGX software stack is comprised of the Intel(R) SGX driver, the Intel(
 The [linux-sgx-driver](https://github.com/01org/linux-sgx-driver) project hosts the out-of-tree driver for the Linux Intel(R) SGX software stack, which will be used until the driver upstreaming process is complete.
+
+Within the linux-sgx-driver project, two versions of the out-of-tree driver are provided. Both versions are compatible with the linux-sgx PSW and SDK:
+- SGX 2.0 Linux Driver (sgx2)
+	* The sgx2 branch of the linux-sgx-driver project contains the SGX 2.0 Linux Driver. This driver has additional support for SGX 2.0-based features available in upcoming CPUs. This driver has the same behavior as the SGX Linux Driver (master) on CPUs without SGX 2.0 support.
+- SGX Linux Driver (master)
+	* The master branch of the linux-sgx-driver project tracks the proposed upstream version of the SGX driver and does not yet support SGX 2.0-based features.
+
 License
 -------
 See License.txt for details.
@@ -20,6 +27,8 @@ Contributing
 Starting from 05/2017, we are importing the sgx driver code from the in-kernel sgx repository located at git-hub: https://github.com/jsakkine-intel/linux-sgx.git. Any contribution should be done there. Future versions of the sgx driver code will be imported later on. The motivation behind this decision is to maintain a single source code of the sgx linux driver.
 An additional directory inker2ext/ has been created, it contains a script and a patch file that can be used in order to separately generate the code base of the sgx external module; it can be used in case someone wants the newest sgx driver as an external module and does not want to wait for next update.
+
+The sgx2 branch hosts an initial implementation supporting SGX 2.0. This patch is maintained in inker2ext/sgx2.patch in the 2.0 branch and will be periodically rebased to take updates from the linux-sgx-driver:master branch. Contributions for this patch should be managed directly through the linux-sgx-driver project on Github.
 Documentation
 -------------
 - [Intel(R) SGX for Linux\* OS](https://01.org/intel-softwareguard-extensions) project home page on [01.org](http://01.org)
@@ -39,6 +48,8 @@ Build and Install the Intel(R) SGX Driver
 - Configure the system with the **SGX hardware enabled** option.
 ### Build the Intel(R) SGX Driver
+
+**Note:** To use the SGX 2.0 driver, checkout or download the sgx2 branch and then follow the build instructions.
 To build Intel(R) SGX driver, change the directory to the driver path and enter the following command:
 ```
 $ make
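Putting the note above together with the build steps, a minimal end-to-end sketch (branch and repository as given above; the `isgx.ko` name follows from the Makefile's `obj-m += isgx.o`):

```
$ git clone https://github.com/01org/linux-sgx-driver.git
$ cd linux-sgx-driver
$ git checkout sgx2    # only if the SGX 2.0 driver is wanted
$ make
$ sudo insmod isgx.ko  # load the freshly built driver for a quick test
```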

View File

@@ -1,27 +1,26 @@
 #! /bin/bash
-# Should run within git-hub sgx driver directory:
+# Should be run from git-hub sgx driver root directory.
+# Assumes in kernel sgx master branch code repo has been cloned
+#
 # Usage:
-# kernel_2_extern <in-kernel-root-path> <patch-file-name>
+# kernel_2_extern <in-kernel-root-path>
 pa=`pwd`
-file="$1/arch/x86/include/asm/sgx.h"
-if [ ! -f $file ]; then
-	echo "Missing file $file"
-	exit
-fi
-cp $file sgx_arch.h
-file="$1/arch/x86/include/uapi/asm/sgx.h"
+patchfile="$pa/inker2ext/internal-to-external-tree-changes.patch"
 if [ ! -f $file ]; then
-	echo "Missing file $file"
+	echo "Missing patch file: $file"
+	echo "You should run the script from the out of tree driver repository root directory"
 	exit
 fi
-cp $file sgx_user.h
 cd $1
-git apply $pa/$2
-cp *.c $pa
-cp *.h $pa
-cp Makefile $pa
+git apply $patchfile
 cd $pa
+cp $1/drivers/platform/x86/intel_sgx/sgx* .
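Usage sketch for the reworked script (the `.sh` suffix and the inker2ext/ location are assumptions based on the README; `~/linux-sgx-kernel` stands in for wherever the in-kernel repository was cloned):

```
$ cd linux-sgx-driver                                # out-of-tree driver root
$ ./inker2ext/kernel_2_extern.sh ~/linux-sgx-kernel  # regenerates the sgx* sources in place
```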

View File

@@ -4,7 +4,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016-2017 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -21,7 +21,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016-2017 Intel Corporation.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -60,7 +60,7 @@
 #ifndef __ARCH_INTEL_SGX_H__
 #define __ARCH_INTEL_SGX_H__

-#include "sgx_user.h"
+#include "sgx_asm.h"
 #include <linux/kref.h>
 #include <linux/version.h>
 #include <linux/rbtree.h>
@@ -70,6 +70,7 @@
 #include <linux/mmu_notifier.h>
 #include <linux/radix-tree.h>
 #include "sgx_arch.h"
+#include "sgx_user.h"

 #define SGX_EINIT_SPIN_COUNT	20
 #define SGX_EINIT_SLEEP_COUNT	50
@@ -77,6 +78,16 @@
 #define SGX_VA_SLOT_COUNT 512

+struct sgx_epc_page {
+	resource_size_t	pa;
+	struct list_head list;
+	struct sgx_encl_page *encl_page;
+};
+
+enum sgx_alloc_flags {
+	SGX_ALLOC_ATOMIC	= BIT(0),
+};
+
 struct sgx_va_page {
 	struct sgx_epc_page *epc_page;
 	DECLARE_BITMAP(slots, SGX_VA_SLOT_COUNT);
@@ -108,7 +119,6 @@ struct sgx_encl_page {
 	unsigned long addr;
 	unsigned int flags;
 	struct sgx_epc_page *epc_page;
-	struct list_head load_list;
 	struct sgx_va_page *va_page;
 	unsigned int va_offset;
 };
@@ -130,6 +140,8 @@ enum sgx_encl_flags {
 struct sgx_encl {
 	unsigned int flags;
+	uint64_t attributes;
+	uint64_t xfrm;
 	unsigned int secs_child_cnt;
 	struct mutex lock;
 	struct mm_struct *mm;
@@ -139,22 +151,23 @@ struct sgx_encl {
 	struct kref refcount;
 	unsigned long base;
 	unsigned long size;
+	unsigned long ssaframesize;
 	struct list_head va_pages;
 	struct radix_tree_root page_tree;
 	struct list_head add_page_reqs;
 	struct work_struct add_page_work;
-	struct sgx_encl_page secs_page;
+	struct sgx_encl_page secs;
 	struct sgx_tgid_ctx *tgid_ctx;
 	struct list_head encl_list;
 	struct mmu_notifier mmu_notifier;
 };

 struct sgx_epc_bank {
+	unsigned long pa;
 #ifdef CONFIG_X86_64
-	void *mem;
+	unsigned long va;
 #endif
-	unsigned long start;
-	unsigned long end;
+	unsigned long size;
 };
@@ -163,22 +176,36 @@ extern int sgx_nr_epc_banks;
 extern u64 sgx_encl_size_max_32;
 extern u64 sgx_encl_size_max_64;
 extern u64 sgx_xfrm_mask;
-extern u32 sgx_ssaframesize_tbl[64];
-extern bool sgx_has_sgx2;
+extern u32 sgx_misc_reserved;
+extern u32 sgx_xsave_size_tbl[64];
 extern const struct vm_operations_struct sgx_vm_ops;
-extern atomic_t sgx_nr_pids;

 #define sgx_pr_ratelimited(level, encl, fmt, ...) \
 	pr_ ## level ## _ratelimited("intel_sgx: [%d:0x%p] " fmt, \
 				     pid_nr((encl)->tgid_ctx->tgid), \
 				     (void *)(encl)->base, ##__VA_ARGS__)

-#define sgx_dbg(encl, fmt, ...) sgx_pr_ratelimited(debug, encl, fmt, ##__VA_ARGS__)
-#define sgx_info(encl, fmt, ...) sgx_pr_ratelimited(info, encl, fmt, ##__VA_ARGS__)
-#define sgx_warn(encl, fmt, ...) sgx_pr_ratelimited(warn, encl, fmt, ##__VA_ARGS__)
-#define sgx_err(encl, fmt, ...) sgx_pr_ratelimited(err, encl, fmt, ##__VA_ARGS__)
-#define sgx_crit(encl, fmt, ...) sgx_pr_ratelimited(crit, encl, fmt, ##__VA_ARGS__)
+#define sgx_dbg(encl, fmt, ...) \
+	sgx_pr_ratelimited(debug, encl, fmt, ##__VA_ARGS__)
+#define sgx_info(encl, fmt, ...) \
+	sgx_pr_ratelimited(info, encl, fmt, ##__VA_ARGS__)
+#define sgx_warn(encl, fmt, ...) \
+	sgx_pr_ratelimited(warn, encl, fmt, ##__VA_ARGS__)
+#define sgx_err(encl, fmt, ...) \
+	sgx_pr_ratelimited(err, encl, fmt, ##__VA_ARGS__)
+#define sgx_crit(encl, fmt, ...) \
+	sgx_pr_ratelimited(crit, encl, fmt, ##__VA_ARGS__)
+
+int sgx_encl_find(struct mm_struct *mm, unsigned long addr,
+		  struct vm_area_struct **vma);
+void sgx_tgid_ctx_release(struct kref *ref);
+int sgx_encl_create(struct sgx_secs *secs);
+int sgx_encl_add_page(struct sgx_encl *encl, unsigned long addr, void *data,
+		      struct sgx_secinfo *secinfo, unsigned int mrmask);
+int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct,
+		  struct sgx_einittoken *einittoken);
+void sgx_encl_release(struct kref *ref);

 long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
 #ifdef CONFIG_COMPAT
@@ -196,13 +223,10 @@ void sgx_insert_pte(struct sgx_encl *encl,
 		    struct sgx_epc_page *epc_page,
 		    struct vm_area_struct *vma);
 int sgx_eremove(struct sgx_epc_page *epc_page);
-struct vm_area_struct *sgx_find_vma(struct sgx_encl *encl, unsigned long addr);
 void sgx_zap_tcs_ptes(struct sgx_encl *encl,
 		      struct vm_area_struct *vma);
 void sgx_invalidate(struct sgx_encl *encl, bool flush_cpus);
 void sgx_flush_cpus(struct sgx_encl *encl);
-int sgx_find_encl(struct mm_struct *mm, unsigned long addr,
-		  struct vm_area_struct **vma);

 enum sgx_fault_flags {
 	SGX_FAULT_RESERVE = BIT(0),
@@ -212,23 +236,19 @@ struct sgx_encl_page *sgx_fault_page(struct vm_area_struct *vma,
 				     unsigned long addr,
 				     unsigned int flags);
-void sgx_encl_release(struct kref *ref);
-void sgx_tgid_ctx_release(struct kref *ref);

 extern struct mutex sgx_tgid_ctx_mutex;
 extern struct list_head sgx_tgid_ctx_list;
-extern struct task_struct *ksgxswapd_tsk;
+extern atomic_t sgx_va_pages_cnt;

-enum sgx_alloc_flags {
-	SGX_ALLOC_ATOMIC	= BIT(0),
-};
-int ksgxswapd(void *p);
-int sgx_page_cache_init(resource_size_t start, unsigned long size);
+int sgx_add_epc_bank(resource_size_t start, unsigned long size, int bank);
+int sgx_page_cache_init(void);
 void sgx_page_cache_teardown(void);
 struct sgx_epc_page *sgx_alloc_page(unsigned int flags);
-int sgx_free_page(struct sgx_epc_page *entry, struct sgx_encl *encl);
+void sgx_free_page(struct sgx_epc_page *entry, struct sgx_encl *encl);
 void *sgx_get_page(struct sgx_epc_page *entry);
 void sgx_put_page(void *epc_page_vaddr);
+void sgx_eblock(struct sgx_encl *encl, struct sgx_epc_page *epc_page);
+void sgx_etrack(struct sgx_encl *encl);

 #endif /* __ARCH_X86_INTEL_SGX_H__ */
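To make the reshuffled declarations concrete, here is an illustrative caller (hypothetical, not part of the driver sources) showing how the EPC helpers now fit together, including the sgx_free_page() signature change from int to void:

```
/* Sketch only: allocate an EPC page, map it temporarily, then release it. */
static int example_epc_roundtrip(struct sgx_encl *encl)
{
	struct sgx_epc_page *page = sgx_alloc_page(SGX_ALLOC_ATOMIC);
	void *va;

	if (IS_ERR(page))
		return PTR_ERR(page);

	va = sgx_get_page(page);   /* temporary kernel mapping of the EPC page */
	/* ... drive an ENCLS leaf against va ... */
	sgx_put_page(va);

	sgx_free_page(page, encl); /* no return value to check any more */
	return 0;
}
```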

View File

@@ -4,7 +4,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016-2017 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -21,7 +21,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016-2017 Intel Corporation.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -52,20 +52,87 @@
  * Authors:
  *
  * Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+ * Suresh Siddha <suresh.b.siddha@intel.com>
+ * Serge Ayoun <serge.ayoun@intel.com>
+ * Shay Katz-zamir <shay.katz-zamir@intel.com>
  */

-#ifndef _ASM_X86_SGX_H
-#define _ASM_X86_SGX_H
-
-#include <asm/asm.h>
-#include <linux/bitops.h>
-#include <linux/err.h>
 #include <linux/types.h>

-#define SGX_CPUID 0x12
+#ifndef _ASM_X86_SGX_ARCH_H
+#define _ASM_X86_SGX_ARCH_H
+
+#define SGX_SSA_GPRS_SIZE		182
+#define SGX_SSA_MISC_EXINFO_SIZE	16
+
+enum sgx_misc {
+	SGX_MISC_EXINFO	= 0x01,
+};
+
+#define SGX_MISC_RESERVED_MASK 0xFFFFFFFFFFFFFFFEL
+
+enum sgx_attribute {
+	SGX_ATTR_DEBUG		= 0x02,
+	SGX_ATTR_MODE64BIT	= 0x04,
+	SGX_ATTR_PROVISIONKEY	= 0x10,
+	SGX_ATTR_EINITTOKENKEY	= 0x20,
+};
+
+#define SGX_ATTR_RESERVED_MASK 0xFFFFFFFFFFFFFFC9L
+
+#define SGX_SECS_RESERVED1_SIZE 24
+#define SGX_SECS_RESERVED2_SIZE 32
+#define SGX_SECS_RESERVED3_SIZE 96
+#define SGX_SECS_RESERVED4_SIZE 3836
+
+struct sgx_secs {
+	uint64_t size;
+	uint64_t base;
+	uint32_t ssaframesize;
+	uint32_t miscselect;
+	uint8_t reserved1[SGX_SECS_RESERVED1_SIZE];
+	uint64_t attributes;
+	uint64_t xfrm;
+	uint32_t mrenclave[8];
+	uint8_t reserved2[SGX_SECS_RESERVED2_SIZE];
+	uint32_t mrsigner[8];
+	uint8_t reserved3[SGX_SECS_RESERVED3_SIZE];
+	uint16_t isvvprodid;
+	uint16_t isvsvn;
+	uint8_t reserved4[SGX_SECS_RESERVED4_SIZE];
+};
+
+enum sgx_tcs_flags {
+	SGX_TCS_DBGOPTIN	= 0x01, /* cleared on EADD */
+};
+
+#define SGX_TCS_RESERVED_MASK 0xFFFFFFFFFFFFFFFEL
+
+struct sgx_tcs {
+	uint64_t state;
+	uint64_t flags;
+	uint64_t ossa;
+	uint32_t cssa;
+	uint32_t nssa;
+	uint64_t oentry;
+	uint64_t aep;
+	uint64_t ofsbase;
+	uint64_t ogsbase;
+	uint32_t fslimit;
+	uint32_t gslimit;
+	uint64_t reserved[503];
+};
+
+struct sgx_pageinfo {
+	uint64_t linaddr;
+	uint64_t srcpge;
+	union {
+		uint64_t secinfo;
+		uint64_t pcmd;
+	};
+	uint64_t secs;
+} __attribute__((aligned(32)));
+
+#define SGX_SECINFO_PERMISSION_MASK	0x0000000000000007L
+#define SGX_SECINFO_PAGE_TYPE_MASK	0x000000000000FF00L
+#define SGX_SECINFO_RESERVED_MASK	0xFFFFFFFFFFFF00F8L
@@ -78,277 +145,125 @@ enum sgx_secinfo_flags {
 	SGX_SECINFO_R	= 0x01,
 	SGX_SECINFO_W	= 0x02,
 	SGX_SECINFO_X	= 0x04,
-	SGX_SECINFO_SECS	= 0x000ULL,
-	SGX_SECINFO_TCS		= 0x100ULL,
-	SGX_SECINFO_REG		= 0x200ULL,
+	SGX_SECINFO_SECS	= (SGX_PAGE_TYPE_SECS << 8),
+	SGX_SECINFO_TCS		= (SGX_PAGE_TYPE_TCS << 8),
+	SGX_SECINFO_REG		= (SGX_PAGE_TYPE_REG << 8),
 };

 struct sgx_secinfo {
-	u64 flags;
-	u64 reserved[7];
-} __aligned(128);
-
-struct sgx_einittoken {
-	u32 valid;
-	u8 reserved1[206];
-	u16 isvsvnle;
-	u8 reserved2[92];
-} __aligned(512);
-
-enum isgx_secs_attributes {
-	SGX_SECS_A_DEBUG		= BIT_ULL(1),
-	SGX_SECS_A_MODE64BIT		= BIT_ULL(2),
-	SGX_SECS_A_PROVISION_KEY	= BIT_ULL(4),
-	SGX_SECS_A_LICENSE_KEY		= BIT_ULL(5),
-	SGX_SECS_A_RESERVED_MASK	= (BIT_ULL(0) |
-					   BIT_ULL(3) |
-					   GENMASK_ULL(63, 6)),
-};
-
-#define SGX_SECS_RESERVED1_SIZE 28
-#define SGX_SECS_RESERVED2_SIZE 32
-#define SGX_SECS_RESERVED3_SIZE 96
-#define SGX_SECS_RESERVED4_SIZE 3836
-
-struct sgx_secs {
-	u64 size;
-	u64 base;
-	u32 ssaframesize;
-	uint8_t reserved1[SGX_SECS_RESERVED1_SIZE];
-	u64 flags;
-	u64 xfrm;
-	u32 mrenclave[8];
-	uint8_t reserved2[SGX_SECS_RESERVED2_SIZE];
-	u32 mrsigner[8];
-	uint8_t reserved3[SGX_SECS_RESERVED3_SIZE];
-	u16 isvvprodid;
-	u16 isvsvn;
-	uint8_t reserved[SGX_SECS_RESERVED4_SIZE];
-};
-
-struct sgx_tcs {
-	u64 state;
-	u64 flags;
-	u64 ossa;
-	u32 cssa;
-	u32 nssa;
-	u64 oentry;
-	u64 aep;
-	u64 ofsbase;
-	u64 ogsbase;
-	u32 fslimit;
-	u32 gslimit;
-	u64 reserved[503];
-};
-
-enum sgx_secinfo_masks {
-	SGX_SECINFO_PERMISSION_MASK	= GENMASK_ULL(2, 0),
-	SGX_SECINFO_PAGE_TYPE_MASK	= GENMASK_ULL(15, 8),
-	SGX_SECINFO_RESERVED_MASK	= (GENMASK_ULL(7, 3) |
-					   GENMASK_ULL(63, 16)),
-};
+	uint64_t flags;
+	uint64_t reserved[7];
+} __attribute__((aligned(64)));

 struct sgx_pcmd {
 	struct sgx_secinfo secinfo;
-	u64 enclave_id;
-	u8 reserved[40];
-	u8 mac[16];
+	uint64_t enclave_id;
+	uint8_t reserved[40];
+	uint8_t mac[16];
 };

-struct sgx_page_info {
-	u64 linaddr;
-	u64 srcpge;
-	union {
-		u64 secinfo;
-		u64 pcmd;
-	};
-	u64 secs;
-} __aligned(32);
-
-#define SIGSTRUCT_SIZE 1808
-#define EINITTOKEN_SIZE 304
-
-enum {
-	ECREATE	= 0x0,
-	EADD	= 0x1,
-	EINIT	= 0x2,
-	EREMOVE	= 0x3,
-	EDGBRD	= 0x4,
-	EDGBWR	= 0x5,
-	EEXTEND	= 0x6,
-	ELDU	= 0x8,
-	EBLOCK	= 0x9,
-	EPA	= 0xA,
-	EWB	= 0xB,
-	ETRACK	= 0xC,
-	EAUG	= 0xD,
-	EMODPR	= 0xE,
-	EMODT	= 0xF,
-};
-
-#define __encls_ret(rax, rbx, rcx, rdx) \
-	({ \
-	int ret; \
-	asm volatile( \
-	"1: .byte 0x0f, 0x01, 0xcf;\n\t" \
-	"2:\n" \
-	".section .fixup,\"ax\"\n" \
-	"3: jmp 2b\n" \
-	".previous\n" \
-	_ASM_EXTABLE(1b, 3b) \
-	: "=a"(ret) \
-	: "a"(rax), "b"(rbx), "c"(rcx), "d"(rdx) \
-	: "memory"); \
-	ret; \
-	})
-
-#ifdef CONFIG_X86_64
-#define __encls(rax, rbx, rcx, rdx...) \
-	({ \
-	int ret; \
-	asm volatile( \
-	"1: .byte 0x0f, 0x01, 0xcf;\n\t" \
-	"   xor %%eax,%%eax;\n" \
-	"2:\n" \
-	".section .fixup,\"ax\"\n" \
-	"3: movq $-1,%%rax\n" \
-	"   jmp 2b\n" \
-	".previous\n" \
-	_ASM_EXTABLE(1b, 3b) \
-	: "=a"(ret), "=b"(rbx), "=c"(rcx) \
-	: "a"(rax), "b"(rbx), "c"(rcx), rdx \
-	: "memory"); \
-	ret; \
-	})
-#else
-#define __encls(rax, rbx, rcx, rdx...) \
-	({ \
-	int ret; \
-	asm volatile( \
-	"1: .byte 0x0f, 0x01, 0xcf;\n\t" \
-	"   xor %%eax,%%eax;\n" \
-	"2:\n" \
-	".section .fixup,\"ax\"\n" \
-	"3: mov $-1,%%eax\n" \
-	"   jmp 2b\n" \
-	".previous\n" \
-	_ASM_EXTABLE(1b, 3b) \
-	: "=a"(ret), "=b"(rbx), "=c"(rcx) \
-	: "a"(rax), "b"(rbx), "c"(rcx), rdx \
-	: "memory"); \
-	ret; \
-	})
-#endif
-
-static inline unsigned long __ecreate(struct sgx_page_info *pginfo, void *secs)
-{
-	return __encls(ECREATE, pginfo, secs, "d"(0));
-}
-
-static inline int __eextend(void *secs, void *epc)
-{
-	return __encls(EEXTEND, secs, epc, "d"(0));
-}
-
-static inline int __eadd(struct sgx_page_info *pginfo, void *epc)
-{
-	return __encls(EADD, pginfo, epc, "d"(0));
-}
-
-static inline int __einit(void *sigstruct, struct sgx_einittoken *einittoken,
-			  void *secs)
-{
-	return __encls_ret(EINIT, sigstruct, secs, einittoken);
-}
-
-static inline int __eremove(void *epc)
-{
-	unsigned long rbx = 0;
-	unsigned long rdx = 0;
-
-	return __encls_ret(EREMOVE, rbx, epc, rdx);
-}
-
-static inline int __edbgwr(void *epc, unsigned long *data)
-{
-	return __encls(EDGBWR, *data, epc, "d"(0));
-}
-
-static inline int __edbgrd(void *epc, unsigned long *data)
-{
-	unsigned long rbx = 0;
-	int ret;
-
-	ret = __encls(EDGBRD, rbx, epc, "d"(0));
-	if (!ret)
-		*(unsigned long *) data = rbx;
-
-	return ret;
-}
-
-static inline int __etrack(void *epc)
-{
-	unsigned long rbx = 0;
-	unsigned long rdx = 0;
-
-	return __encls_ret(ETRACK, rbx, epc, rdx);
-}
-
-static inline int __eldu(unsigned long rbx, unsigned long rcx,
-			 unsigned long rdx)
-{
-	return __encls_ret(ELDU, rbx, rcx, rdx);
-}
-
-static inline int __eblock(unsigned long rcx)
-{
-	unsigned long rbx = 0;
-	unsigned long rdx = 0;
-
-	return __encls_ret(EBLOCK, rbx, rcx, rdx);
-}
-
-static inline int __epa(void *epc)
-{
-	unsigned long rbx = SGX_PAGE_TYPE_VA;
-
-	return __encls(EPA, rbx, epc, "d"(0));
-}
-
-static inline int __ewb(struct sgx_page_info *pginfo, void *epc, void *va)
-{
-	return __encls_ret(EWB, pginfo, epc, va);
-}
-
-static inline int __eaug(struct sgx_page_info *pginfo, void *epc)
-{
-	return __encls(EAUG, pginfo, epc, "d"(0));
-}
-
-static inline int __emodpr(struct sgx_secinfo *secinfo, void *epc)
-{
-	unsigned long rdx = 0;
-
-	return __encls_ret(EMODPR, secinfo, epc, rdx);
-}
-
-static inline int __emodt(struct sgx_secinfo *secinfo, void *epc)
-{
-	unsigned long rdx = 0;
-
-	return __encls_ret(EMODT, secinfo, epc, rdx);
-}
-
-struct sgx_encl;
-
-struct sgx_epc_page {
-	resource_size_t	pa;
-	struct list_head free_list;
-};
-
-extern struct sgx_epc_page *sgx_alloc_page(unsigned int flags);
-extern int sgx_free_page(struct sgx_epc_page *entry, struct sgx_encl *encl);
-extern void *sgx_get_page(struct sgx_epc_page *entry);
-extern void sgx_put_page(void *epc_page_vaddr);
-
-#endif /* _ASM_X86_SGX_H */
+#define SGX_MODULUS_SIZE 384
+
+struct sgx_sigstruct_header {
+	uint64_t header1[2];
+	uint32_t vendor;
+	uint32_t date;
+	uint64_t header2[2];
+	uint32_t swdefined;
+	uint8_t reserved1[84];
+};
+
+struct sgx_sigstruct_body {
+	uint32_t miscselect;
+	uint32_t miscmask;
+	uint8_t reserved2[20];
+	uint64_t attributes;
+	uint64_t xfrm;
+	uint8_t attributemask[16];
+	uint8_t mrenclave[32];
+	uint8_t reserved3[32];
+	uint16_t isvprodid;
+	uint16_t isvsvn;
+} __attribute__((__packed__));
+
+struct sgx_sigstruct {
+	struct sgx_sigstruct_header header;
+	uint8_t modulus[SGX_MODULUS_SIZE];
+	uint32_t exponent;
+	uint8_t signature[SGX_MODULUS_SIZE];
+	struct sgx_sigstruct_body body;
+	uint8_t reserved4[12];
+	uint8_t q1[SGX_MODULUS_SIZE];
+	uint8_t q2[SGX_MODULUS_SIZE];
+};
+
+struct sgx_sigstruct_payload {
+	struct sgx_sigstruct_header header;
+	struct sgx_sigstruct_body body;
+};
+
+struct sgx_einittoken_payload {
+	uint32_t valid;
+	uint32_t reserved1[11];
+	uint64_t attributes;
+	uint64_t xfrm;
+	uint8_t mrenclave[32];
+	uint8_t reserved2[32];
+	uint8_t mrsigner[32];
+	uint8_t reserved3[32];
+};
+
+struct sgx_einittoken {
+	struct sgx_einittoken_payload payload;
+	uint8_t cpusvnle[16];
+	uint16_t isvprodidle;
+	uint16_t isvsvnle;
+	uint8_t reserved2[24];
+	uint32_t maskedmiscselectle;
+	uint64_t maskedattributesle;
+	uint64_t maskedxfrmle;
+	uint8_t keyid[32];
+	uint8_t mac[16];
+};
+
+struct sgx_report {
+	uint8_t cpusvn[16];
+	uint32_t miscselect;
+	uint8_t reserved1[28];
+	uint64_t attributes;
+	uint64_t xfrm;
+	uint8_t mrenclave[32];
+	uint8_t reserved2[32];
+	uint8_t mrsigner[32];
+	uint8_t reserved3[96];
+	uint16_t isvprodid;
+	uint16_t isvsvn;
+	uint8_t reserved4[60];
+	uint8_t reportdata[64];
+	uint8_t keyid[32];
+	uint8_t mac[16];
+};
+
+struct sgx_targetinfo {
+	uint8_t mrenclave[32];
+	uint64_t attributes;
+	uint64_t xfrm;
+	uint8_t reserved1[4];
+	uint32_t miscselect;
+	uint8_t reserved2[456];
+};
+
+struct sgx_keyrequest {
+	uint16_t keyname;
+	uint16_t keypolicy;
+	uint16_t isvsvn;
+	uint16_t reserved1;
+	uint8_t cpusvn[16];
+	uint64_t attributemask;
+	uint64_t xfrmmask;
+	uint8_t keyid[32];
+	uint32_t miscmask;
+	uint8_t reserved2[436];
+};
+
+#endif /* _ASM_X86_SGX_ARCH_H */
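The reserved-field sizes in the new layouts are chosen so the architectural structures keep their fixed sizes; a compile-time check (illustrative only, not present in the file) makes that explicit:

```
/* Both SECS and TCS are architecturally one 4 KiB page:
 * sgx_secs: 8+8+4+4+24+8+8+32+32+32+96+2+2+3836 = 4096 bytes. */
_Static_assert(sizeof(struct sgx_secs) == 4096, "SECS must be 4 KiB");
_Static_assert(sizeof(struct sgx_tcs) == 4096, "TCS must be 4 KiB");
```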

View File

@@ -0,0 +1,233 @@
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2016-2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Contact Information:
* Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
* Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160 Espoo
*
* BSD LICENSE
*
* Copyright(c) 2016-2017 Intel Corporation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors:
*
* Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
* Suresh Siddha <suresh.b.siddha@intel.com>
*/
#ifndef _ASM_X86_SGX_H
#define _ASM_X86_SGX_H
#include "sgx_arch.h"
#include <asm/asm.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/types.h>
#define SGX_CPUID 0x12
enum sgx_cpuid {
SGX_CPUID_CAPABILITIES = 0,
SGX_CPUID_ATTRIBUTES = 1,
SGX_CPUID_EPC_BANKS = 2,
};
enum sgx_commands {
ECREATE = 0x0,
EADD = 0x1,
EINIT = 0x2,
EREMOVE = 0x3,
EDGBRD = 0x4,
EDGBWR = 0x5,
EEXTEND = 0x6,
ELDU = 0x8,
EBLOCK = 0x9,
EPA = 0xA,
EWB = 0xB,
ETRACK = 0xC,
EAUG = 0xD,
EMODPR = 0xE,
EMODT = 0xF,
};
#ifdef CONFIG_X86_64
#define XAX "%%rax"
#else
#define XAX "%%eax"
#endif
#define __encls_ret(rax, rbx, rcx, rdx) \
({ \
int ret; \
asm volatile( \
"1: .byte 0x0f, 0x01, 0xcf;\n\t" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: mov $-14,"XAX"\n" \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE(1b, 3b) \
: "=a"(ret) \
: "a"(rax), "b"(rbx), "c"(rcx), "d"(rdx) \
: "memory"); \
ret; \
})
#define __encls(rax, rbx, rcx, rdx...) \
({ \
int ret; \
asm volatile( \
"1: .byte 0x0f, 0x01, 0xcf;\n\t" \
" xor "XAX","XAX"\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: mov $-14,"XAX"\n" \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE(1b, 3b) \
: "=a"(ret), "=b"(rbx), "=c"(rcx) \
: "a"(rax), "b"(rbx), "c"(rcx), rdx \
: "memory"); \
ret; \
})
static inline unsigned long __ecreate(struct sgx_pageinfo *pginfo, void *secs)
{
return __encls(ECREATE, pginfo, secs, "d"(0));
}
static inline int __eextend(void *secs, void *epc)
{
return __encls(EEXTEND, secs, epc, "d"(0));
}
static inline int __eadd(struct sgx_pageinfo *pginfo, void *epc)
{
return __encls(EADD, pginfo, epc, "d"(0));
}
static inline int __einit(void *sigstruct, struct sgx_einittoken *einittoken,
void *secs)
{
return __encls_ret(EINIT, sigstruct, secs, einittoken);
}
static inline int __eremove(void *epc)
{
unsigned long rbx = 0;
unsigned long rdx = 0;
return __encls_ret(EREMOVE, rbx, epc, rdx);
}
static inline int __edbgwr(void *epc, unsigned long *data)
{
return __encls(EDGBWR, *data, epc, "d"(0));
}
static inline int __edbgrd(void *epc, unsigned long *data)
{
unsigned long rbx = 0;
int ret;
ret = __encls(EDGBRD, rbx, epc, "d"(0));
if (!ret)
*(unsigned long *) data = rbx;
return ret;
}
static inline int __etrack(void *epc)
{
unsigned long rbx = 0;
unsigned long rdx = 0;
return __encls_ret(ETRACK, rbx, epc, rdx);
}
static inline int __eldu(unsigned long rbx, unsigned long rcx,
unsigned long rdx)
{
return __encls_ret(ELDU, rbx, rcx, rdx);
}
static inline int __eblock(unsigned long rcx)
{
unsigned long rbx = 0;
unsigned long rdx = 0;
return __encls_ret(EBLOCK, rbx, rcx, rdx);
}
static inline int __epa(void *epc)
{
unsigned long rbx = SGX_PAGE_TYPE_VA;
return __encls(EPA, rbx, epc, "d"(0));
}
static inline int __ewb(struct sgx_pageinfo *pginfo, void *epc, void *va)
{
return __encls_ret(EWB, pginfo, epc, va);
}
static inline int __eaug(struct sgx_pageinfo *pginfo, void *epc)
{
return __encls(EAUG, pginfo, epc, "d"(0));
}
static inline int __emodpr(struct sgx_secinfo *secinfo, void *epc)
{
unsigned long rdx = 0;
return __encls_ret(EMODPR, secinfo, epc, rdx);
}
static inline int __emodt(struct sgx_secinfo *secinfo, void *epc)
{
unsigned long rdx = 0;
return __encls_ret(EMODT, secinfo, epc, rdx);
}
#endif /* _ASM_X86_SGX_H */
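The `.byte 0x0f, 0x01, 0xcf` sequence above is the ENCLS opcode, and the fixup path now returns -14 (-EFAULT) when the instruction faults. An illustrative caller (hypothetical; compare the sgx_eremove() helper declared in sgx.h) drives a leaf through the mapping helpers:

```
/* Sketch only: run the EREMOVE leaf against one EPC page. */
static int example_eremove(struct sgx_epc_page *page)
{
	void *epc = sgx_get_page(page); /* map the EPC page */
	int ret = __eremove(epc);       /* 0, an SGX error code, or -EFAULT */

	sgx_put_page(epc);
	return ret;
}
```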

View File

@@ -0,0 +1,993 @@
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2016-2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Contact Information:
* Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
* Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160 Espoo
*
* BSD LICENSE
*
* Copyright(c) 2016-2017 Intel Corporation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors:
*
* Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
* Suresh Siddha <suresh.b.siddha@intel.com>
* Serge Ayoun <serge.ayoun@intel.com>
* Shay Katz-zamir <shay.katz-zamir@intel.com>
* Sean Christopherson <sean.j.christopherson@intel.com>
*/
#include "sgx.h"
#include <asm/mman.h>
#include <linux/delay.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/ratelimit.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
#include <linux/sched/signal.h>
#else
#include <linux/signal.h>
#endif
#include "linux/file.h"
#include <linux/slab.h>
#include <linux/hashtable.h>
#include <linux/shmem_fs.h>
struct sgx_add_page_req {
struct sgx_encl *encl;
struct sgx_encl_page *encl_page;
struct sgx_secinfo secinfo;
u16 mrmask;
struct list_head list;
};
/**
* sgx_encl_find - find an enclave
* @mm: mm struct of the current process
* @addr: address in the ELRANGE
* @vma: the resulting VMA
*
* Finds an enclave identified by the given address. Gives back the VMA, that
* is part of the enclave, located in that address. The VMA is given back if it
* is a proper enclave VMA even if a &struct sgx_encl instance does not exist
* yet (enclave creation has not been performed).
*
* Return:
* 0 on success,
* -EINVAL if an enclave was not found,
* -ENOENT if the enclave has not been created yet
*/
int sgx_encl_find(struct mm_struct *mm, unsigned long addr,
struct vm_area_struct **vma)
{
struct vm_area_struct *result;
struct sgx_encl *encl;
result = find_vma(mm, addr);
if (!result || result->vm_ops != &sgx_vm_ops || addr < result->vm_start)
return -EINVAL;
encl = result->vm_private_data;
*vma = result;
return encl ? 0 : -ENOENT;
}
static struct sgx_tgid_ctx *sgx_find_tgid_ctx(struct pid *tgid)
{
struct sgx_tgid_ctx *ctx;
list_for_each_entry(ctx, &sgx_tgid_ctx_list, list)
if (pid_nr(ctx->tgid) == pid_nr(tgid))
return ctx;
return NULL;
}
static int sgx_add_to_tgid_ctx(struct sgx_encl *encl)
{
struct sgx_tgid_ctx *ctx;
struct pid *tgid = get_pid(task_tgid(current));
mutex_lock(&sgx_tgid_ctx_mutex);
ctx = sgx_find_tgid_ctx(tgid);
if (ctx) {
if (kref_get_unless_zero(&ctx->refcount)) {
encl->tgid_ctx = ctx;
mutex_unlock(&sgx_tgid_ctx_mutex);
put_pid(tgid);
return 0;
} else {
list_del_init(&ctx->list);
}
}
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
mutex_unlock(&sgx_tgid_ctx_mutex);
put_pid(tgid);
return -ENOMEM;
}
ctx->tgid = tgid;
kref_init(&ctx->refcount);
INIT_LIST_HEAD(&ctx->encl_list);
list_add(&ctx->list, &sgx_tgid_ctx_list);
encl->tgid_ctx = ctx;
mutex_unlock(&sgx_tgid_ctx_mutex);
return 0;
}
void sgx_tgid_ctx_release(struct kref *ref)
{
struct sgx_tgid_ctx *pe =
container_of(ref, struct sgx_tgid_ctx, refcount);
mutex_lock(&sgx_tgid_ctx_mutex);
list_del(&pe->list);
mutex_unlock(&sgx_tgid_ctx_mutex);
put_pid(pe->tgid);
kfree(pe);
}
static int sgx_measure(struct sgx_epc_page *secs_page,
struct sgx_epc_page *epc_page,
u16 mrmask)
{
void *secs;
void *epc;
int ret = 0;
int i, j;
for (i = 0, j = 1; i < 0x1000 && !ret; i += 0x100, j <<= 1) {
if (!(j & mrmask))
continue;
secs = sgx_get_page(secs_page);
epc = sgx_get_page(epc_page);
ret = __eextend(secs, (void *)((unsigned long)epc + i));
sgx_put_page(epc);
sgx_put_page(secs);
}
return ret;
}
static int sgx_eadd(struct sgx_epc_page *secs_page,
struct sgx_epc_page *epc_page,
unsigned long linaddr,
struct sgx_secinfo *secinfo,
struct page *backing)
{
struct sgx_pageinfo pginfo;
void *epc_page_vaddr;
int ret;
pginfo.srcpge = (unsigned long)kmap_atomic(backing);
pginfo.secs = (unsigned long)sgx_get_page(secs_page);
epc_page_vaddr = sgx_get_page(epc_page);
pginfo.linaddr = linaddr;
pginfo.secinfo = (unsigned long)secinfo;
ret = __eadd(&pginfo, epc_page_vaddr);
sgx_put_page(epc_page_vaddr);
sgx_put_page((void *)(unsigned long)pginfo.secs);
kunmap_atomic((void *)(unsigned long)pginfo.srcpge);
return ret;
}
static bool sgx_process_add_page_req(struct sgx_add_page_req *req,
struct sgx_epc_page *epc_page)
{
struct page *backing;
struct sgx_encl_page *encl_page = req->encl_page;
struct sgx_encl *encl = req->encl;
struct vm_area_struct *vma;
int ret;
if (encl->flags & (SGX_ENCL_SUSPEND | SGX_ENCL_DEAD))
return false;
ret = sgx_encl_find(encl->mm, encl_page->addr, &vma);
if (ret)
return false;
backing = sgx_get_backing(encl, encl_page, false);
if (IS_ERR(backing))
return false;
/* Do not race with do_exit() */
if (!atomic_read(&encl->mm->mm_users)) {
sgx_put_backing(backing, 0);
return false;
}
ret = vm_insert_pfn(vma, encl_page->addr, PFN_DOWN(epc_page->pa));
if (ret) {
sgx_put_backing(backing, 0);
return false;
}
ret = sgx_eadd(encl->secs.epc_page, epc_page, encl_page->addr,
&req->secinfo, backing);
sgx_put_backing(backing, 0);
if (ret) {
sgx_warn(encl, "EADD returned %d\n", ret);
zap_vma_ptes(vma, encl_page->addr, PAGE_SIZE);
return false;
}
encl->secs_child_cnt++;
ret = sgx_measure(encl->secs.epc_page, epc_page, req->mrmask);
if (ret) {
sgx_warn(encl, "EEXTEND returned %d\n", ret);
zap_vma_ptes(vma, encl_page->addr, PAGE_SIZE);
return false;
}
epc_page->encl_page = encl_page;
encl_page->epc_page = epc_page;
sgx_test_and_clear_young(encl_page, encl);
list_add_tail(&epc_page->list, &encl->load_list);
return true;
}
static void sgx_add_page_worker(struct work_struct *work)
{
struct sgx_encl *encl;
struct sgx_add_page_req *req;
struct sgx_epc_page *epc_page;
bool skip_rest = false;
bool is_empty = false;
encl = container_of(work, struct sgx_encl, add_page_work);
do {
schedule();
if (encl->flags & SGX_ENCL_DEAD)
skip_rest = true;
mutex_lock(&encl->lock);
req = list_first_entry(&encl->add_page_reqs,
struct sgx_add_page_req, list);
list_del(&req->list);
is_empty = list_empty(&encl->add_page_reqs);
mutex_unlock(&encl->lock);
if (skip_rest)
goto next;
epc_page = sgx_alloc_page(0);
if (IS_ERR(epc_page)) {
skip_rest = true;
goto next;
}
down_read(&encl->mm->mmap_sem);
mutex_lock(&encl->lock);
if (!sgx_process_add_page_req(req, epc_page)) {
sgx_free_page(epc_page, encl);
skip_rest = true;
}
mutex_unlock(&encl->lock);
up_read(&encl->mm->mmap_sem);
next:
kfree(req);
} while (!kref_put(&encl->refcount, sgx_encl_release) && !is_empty);
}
static u32 sgx_calc_ssaframesize(u32 miscselect, u64 xfrm)
{
u32 size_max = PAGE_SIZE;
u32 size;
int i;
for (i = 2; i < 64; i++) {
if (!((1 << i) & xfrm))
continue;
size = SGX_SSA_GPRS_SIZE + sgx_xsave_size_tbl[i];
if (miscselect & SGX_MISC_EXINFO)
size += SGX_SSA_MISC_EXINFO_SIZE;
if (size > size_max)
size_max = size;
}
return (size_max + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
static int sgx_validate_secs(const struct sgx_secs *secs,
unsigned long ssaframesize)
{
int i;
if (secs->size < (2 * PAGE_SIZE) ||
(secs->size & (secs->size - 1)) != 0)
return -EINVAL;
if (secs->base & (secs->size - 1))
return -EINVAL;
if (secs->attributes & SGX_ATTR_RESERVED_MASK ||
secs->miscselect & sgx_misc_reserved)
return -EINVAL;
if (secs->attributes & SGX_ATTR_MODE64BIT) {
#ifdef CONFIG_X86_64
if (secs->size > sgx_encl_size_max_64)
return -EINVAL;
#else
return -EINVAL;
#endif
} else {
/* On 64-bit architecture allow 32-bit encls only in
* the compatibility mode.
*/
#ifdef CONFIG_X86_64
if (!test_thread_flag(TIF_ADDR32))
return -EINVAL;
#endif
if (secs->size > sgx_encl_size_max_32)
return -EINVAL;
}
if ((secs->xfrm & 0x3) != 0x3 || (secs->xfrm & ~sgx_xfrm_mask))
return -EINVAL;
/* Check that BNDREGS and BNDCSR are equal. */
if (((secs->xfrm >> 3) & 1) != ((secs->xfrm >> 4) & 1))
return -EINVAL;
if (!secs->ssaframesize || ssaframesize > secs->ssaframesize)
return -EINVAL;
for (i = 0; i < SGX_SECS_RESERVED1_SIZE; i++)
if (secs->reserved1[i])
return -EINVAL;
for (i = 0; i < SGX_SECS_RESERVED2_SIZE; i++)
if (secs->reserved2[i])
return -EINVAL;
for (i = 0; i < SGX_SECS_RESERVED3_SIZE; i++)
if (secs->reserved3[i])
return -EINVAL;
for (i = 0; i < SGX_SECS_RESERVED4_SIZE; i++)
if (secs->reserved4[i])
return -EINVAL;
return 0;
}
static void sgx_mmu_notifier_release(struct mmu_notifier *mn,
struct mm_struct *mm)
{
struct sgx_encl *encl =
container_of(mn, struct sgx_encl, mmu_notifier);
mutex_lock(&encl->lock);
encl->flags |= SGX_ENCL_DEAD;
mutex_unlock(&encl->lock);
}
static const struct mmu_notifier_ops sgx_mmu_notifier_ops = {
.release = sgx_mmu_notifier_release,
};
static int sgx_init_page(struct sgx_encl *encl, struct sgx_encl_page *entry,
unsigned long addr, unsigned int alloc_flags)
{
struct sgx_va_page *va_page;
struct sgx_epc_page *epc_page = NULL;
unsigned int va_offset = PAGE_SIZE;
void *vaddr;
int ret = 0;
list_for_each_entry(va_page, &encl->va_pages, list) {
va_offset = sgx_alloc_va_slot(va_page);
if (va_offset < PAGE_SIZE)
break;
}
if (va_offset == PAGE_SIZE) {
va_page = kzalloc(sizeof(*va_page), GFP_KERNEL);
if (!va_page)
return -ENOMEM;
epc_page = sgx_alloc_page(alloc_flags);
if (IS_ERR(epc_page)) {
kfree(va_page);
return PTR_ERR(epc_page);
}
vaddr = sgx_get_page(epc_page);
if (!vaddr) {
sgx_warn(encl, "kmap of a new VA page failed %d\n",
ret);
sgx_free_page(epc_page, encl);
kfree(va_page);
return -EFAULT;
}
ret = __epa(vaddr);
sgx_put_page(vaddr);
if (ret) {
sgx_warn(encl, "EPA returned %d\n", ret);
sgx_free_page(epc_page, encl);
kfree(va_page);
return -EFAULT;
}
atomic_inc(&sgx_va_pages_cnt);
va_page->epc_page = epc_page;
va_offset = sgx_alloc_va_slot(va_page);
mutex_lock(&encl->lock);
list_add(&va_page->list, &encl->va_pages);
mutex_unlock(&encl->lock);
}
entry->va_page = va_page;
entry->va_offset = va_offset;
entry->addr = addr;
return 0;
}
/**
* sgx_encl_alloc - allocate memory for an enclave and set attributes
*
* @secs: SECS data (must be page aligned)
*
* Allocates a new &struct sgx_encl instance. Validates SECS attributes, creates
* backing storage for the enclave and sets enclave attributes to sane initial
* values.
*
* Return:
* &struct sgx_encl instance on success,
* system error on failure
*/
static struct sgx_encl *sgx_encl_alloc(struct sgx_secs *secs)
{
unsigned long ssaframesize;
struct sgx_encl *encl;
struct file *backing;
struct file *pcmd;
ssaframesize = sgx_calc_ssaframesize(secs->miscselect, secs->xfrm);
if (sgx_validate_secs(secs, ssaframesize))
return ERR_PTR(-EINVAL);
backing = shmem_file_setup("[dev/sgx]", secs->size + PAGE_SIZE,
VM_NORESERVE);
if (IS_ERR(backing))
return (void *)backing;
pcmd = shmem_file_setup("[dev/sgx]", (secs->size + PAGE_SIZE) >> 5,
VM_NORESERVE);
if (IS_ERR(pcmd)) {
fput(backing);
return (void *)pcmd;
}
encl = kzalloc(sizeof(*encl), GFP_KERNEL);
if (!encl) {
fput(backing);
fput(pcmd);
return ERR_PTR(-ENOMEM);
}
encl->attributes = secs->attributes;
encl->xfrm = secs->xfrm;
kref_init(&encl->refcount);
INIT_LIST_HEAD(&encl->add_page_reqs);
INIT_LIST_HEAD(&encl->va_pages);
INIT_RADIX_TREE(&encl->page_tree, GFP_KERNEL);
INIT_LIST_HEAD(&encl->load_list);
INIT_LIST_HEAD(&encl->encl_list);
mutex_init(&encl->lock);
INIT_WORK(&encl->add_page_work, sgx_add_page_worker);
encl->mm = current->mm;
encl->base = secs->base;
encl->size = secs->size;
encl->ssaframesize = secs->ssaframesize;
encl->backing = backing;
encl->pcmd = pcmd;
return encl;
}
/**
* sgx_encl_create - create an enclave
*
* @secs: page aligned SECS data
*
* Validates SECS attributes, allocates an EPC page for the SECS and creates
* the enclave by performing ECREATE.
*
* Return:
* 0 on success,
* system error on failure
*/
int sgx_encl_create(struct sgx_secs *secs)
{
struct sgx_pageinfo pginfo;
struct sgx_secinfo secinfo;
struct sgx_encl *encl;
struct sgx_epc_page *secs_epc;
struct vm_area_struct *vma;
void *secs_vaddr;
long ret;
encl = sgx_encl_alloc(secs);
if (IS_ERR(encl))
return PTR_ERR(encl);
secs_epc = sgx_alloc_page(0);
if (IS_ERR(secs_epc)) {
ret = PTR_ERR(secs_epc);
goto out;
}
encl->secs.epc_page = secs_epc;
ret = sgx_add_to_tgid_ctx(encl);
if (ret)
goto out;
ret = sgx_init_page(encl, &encl->secs, encl->base + encl->size, 0);
if (ret)
goto out;
secs_vaddr = sgx_get_page(secs_epc);
pginfo.srcpge = (unsigned long)secs;
pginfo.linaddr = 0;
pginfo.secinfo = (unsigned long)&secinfo;
pginfo.secs = 0;
memset(&secinfo, 0, sizeof(secinfo));
ret = __ecreate((void *)&pginfo, secs_vaddr);
sgx_put_page(secs_vaddr);
if (ret) {
sgx_dbg(encl, "ECREATE returned %ld\n", ret);
ret = -EFAULT;
goto out;
}
if (secs->attributes & SGX_ATTR_DEBUG)
encl->flags |= SGX_ENCL_DEBUG;
encl->mmu_notifier.ops = &sgx_mmu_notifier_ops;
ret = mmu_notifier_register(&encl->mmu_notifier, encl->mm);
if (ret) {
if (ret == -EINTR)
ret = -ERESTARTSYS;
encl->mmu_notifier.ops = NULL;
goto out;
}
down_read(&current->mm->mmap_sem);
ret = sgx_encl_find(current->mm, secs->base, &vma);
if (ret != -ENOENT) {
if (!ret)
ret = -EINVAL;
up_read(&current->mm->mmap_sem);
goto out;
}
if (vma->vm_start != secs->base ||
vma->vm_end != (secs->base + secs->size)
/* vma->vm_pgoff != 0 */) {
ret = -EINVAL;
up_read(&current->mm->mmap_sem);
goto out;
}
vma->vm_private_data = encl;
up_read(&current->mm->mmap_sem);
mutex_lock(&sgx_tgid_ctx_mutex);
list_add_tail(&encl->encl_list, &encl->tgid_ctx->encl_list);
mutex_unlock(&sgx_tgid_ctx_mutex);
return 0;
out:
if (encl)
kref_put(&encl->refcount, sgx_encl_release);
return ret;
}
static int sgx_validate_secinfo(struct sgx_secinfo *secinfo)
{
u64 perm = secinfo->flags & SGX_SECINFO_PERMISSION_MASK;
u64 page_type = secinfo->flags & SGX_SECINFO_PAGE_TYPE_MASK;
int i;
if ((secinfo->flags & SGX_SECINFO_RESERVED_MASK) ||
((perm & SGX_SECINFO_W) && !(perm & SGX_SECINFO_R)) ||
(page_type != SGX_SECINFO_TCS &&
page_type != SGX_SECINFO_REG))
return -EINVAL;
for (i = 0; i < sizeof(secinfo->reserved) / sizeof(u64); i++)
if (secinfo->reserved[i])
return -EINVAL;
return 0;
}
static bool sgx_validate_offset(struct sgx_encl *encl, unsigned long offset)
{
if (offset & (PAGE_SIZE - 1))
return false;
if (offset >= encl->size)
return false;
return true;
}
static int sgx_validate_tcs(struct sgx_encl *encl, struct sgx_tcs *tcs)
{
int i;
if (tcs->flags & SGX_TCS_RESERVED_MASK) {
sgx_dbg(encl, "%s: invalid TCS flags = 0x%lx\n",
__func__, (unsigned long)tcs->flags);
return -EINVAL;
}
if (tcs->flags & SGX_TCS_DBGOPTIN) {
sgx_dbg(encl, "%s: DBGOPTIN TCS flag is set, EADD will clear it\n",
__func__);
return -EINVAL;
}
if (!sgx_validate_offset(encl, tcs->ossa)) {
sgx_dbg(encl, "%s: invalid OSSA: 0x%lx\n", __func__,
(unsigned long)tcs->ossa);
return -EINVAL;
}
if (!sgx_validate_offset(encl, tcs->ofsbase)) {
sgx_dbg(encl, "%s: invalid OFSBASE: 0x%lx\n", __func__,
(unsigned long)tcs->ofsbase);
return -EINVAL;
}
if (!sgx_validate_offset(encl, tcs->ogsbase)) {
sgx_dbg(encl, "%s: invalid OGSBASE: 0x%lx\n", __func__,
(unsigned long)tcs->ogsbase);
return -EINVAL;
}
if ((tcs->fslimit & 0xFFF) != 0xFFF) {
sgx_dbg(encl, "%s: invalid FSLIMIT: 0x%x\n", __func__,
tcs->fslimit);
return -EINVAL;
}
if ((tcs->gslimit & 0xFFF) != 0xFFF) {
sgx_dbg(encl, "%s: invalid GSLIMIT: 0x%x\n", __func__,
tcs->gslimit);
return -EINVAL;
}
for (i = 0; i < sizeof(tcs->reserved) / sizeof(u64); i++)
if (tcs->reserved[i])
return -EINVAL;
return 0;
}
static int __sgx_encl_add_page(struct sgx_encl *encl,
struct sgx_encl_page *encl_page,
unsigned long addr,
void *data,
struct sgx_secinfo *secinfo,
unsigned int mrmask)
{
u64 page_type = secinfo->flags & SGX_SECINFO_PAGE_TYPE_MASK;
struct page *backing;
struct sgx_add_page_req *req = NULL;
int ret;
int empty;
void *backing_ptr;
if (sgx_validate_secinfo(secinfo))
return -EINVAL;
if (page_type == SGX_SECINFO_TCS) {
ret = sgx_validate_tcs(encl, data);
if (ret)
return ret;
}
ret = sgx_init_page(encl, encl_page, addr, 0);
if (ret)
return ret;
mutex_lock(&encl->lock);
if (encl->flags & (SGX_ENCL_INITIALIZED | SGX_ENCL_DEAD)) {
ret = -EINVAL;
goto out;
}
if (radix_tree_lookup(&encl->page_tree, addr >> PAGE_SHIFT)) {
ret = -EEXIST;
goto out;
}
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req) {
ret = -ENOMEM;
goto out;
}
backing = sgx_get_backing(encl, encl_page, false);
if (IS_ERR((void *)backing)) {
ret = PTR_ERR((void *)backing);
goto out;
}
ret = radix_tree_insert(&encl->page_tree, encl_page->addr >> PAGE_SHIFT,
encl_page);
if (ret) {
sgx_put_backing(backing, false /* write */);
goto out;
}
backing_ptr = kmap(backing);
memcpy(backing_ptr, data, PAGE_SIZE);
kunmap(backing);
if (page_type == SGX_SECINFO_TCS)
encl_page->flags |= SGX_ENCL_PAGE_TCS;
memcpy(&req->secinfo, secinfo, sizeof(*secinfo));
req->encl = encl;
req->encl_page = encl_page;
req->mrmask = mrmask;
empty = list_empty(&encl->add_page_reqs);
kref_get(&encl->refcount);
list_add_tail(&req->list, &encl->add_page_reqs);
if (empty)
queue_work(sgx_add_page_wq, &encl->add_page_work);
sgx_put_backing(backing, true /* write */);
mutex_unlock(&encl->lock);
return 0;
out:
kfree(req);
sgx_free_va_slot(encl_page->va_page, encl_page->va_offset);
mutex_unlock(&encl->lock);
return ret;
}
/**
* sgx_encl_add_page - add a page to the enclave
*
* @encl: an enclave
* @addr: page address in the ELRANGE
* @data: page data
* @secinfo: page permissions
* @mrmask: bitmask to select the 256 byte chunks to be measured
*
* Creates a new enclave page and enqueues an EADD operation that will be
* processed by a worker thread later on.
*
* Return:
* 0 on success,
* system error on failure
*/
int sgx_encl_add_page(struct sgx_encl *encl, unsigned long addr, void *data,
struct sgx_secinfo *secinfo, unsigned int mrmask)
{
struct sgx_encl_page *page;
int ret;
page = kzalloc(sizeof(*page), GFP_KERNEL);
if (!page)
return -ENOMEM;
ret = __sgx_encl_add_page(encl, page, addr, data, secinfo, mrmask);
if (ret)
kfree(page);
return ret;
}
static int sgx_einit(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct,
struct sgx_einittoken *token)
{
struct sgx_epc_page *secs_epc = encl->secs.epc_page;
void *secs_va;
int ret;
secs_va = sgx_get_page(secs_epc);
ret = __einit(sigstruct, token, secs_va);
sgx_put_page(secs_va);
return ret;
}
/**
* sgx_encl_init - perform EINIT for the given enclave
*
* @encl: an enclave
* @sigstruct: SIGSTRUCT for the enclave
* @token: EINITTOKEN for the enclave
*
* Retries a few times in order to perform EINIT operation on an enclave
* because there could be potentially an interrupt storm.
*
* Return:
* 0 on success,
* -FAULT on a CPU exception during EINIT,
* SGX error code
*/
int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct,
struct sgx_einittoken *token)
{
int ret;
int i;
int j;
flush_work(&encl->add_page_work);
mutex_lock(&encl->lock);
if (encl->flags & SGX_ENCL_INITIALIZED) {
mutex_unlock(&encl->lock);
return 0;
}
for (i = 0; i < SGX_EINIT_SLEEP_COUNT; i++) {
for (j = 0; j < SGX_EINIT_SPIN_COUNT; j++) {
ret = sgx_einit(encl, sigstruct, token);
if (ret == SGX_UNMASKED_EVENT)
continue;
else
break;
}
if (ret != SGX_UNMASKED_EVENT)
break;
msleep_interruptible(SGX_EINIT_SLEEP_TIME);
if (signal_pending(current)) {
mutex_unlock(&encl->lock);
return -ERESTARTSYS;
}
}
mutex_unlock(&encl->lock);
if (ret) {
if (ret > 0)
sgx_dbg(encl, "EINIT returned %d\n", ret);
return ret;
}
encl->flags |= SGX_ENCL_INITIALIZED;
return 0;
}
void sgx_encl_release(struct kref *ref)
{
struct sgx_encl_page *entry;
struct sgx_va_page *va_page;
struct sgx_encl *encl = container_of(ref, struct sgx_encl, refcount);
struct radix_tree_iter iter;
void **slot;
mutex_lock(&sgx_tgid_ctx_mutex);
if (!list_empty(&encl->encl_list))
list_del(&encl->encl_list);
mutex_unlock(&sgx_tgid_ctx_mutex);
if (encl->mmu_notifier.ops)
mmu_notifier_unregister_no_release(&encl->mmu_notifier,
encl->mm);
radix_tree_for_each_slot(slot, &encl->page_tree, &iter, 0) {
entry = *slot;
if (entry->epc_page) {
list_del(&entry->epc_page->list);
sgx_free_page(entry->epc_page, encl);
}
radix_tree_delete(&encl->page_tree, entry->addr >> PAGE_SHIFT);
kfree(entry);
}
while (!list_empty(&encl->va_pages)) {
va_page = list_first_entry(&encl->va_pages,
struct sgx_va_page, list);
list_del(&va_page->list);
sgx_free_page(va_page->epc_page, encl);
kfree(va_page);
atomic_dec(&sgx_va_pages_cnt);
}
if (encl->secs.epc_page)
sgx_free_page(encl->secs.epc_page, encl);
if (encl->tgid_ctx)
kref_put(&encl->tgid_ctx->refcount, sgx_tgid_ctx_release);
if (encl->backing)
fput(encl->backing);
if (encl->pcmd)
fput(encl->pcmd);
kfree(encl);
}
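From userspace, the three entry points implemented above are reached through the driver's ioctls. A rough sketch of the call sequence (the SGX_IOC_* names, the argument structs, and the /dev/isgx node live in sgx_user.h and the vma/main code, none of which are in this diff, so treat them as assumptions; error handling omitted):

```
/* Userspace sketch: create, populate, then initialize an enclave. */
int fd = open("/dev/isgx", O_RDWR);                 /* device node name assumed */

struct sgx_enclave_create create_parms = { .src = (uintptr_t)secs_page };
ioctl(fd, SGX_IOC_ENCLAVE_CREATE, &create_parms);   /* -> sgx_encl_create() */

/* one SGX_IOC_ENCLAVE_ADD_PAGE per enclave page    -> sgx_encl_add_page() */
/* finally SGX_IOC_ENCLAVE_INIT with SIGSTRUCT and
 * EINITTOKEN                                       -> sgx_encl_init() */
```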

View File

@@ -4,7 +4,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016-2017 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -21,7 +21,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016-2017 Intel Corporation.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -64,7 +64,7 @@
 #include <linux/file.h>
 #include <linux/highmem.h>
 #include <linux/ratelimit.h>
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
 #include <linux/sched/signal.h>
 #else
 #include <linux/signal.h>
@ -73,805 +73,138 @@
#include <linux/hashtable.h> #include <linux/hashtable.h>
#include <linux/shmem_fs.h> #include <linux/shmem_fs.h>
struct sgx_add_page_req { static int sgx_get_encl(unsigned long addr, struct sgx_encl **encl)
struct sgx_encl *encl;
struct sgx_encl_page *encl_page;
struct sgx_secinfo secinfo;
u16 mrmask;
struct list_head list;
};
static u16 sgx_isvsvnle_min;
atomic_t sgx_nr_pids = ATOMIC_INIT(0);
static struct sgx_tgid_ctx *sgx_find_tgid_ctx(struct pid *tgid)
{
struct sgx_tgid_ctx *ctx;
list_for_each_entry(ctx, &sgx_tgid_ctx_list, list)
if (pid_nr(ctx->tgid) == pid_nr(tgid))
return ctx;
return NULL;
}
static int sgx_add_to_tgid_ctx(struct sgx_encl *encl)
{
struct sgx_tgid_ctx *ctx;
struct pid *tgid = get_pid(task_tgid(current));
mutex_lock(&sgx_tgid_ctx_mutex);
ctx = sgx_find_tgid_ctx(tgid);
if (ctx) {
if (kref_get_unless_zero(&ctx->refcount)) {
encl->tgid_ctx = ctx;
mutex_unlock(&sgx_tgid_ctx_mutex);
put_pid(tgid);
return 0;
}
else
list_del_init(&ctx->list);
}
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
mutex_unlock(&sgx_tgid_ctx_mutex);
put_pid(tgid);
return -ENOMEM;
}
ctx->tgid = tgid;
kref_init(&ctx->refcount);
INIT_LIST_HEAD(&ctx->encl_list);
list_add(&ctx->list, &sgx_tgid_ctx_list);
atomic_inc(&sgx_nr_pids);
encl->tgid_ctx = ctx;
mutex_unlock(&sgx_tgid_ctx_mutex);
return 0;
}
void sgx_tgid_ctx_release(struct kref *ref)
{
struct sgx_tgid_ctx *pe =
container_of(ref, struct sgx_tgid_ctx, refcount);
mutex_lock(&sgx_tgid_ctx_mutex);
list_del(&pe->list);
atomic_dec(&sgx_nr_pids);
mutex_unlock(&sgx_tgid_ctx_mutex);
put_pid(pe->tgid);
kfree(pe);
}
static int sgx_find_and_get_encl(unsigned long addr, struct sgx_encl **encl)
{ {
struct mm_struct *mm = current->mm; struct mm_struct *mm = current->mm;
struct vm_area_struct *vma; struct vm_area_struct *vma;
int ret; int ret;
if (addr & (PAGE_SIZE - 1))
return -EINVAL;
down_read(&mm->mmap_sem); down_read(&mm->mmap_sem);
ret = sgx_find_encl(mm, addr, &vma); ret = sgx_encl_find(mm, addr, &vma);
if (!ret) { if (!ret) {
*encl = vma->vm_private_data; *encl = vma->vm_private_data;
if ((*encl)->flags & SGX_ENCL_SUSPEND)
ret = SGX_POWER_LOST_ENCLAVE;
else
kref_get(&(*encl)->refcount); kref_get(&(*encl)->refcount);
} }
up_read(&mm->mmap_sem); up_read(&mm->mmap_sem);
return ret; return ret;
} }
static int sgx_measure(struct sgx_epc_page *secs_page,
struct sgx_epc_page *epc_page,
u16 mrmask)
{
void *secs;
void *epc;
int ret = 0;
int i, j;
for (i = 0, j = 1; i < 0x1000 && !ret; i += 0x100, j <<= 1) {
if (!(j & mrmask))
continue;
secs = sgx_get_page(secs_page);
epc = sgx_get_page(epc_page);
ret = __eextend(secs, (void *)((unsigned long)epc + i));
sgx_put_page(epc);
sgx_put_page(secs);
}
return ret;
}
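
The mrmask loop above is easy to misread: bit i of the 16-bit mask selects whether the i-th 256-byte chunk of the 4 KiB page gets EEXTENDed into the enclave measurement. A standalone sketch of the same chunk arithmetic (plain userspace C, no SGX needed):

#include <stdio.h>
#include <stdint.h>

/* Print which 256-byte chunks of a 4 KiB page a given mrmask
 * would measure -- mirrors the loop in sgx_measure() above. */
static void dump_measured_chunks(uint16_t mrmask)
{
        unsigned int i, j;

        for (i = 0, j = 1; i < 0x1000; i += 0x100, j <<= 1) {
                if (j & mrmask)
                        printf("measure bytes 0x%03x-0x%03x\n", i, i + 0xff);
        }
}

int main(void)
{
        dump_measured_chunks(0xffff);   /* full page, the common case */
        return 0;
}

A mask of 0xffff measures the whole page; 0 adds the page without measuring it, which is how unmeasured heap pages are typically added.
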
static int sgx_add_page(struct sgx_epc_page *secs_page,
struct sgx_epc_page *epc_page,
unsigned long linaddr,
struct sgx_secinfo *secinfo,
struct page *backing)
{
struct sgx_page_info pginfo;
void *epc_page_vaddr;
int ret;
pginfo.srcpge = (unsigned long)kmap_atomic(backing);
pginfo.secs = (unsigned long)sgx_get_page(secs_page);
epc_page_vaddr = sgx_get_page(epc_page);
pginfo.linaddr = linaddr;
pginfo.secinfo = (unsigned long)secinfo;
ret = __eadd(&pginfo, epc_page_vaddr);
sgx_put_page(epc_page_vaddr);
sgx_put_page((void *)(unsigned long)pginfo.secs);
kunmap_atomic((void *)(unsigned long)pginfo.srcpge);
return ret;
}
static bool sgx_process_add_page_req(struct sgx_add_page_req *req)
{
struct page *backing;
struct sgx_epc_page *epc_page;
struct sgx_encl_page *encl_page = req->encl_page;
struct sgx_encl *encl = req->encl;
struct vm_area_struct *vma;
int ret;
epc_page = sgx_alloc_page(0);
if (IS_ERR(epc_page))
return false;
down_read(&encl->mm->mmap_sem);
mutex_lock(&encl->lock);
if (encl->flags & SGX_ENCL_DEAD)
goto out;
if (sgx_find_encl(encl->mm, encl_page->addr, &vma))
goto out;
backing = sgx_get_backing(encl, encl_page, false);
if (IS_ERR(backing))
goto out;
/* Do not race with do_exit() */
if (!atomic_read(&encl->mm->mm_users)) {
sgx_put_backing(backing, 0);
goto out;
}
ret = vm_insert_pfn(vma, encl_page->addr, PFN_DOWN(epc_page->pa));
if (ret)
goto out;
ret = sgx_add_page(encl->secs_page.epc_page, epc_page,
encl_page->addr, &req->secinfo, backing);
sgx_put_backing(backing, 0);
if (ret) {
sgx_warn(encl, "EADD returned %d\n", ret);
zap_vma_ptes(vma, encl_page->addr, PAGE_SIZE);
goto out;
}
encl->secs_child_cnt++;
ret = sgx_measure(encl->secs_page.epc_page, epc_page, req->mrmask);
if (ret) {
sgx_warn(encl, "EEXTEND returned %d\n", ret);
zap_vma_ptes(vma, encl_page->addr, PAGE_SIZE);
goto out;
}
encl_page->epc_page = epc_page;
sgx_test_and_clear_young(encl_page, encl);
list_add_tail(&encl_page->load_list, &encl->load_list);
mutex_unlock(&encl->lock);
up_read(&encl->mm->mmap_sem);
return true;
out:
sgx_free_page(epc_page, encl);
mutex_unlock(&encl->lock);
up_read(&encl->mm->mmap_sem);
return false;
}
static void sgx_add_page_worker(struct work_struct *work)
{
struct sgx_encl *encl;
struct sgx_add_page_req *req;
bool skip_rest = false;
bool is_empty = false;
encl = container_of(work, struct sgx_encl, add_page_work);
do {
schedule();
if (encl->flags & SGX_ENCL_DEAD)
skip_rest = true;
mutex_lock(&encl->lock);
req = list_first_entry(&encl->add_page_reqs,
struct sgx_add_page_req, list);
list_del(&req->list);
is_empty = list_empty(&encl->add_page_reqs);
mutex_unlock(&encl->lock);
if (!skip_rest) {
if (!sgx_process_add_page_req(req)) {
skip_rest = true;
sgx_dbg(encl, "EADD failed 0x%p\n",
(void *)req->encl_page->addr);
}
}
kfree(req);
} while (!kref_put(&encl->refcount, sgx_encl_release) &&
!is_empty);
}
static int sgx_validate_secs(const struct sgx_secs *secs)
{
u32 needed_ssaframesize = 1;
u32 tmp;
int i;
if (secs->flags & SGX_SECS_A_RESERVED_MASK)
return -EINVAL;
if (secs->flags & SGX_SECS_A_MODE64BIT) {
#ifdef CONFIG_X86_64
if (secs->size > sgx_encl_size_max_64)
return -EINVAL;
#else
return -EINVAL;
#endif
} else {
/* On 64-bit architecture allow 32-bit encls only in
* the compatibility mode.
*/
#ifdef CONFIG_X86_64
if (!test_thread_flag(TIF_ADDR32))
return -EINVAL;
#endif
if (secs->size > sgx_encl_size_max_32)
return -EINVAL;
}
if ((secs->xfrm & 0x3) != 0x3 || (secs->xfrm & ~sgx_xfrm_mask))
return -EINVAL;
/* Check that BNDREGS and BNDCSR are equal. */
if (((secs->xfrm >> 3) & 1) != ((secs->xfrm >> 4) & 1))
return -EINVAL;
for (i = 2; i < 64; i++) {
tmp = sgx_ssaframesize_tbl[i];
if (((1 << i) & secs->xfrm) && (tmp > needed_ssaframesize))
needed_ssaframesize = tmp;
}
if (!secs->ssaframesize || !needed_ssaframesize ||
needed_ssaframesize > secs->ssaframesize)
return -EINVAL;
/* Must be power of two */
if (secs->size == 0 || (secs->size & (secs->size - 1)) != 0)
return -EINVAL;
for (i = 0; i < SGX_SECS_RESERVED1_SIZE; i++)
if (secs->reserved1[i])
return -EINVAL;
for (i = 0; i < SGX_SECS_RESERVED2_SIZE; i++)
if (secs->reserved2[i])
return -EINVAL;
for (i = 0; i < SGX_SECS_RESERVED3_SIZE; i++)
if (secs->reserved3[i])
return -EINVAL;
for (i = 0; i < SGX_SECS_RESERVED4_SIZE; i++)
if (secs->reserved[i])
return -EINVAL;
return 0;
}
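
Two of the checks above are worth calling out: the BNDREGS/BNDCSR bits of XFRM must agree, and the enclave size must be a power of two (the ELRANGE is naturally aligned). The power-of-two test is the usual clear-lowest-set-bit trick; a minimal standalone version:

#include <assert.h>
#include <stdint.h>

/* A power of two has exactly one bit set, so clearing the lowest
 * set bit (size & (size - 1)) must yield zero -- the same test
 * sgx_validate_secs() applies to secs->size. */
static int is_power_of_two(uint64_t size)
{
        return size != 0 && (size & (size - 1)) == 0;
}

int main(void)
{
        assert(is_power_of_two(0x1000));
        assert(!is_power_of_two(0x1800));
        return 0;
}
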
static int sgx_init_page(struct sgx_encl *encl,
struct sgx_encl_page *entry,
unsigned long addr)
{
struct sgx_va_page *va_page;
struct sgx_epc_page *epc_page = NULL;
unsigned int va_offset = PAGE_SIZE;
void *vaddr;
int ret = 0;
list_for_each_entry(va_page, &encl->va_pages, list) {
va_offset = sgx_alloc_va_slot(va_page);
if (va_offset < PAGE_SIZE)
break;
}
if (va_offset == PAGE_SIZE) {
va_page = kzalloc(sizeof(*va_page), GFP_KERNEL);
if (!va_page)
return -ENOMEM;
epc_page = sgx_alloc_page(0);
if (IS_ERR(epc_page)) {
kfree(va_page);
return PTR_ERR(epc_page);
}
vaddr = sgx_get_page(epc_page);
if (!vaddr) {
sgx_warn(encl, "kmap of a new VA page failed %d\n",
ret);
sgx_free_page(epc_page, encl);
kfree(va_page);
return -EFAULT;
}
ret = __epa(vaddr);
sgx_put_page(vaddr);
if (ret) {
sgx_warn(encl, "EPA returned %d\n", ret);
sgx_free_page(epc_page, encl);
kfree(va_page);
return -EFAULT;
}
va_page->epc_page = epc_page;
va_offset = sgx_alloc_va_slot(va_page);
mutex_lock(&encl->lock);
list_add(&va_page->list, &encl->va_pages);
mutex_unlock(&encl->lock);
}
entry->va_page = va_page;
entry->va_offset = va_offset;
entry->addr = addr;
return 0;
}
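
sgx_init_page() hands every enclave page a slot in a Version Array page: sgx_alloc_va_slot() returns a byte offset inside a VA page, or PAGE_SIZE once all slots are taken, at which point a fresh EPC page is EPA'd. The slot allocator itself is not in this hunk; the sketch below is a hypothetical bitmap version that matches the contract visible here (one 8-byte version counter per evicted page, 512 per VA page):

#include <stdint.h>

#define PAGE_SIZE 4096
#define VA_SLOT_COUNT (PAGE_SIZE / 8)   /* 512 8-byte version slots */

/* Hypothetical stand-in for the driver's VA slot bookkeeping: a
 * 512-bit bitmap per VA page.  Returns a byte offset inside the VA
 * page, or PAGE_SIZE when every slot is taken. */
struct va_page_sketch {
        uint64_t bitmap[VA_SLOT_COUNT / 64];
};

static unsigned int alloc_va_slot(struct va_page_sketch *va)
{
        unsigned int i;

        for (i = 0; i < VA_SLOT_COUNT; i++) {
                if (!(va->bitmap[i / 64] & (1ULL << (i % 64)))) {
                        va->bitmap[i / 64] |= 1ULL << (i % 64);
                        return i * 8;   /* byte offset of the slot */
                }
        }
        return PAGE_SIZE;   /* full: caller EPAs a fresh VA page */
}

int main(void)
{
        struct va_page_sketch va = { {0} };

        return alloc_va_slot(&va) == 0 ? 0 : 1;   /* first slot: offset 0 */
}
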
static void sgx_mmu_notifier_release(struct mmu_notifier *mn,
struct mm_struct *mm)
{
struct sgx_encl *encl =
container_of(mn, struct sgx_encl, mmu_notifier);
mutex_lock(&encl->lock);
encl->flags |= SGX_ENCL_DEAD;
mutex_unlock(&encl->lock);
}
static const struct mmu_notifier_ops sgx_mmu_notifier_ops = {
.release = sgx_mmu_notifier_release,
};
/** /**
* sgx_ioc_enclave_create - handler for SGX_IOC_ENCLAVE_CREATE * sgx_ioc_enclave_create - handler for %SGX_IOC_ENCLAVE_CREATE
* @filep: open file to /dev/sgx * @filep: open file to /dev/sgx
* @cmd: the command value * @cmd: the command value
* @arg: pointer to the struct sgx_enclave_create * @arg: pointer to the &struct sgx_enclave_create
* *
* Creates meta-data for an enclave and executes ENCLS(ECREATE) * Validates SECS attributes, allocates an EPC page for the SECS and performs
* ECREATE.
*
* Return:
* 0 on success,
* system error on failure
*/ */
static long sgx_ioc_enclave_create(struct file *filep, unsigned int cmd, static long sgx_ioc_enclave_create(struct file *filep, unsigned int cmd,
unsigned long arg) unsigned long arg)
{ {
struct sgx_enclave_create *createp = (struct sgx_enclave_create *)arg; struct sgx_enclave_create *createp = (struct sgx_enclave_create *)arg;
unsigned long src = (unsigned long)createp->src; void __user *src = (void __user *)createp->src;
struct sgx_page_info pginfo; struct sgx_secs *secs;
struct sgx_secinfo secinfo; int ret;
struct sgx_encl *encl = NULL;
struct sgx_secs *secs = NULL;
struct sgx_epc_page *secs_epc;
struct vm_area_struct *vma;
void *secs_vaddr = NULL;
struct file *backing;
struct file *pcmd;
long ret;
secs = kzalloc(sizeof(*secs), GFP_KERNEL); secs = kzalloc(sizeof(*secs), GFP_KERNEL);
if (!secs) if (!secs)
return -ENOMEM; return -ENOMEM;
ret = copy_from_user(secs, (void __user *)src, sizeof(*secs)); ret = copy_from_user(secs, src, sizeof(*secs));
if (ret) { if (ret) {
kfree(secs); kfree(secs);
return ret; return ret;
} }
if (sgx_validate_secs(secs)) { ret = sgx_encl_create(secs);
kfree(secs); kfree(secs);
return -EINVAL;
}
backing = shmem_file_setup("dev/sgx", secs->size + PAGE_SIZE,
VM_NORESERVE);
if (IS_ERR(backing)) {
ret = PTR_ERR(backing);
goto out;
}
pcmd = shmem_file_setup("dev/sgx",
(secs->size + PAGE_SIZE) >> 5,
VM_NORESERVE);
if (IS_ERR(pcmd)) {
fput(backing);
ret = PTR_ERR(pcmd);
goto out;
}
encl = kzalloc(sizeof(*encl), GFP_KERNEL);
if (!encl) {
fput(backing);
fput(pcmd);
ret = -ENOMEM;
goto out;
}
kref_init(&encl->refcount);
INIT_LIST_HEAD(&encl->add_page_reqs);
INIT_LIST_HEAD(&encl->va_pages);
INIT_RADIX_TREE(&encl->page_tree, GFP_KERNEL);
INIT_LIST_HEAD(&encl->load_list);
INIT_LIST_HEAD(&encl->encl_list);
mutex_init(&encl->lock);
INIT_WORK(&encl->add_page_work, sgx_add_page_worker);
encl->mm = current->mm;
encl->base = secs->base;
encl->size = secs->size;
encl->backing = backing;
encl->pcmd = pcmd;
secs_epc = sgx_alloc_page(0);
if (IS_ERR(secs_epc)) {
ret = PTR_ERR(secs_epc);
secs_epc = NULL;
goto out;
}
ret = sgx_add_to_tgid_ctx(encl);
if (ret)
goto out;
ret = sgx_init_page(encl, &encl->secs_page,
encl->base + encl->size);
if (ret)
goto out;
secs_vaddr = sgx_get_page(secs_epc);
pginfo.srcpge = (unsigned long)secs;
pginfo.linaddr = 0;
pginfo.secinfo = (unsigned long)&secinfo;
pginfo.secs = 0;
memset(&secinfo, 0, sizeof(secinfo));
ret = __ecreate((void *)&pginfo, secs_vaddr);
sgx_put_page(secs_vaddr);
if (ret) {
sgx_dbg(encl, "ECREATE returned %ld\n", ret);
ret = -EFAULT;
goto out;
}
encl->secs_page.epc_page = secs_epc;
createp->src = (unsigned long)encl->base;
if (secs->flags & SGX_SECS_A_DEBUG)
encl->flags |= SGX_ENCL_DEBUG;
encl->mmu_notifier.ops = &sgx_mmu_notifier_ops;
ret = mmu_notifier_register(&encl->mmu_notifier, encl->mm);
if (ret) {
encl->mmu_notifier.ops = NULL;
goto out;
}
down_read(&current->mm->mmap_sem);
vma = find_vma(current->mm, secs->base);
if (!vma || vma->vm_ops != &sgx_vm_ops ||
vma->vm_start != secs->base ||
vma->vm_end != (secs->base + secs->size)) {
ret = -EINVAL;
up_read(&current->mm->mmap_sem);
goto out;
}
vma->vm_private_data = encl;
up_read(&current->mm->mmap_sem);
mutex_lock(&sgx_tgid_ctx_mutex);
list_add_tail(&encl->encl_list, &encl->tgid_ctx->encl_list);
mutex_unlock(&sgx_tgid_ctx_mutex);
out:
if (ret && encl)
kref_put(&encl->refcount, sgx_encl_release);
kfree(secs);
return ret;
}
static int sgx_validate_secinfo(struct sgx_secinfo *secinfo)
{
u64 perm = secinfo->flags & SGX_SECINFO_PERMISSION_MASK;
u64 page_type = secinfo->flags & SGX_SECINFO_PAGE_TYPE_MASK;
int i;
if ((secinfo->flags & SGX_SECINFO_RESERVED_MASK) ||
((perm & SGX_SECINFO_W) && !(perm & SGX_SECINFO_R)) ||
(page_type != SGX_SECINFO_TCS &&
page_type != SGX_SECINFO_REG))
return -EINVAL;
for (i = 0; i < sizeof(secinfo->reserved) / sizeof(u64); i++)
if (secinfo->reserved[i])
return -EINVAL;
return 0;
}
static int sgx_validate_tcs(struct sgx_tcs *tcs)
{
int i;
/* If FLAGS is not zero, ECALL will fail. */
if ((tcs->flags != 0) ||
(tcs->ossa & (PAGE_SIZE - 1)) ||
(tcs->ofsbase & (PAGE_SIZE - 1)) ||
(tcs->ogsbase & (PAGE_SIZE - 1)) ||
((tcs->fslimit & 0xFFF) != 0xFFF) ||
((tcs->gslimit & 0xFFF) != 0xFFF))
return -EINVAL;
for (i = 0; i < sizeof(tcs->reserved) / sizeof(u64); i++)
if (tcs->reserved[i])
return -EINVAL;
return 0;
}
static int __encl_add_page(struct sgx_encl *encl,
struct sgx_encl_page *encl_page,
struct sgx_enclave_add_page *addp,
struct sgx_secinfo *secinfo)
{
u64 page_type = secinfo->flags & SGX_SECINFO_PAGE_TYPE_MASK;
unsigned long src = (unsigned long)addp->src;
struct sgx_tcs *tcs;
struct page *backing;
struct sgx_add_page_req *req = NULL;
int ret;
int empty;
void *user_vaddr;
void *tmp_vaddr;
struct page *tmp_page;
tmp_page = alloc_page(GFP_HIGHUSER);
if (!tmp_page)
return -ENOMEM;
tmp_vaddr = kmap(tmp_page);
ret = copy_from_user((void *)tmp_vaddr, (void __user *)src, PAGE_SIZE);
kunmap(tmp_page);
if (ret) {
__free_page(tmp_page);
return -EFAULT;
}
if (sgx_validate_secinfo(secinfo)) {
__free_page(tmp_page);
return -EINVAL;
}
if (page_type == SGX_SECINFO_TCS) {
tcs = (struct sgx_tcs *)kmap(tmp_page);
ret = sgx_validate_tcs(tcs);
kunmap(tmp_page);
if (ret) {
__free_page(tmp_page);
return ret;
}
}
ret = sgx_init_page(encl, encl_page, addp->addr);
if (ret) {
__free_page(tmp_page);
return -EINVAL;
}
mutex_lock(&encl->lock);
if (encl->flags & (SGX_ENCL_INITIALIZED | SGX_ENCL_DEAD)) {
ret = -EINVAL;
goto out;
}
if (radix_tree_lookup(&encl->page_tree, addp->addr >> PAGE_SHIFT)) {
ret = -EEXIST;
goto out;
}
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req) {
ret = -ENOMEM;
goto out;
}
backing = sgx_get_backing(encl, encl_page, false);
if (IS_ERR((void *)backing)) {
ret = PTR_ERR((void *)backing);
goto out;
}
ret = radix_tree_insert(&encl->page_tree, encl_page->addr >> PAGE_SHIFT,
encl_page);
if (ret) {
sgx_put_backing(backing, false /* write */);
goto out;
}
user_vaddr = kmap(backing);
tmp_vaddr = kmap(tmp_page);
memcpy(user_vaddr, tmp_vaddr, PAGE_SIZE);
kunmap(backing);
kunmap(tmp_page);
if (page_type == SGX_SECINFO_TCS)
encl_page->flags |= SGX_ENCL_PAGE_TCS;
memcpy(&req->secinfo, secinfo, sizeof(*secinfo));
req->encl = encl;
req->encl_page = encl_page;
req->mrmask = addp->mrmask;
empty = list_empty(&encl->add_page_reqs);
kref_get(&encl->refcount);
list_add_tail(&req->list, &encl->add_page_reqs);
if (empty)
queue_work(sgx_add_page_wq, &encl->add_page_work);
sgx_put_backing(backing, true /* write */);
out:
if (ret) {
kfree(req);
sgx_free_va_slot(encl_page->va_page,
encl_page->va_offset);
}
mutex_unlock(&encl->lock);
__free_page(tmp_page);
return ret; return ret;
} }
/** /**
* sgx_ioc_enclave_add_page - handler for SGX_IOC_ENCLAVE_ADD_PAGE * sgx_ioc_enclave_add_page - handler for %SGX_IOC_ENCLAVE_ADD_PAGE
* *
* @filep: open file to /dev/sgx * @filep: open file to /dev/sgx
* @cmd: the command value * @cmd: the command value
* @arg: pointer to the struct sgx_enclave_add_page * @arg: pointer to the &struct sgx_enclave_add_page
* *
* Creates meta-data for an enclave page and enqueues ENCLS(EADD) that will * Creates a new enclave page and enqueues an EADD operation that will be
* be processed by a worker thread later on. * processed by a worker thread later on.
*
* Return:
* 0 on success,
* system error on failure
*/ */
static long sgx_ioc_enclave_add_page(struct file *filep, unsigned int cmd, static long sgx_ioc_enclave_add_page(struct file *filep, unsigned int cmd,
unsigned long arg) unsigned long arg)
{ {
struct sgx_enclave_add_page *addp = (void *)arg; struct sgx_enclave_add_page *addp = (void *)arg;
unsigned long secinfop = (unsigned long)addp->secinfo; unsigned long secinfop = (unsigned long)addp->secinfo;
struct sgx_encl *encl;
struct sgx_encl_page *page;
struct sgx_secinfo secinfo; struct sgx_secinfo secinfo;
struct sgx_encl *encl;
struct page *data_page;
void *data;
int ret; int ret;
if (addp->addr & (PAGE_SIZE - 1)) ret = sgx_get_encl(addp->addr, &encl);
return -EINVAL;
if (copy_from_user(&secinfo, (void __user *)secinfop, sizeof(secinfo)))
return -EFAULT;
ret = sgx_find_and_get_encl(addp->addr, &encl);
if (ret) if (ret)
return ret; return ret;
if (addp->addr < encl->base || if (copy_from_user(&secinfo, (void __user *)secinfop,
addp->addr > (encl->base + encl->size - PAGE_SIZE)) { sizeof(secinfo))) {
kref_put(&encl->refcount, sgx_encl_release); kref_put(&encl->refcount, sgx_encl_release);
return -EINVAL; return -EFAULT;
} }
page = kzalloc(sizeof(*page), GFP_KERNEL); data_page = alloc_page(GFP_HIGHUSER);
if (!page) { if (!data_page) {
kref_put(&encl->refcount, sgx_encl_release); kref_put(&encl->refcount, sgx_encl_release);
return -ENOMEM; return -ENOMEM;
} }
ret = __encl_add_page(encl, page, addp, &secinfo); data = kmap(data_page);
kref_put(&encl->refcount, sgx_encl_release);
ret = copy_from_user((void *)data, (void __user *)addp->src, PAGE_SIZE);
if (ret) if (ret)
kfree(page);
return ret;
}
static int __sgx_encl_init(struct sgx_encl *encl, char *sigstruct,
struct sgx_einittoken *einittoken)
{
int ret = SGX_UNMASKED_EVENT;
struct sgx_epc_page *secs_epc = encl->secs_page.epc_page;
void *secs_va = NULL;
int i;
int j;
if (einittoken->valid && einittoken->isvsvnle < sgx_isvsvnle_min)
return SGX_LE_ROLLBACK;
for (i = 0; i < SGX_EINIT_SLEEP_COUNT; i++) {
for (j = 0; j < SGX_EINIT_SPIN_COUNT; j++) {
mutex_lock(&encl->lock);
secs_va = sgx_get_page(secs_epc);
ret = __einit(sigstruct, einittoken, secs_va);
sgx_put_page(secs_va);
mutex_unlock(&encl->lock);
if (ret == SGX_UNMASKED_EVENT)
continue;
else
break;
}
if (ret != SGX_UNMASKED_EVENT)
goto out; goto out;
msleep_interruptible(SGX_EINIT_SLEEP_TIME); ret = sgx_encl_add_page(encl, addp->addr, data, &secinfo, addp->mrmask);
if (signal_pending(current)) if (ret)
return -EINTR; goto out;
}
out: out:
if (ret) { kref_put(&encl->refcount, sgx_encl_release);
sgx_dbg(encl, "EINIT returned %d\n", ret); kunmap(data_page);
} else { __free_page(data_page);
encl->flags |= SGX_ENCL_INITIALIZED;
if (einittoken->isvsvnle > sgx_isvsvnle_min)
sgx_isvsvnle_min = einittoken->isvsvnle;
}
return ret; return ret;
} }
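
The reworked handler snapshots the caller's page into a freshly allocated kernel page (alloc_page() + kmap()) before queueing the EADD, so the deferred worker never touches user memory that could be unmapped or modified in the meantime. The same bounce-buffer pattern in miniature, as plain C:

#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Bounce-buffer pattern from sgx_ioc_enclave_add_page(): copy the
 * source page into private storage up front; the queued worker then
 * operates on the snapshot, never on caller-owned memory. */
static void *snapshot_page(const void *user_src)
{
        void *copy = malloc(PAGE_SIZE);

        if (copy)
                memcpy(copy, user_src, PAGE_SIZE);
        return copy;   /* freed once the deferred EADD has consumed it */
}

int main(void)
{
        char page[PAGE_SIZE] = "payload";
        void *snap = snapshot_page(page);

        free(snap);
        return snap ? 0 : 1;
}
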
/** /**
* sgx_ioc_enclave_init - handler for SGX_IOC_ENCLAVE_INIT * sgx_ioc_enclave_init - handler for %SGX_IOC_ENCLAVE_INIT
* *
* @filep: open file to /dev/sgx * @filep: open file to /dev/sgx
* @cmd: the command value * @cmd: the command value
* @arg: pointer to the struct sgx_enclave_init * @arg: pointer to the &struct sgx_enclave_init
* *
* Flushes the remaining enqueued ENCLS(EADD) operations and executes * Flushes the remaining enqueued EADD operations and performs EINIT.
* ENCLS(EINIT). Does a number of retries because EINIT might fail because of an *
* interrupt storm. * Return:
* 0 on success,
* system error on failure
*/ */
static long sgx_ioc_enclave_init(struct file *filep, unsigned int cmd, static long sgx_ioc_enclave_init(struct file *filep, unsigned int cmd,
unsigned long arg) unsigned long arg)
@ -880,7 +213,7 @@ static long sgx_ioc_enclave_init(struct file *filep, unsigned int cmd,
unsigned long sigstructp = (unsigned long)initp->sigstruct; unsigned long sigstructp = (unsigned long)initp->sigstruct;
unsigned long einittokenp = (unsigned long)initp->einittoken; unsigned long einittokenp = (unsigned long)initp->einittoken;
unsigned long encl_id = initp->addr; unsigned long encl_id = initp->addr;
char *sigstruct; struct sgx_sigstruct *sigstruct;
struct sgx_einittoken *einittoken; struct sgx_einittoken *einittoken;
struct sgx_encl *encl; struct sgx_encl *encl;
struct page *initp_page; struct page *initp_page;
@ -895,33 +228,24 @@ static long sgx_ioc_enclave_init(struct file *filep, unsigned int cmd,
((unsigned long)sigstruct + PAGE_SIZE / 2); ((unsigned long)sigstruct + PAGE_SIZE / 2);
ret = copy_from_user(sigstruct, (void __user *)sigstructp, ret = copy_from_user(sigstruct, (void __user *)sigstructp,
SIGSTRUCT_SIZE); sizeof(*sigstruct));
if (ret) if (ret)
goto out_free_page; goto out;
ret = copy_from_user(einittoken, (void __user *)einittokenp, ret = copy_from_user(einittoken, (void __user *)einittokenp,
EINITTOKEN_SIZE); sizeof(*einittoken));
if (ret) if (ret)
goto out_free_page;
ret = sgx_find_and_get_encl(encl_id, &encl);
if (ret)
goto out_free_page;
mutex_lock(&encl->lock);
if (encl->flags & SGX_ENCL_INITIALIZED) {
ret = -EINVAL;
mutex_unlock(&encl->lock);
goto out; goto out;
}
mutex_unlock(&encl->lock);
flush_work(&encl->add_page_work); ret = sgx_get_encl(encl_id, &encl);
if (ret)
goto out;
ret = sgx_encl_init(encl, sigstruct, einittoken);
ret = __sgx_encl_init(encl, sigstruct, einittoken);
out:
kref_put(&encl->refcount, sgx_encl_release); kref_put(&encl->refcount, sgx_encl_release);
out_free_page:
out:
kunmap(initp_page); kunmap(initp_page);
__free_page(initp_page); __free_page(initp_page);
return ret; return ret;


@ -4,7 +4,7 @@
* *
* GPL LICENSE SUMMARY * GPL LICENSE SUMMARY
* *
* Copyright(c) 2016 Intel Corporation. * Copyright(c) 2016-2017 Intel Corporation.
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as * it under the terms of version 2 of the GNU General Public License as
@ -21,7 +21,7 @@
* *
* BSD LICENSE * BSD LICENSE
* *
* Copyright(c) 2016 Intel Corporation. * Copyright(c) 2016-2017 Intel Corporation.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions * modification, are permitted provided that the following conditions
@ -58,6 +58,7 @@
* Sean Christopherson <sean.j.christopherson@intel.com> * Sean Christopherson <sean.j.christopherson@intel.com>
*/ */
#include "asm/msr-index.h"
#include "sgx.h" #include "sgx.h"
#include <linux/acpi.h> #include <linux/acpi.h>
#include <linux/file.h> #include <linux/file.h>
@ -72,16 +73,15 @@
#define DRV_DESCRIPTION "Intel SGX Driver" #define DRV_DESCRIPTION "Intel SGX Driver"
#define DRV_VERSION "0.10" #define DRV_VERSION "0.10"
#define ENCL_SIZE_MAX_64 (64ULL * 1024ULL * 1024ULL * 1024ULL)
#define ENCL_SIZE_MAX_32 (2ULL * 1024ULL * 1024ULL * 1024ULL)
MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>"); MODULE_AUTHOR("Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>");
MODULE_VERSION(DRV_VERSION); MODULE_VERSION(DRV_VERSION);
#ifndef X86_FEATURE_SGX #ifndef X86_FEATURE_SGX
#define X86_FEATURE_SGX (9 * 32 + 2) #define X86_FEATURE_SGX (9 * 32 + 2)
#endif #endif
#define FEATURE_CONTROL_SGX_ENABLE (1<<18)
/* /*
* Global data. * Global data.
*/ */
@ -90,11 +90,11 @@ struct workqueue_struct *sgx_add_page_wq;
#define SGX_MAX_EPC_BANKS 8 #define SGX_MAX_EPC_BANKS 8
struct sgx_epc_bank sgx_epc_banks[SGX_MAX_EPC_BANKS]; struct sgx_epc_bank sgx_epc_banks[SGX_MAX_EPC_BANKS];
int sgx_nr_epc_banks; int sgx_nr_epc_banks;
u64 sgx_encl_size_max_32 = ENCL_SIZE_MAX_32; u64 sgx_encl_size_max_32;
u64 sgx_encl_size_max_64 = ENCL_SIZE_MAX_64; u64 sgx_encl_size_max_64;
u64 sgx_xfrm_mask = 0x3; u64 sgx_xfrm_mask = 0x3;
u32 sgx_ssaframesize_tbl[64]; u32 sgx_misc_reserved;
bool sgx_has_sgx2; u32 sgx_xsave_size_tbl[64];
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
long sgx_compat_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) long sgx_compat_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
@ -159,91 +159,17 @@ static const struct file_operations sgx_fops = {
}; };
static struct miscdevice sgx_dev = { static struct miscdevice sgx_dev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "isgx", .name = "isgx",
.fops = &sgx_fops, .fops = &sgx_fops,
.mode = 0666, .mode = 0666,
}; };
static int sgx_init_platform(void)
{
unsigned int eax, ebx, ecx, edx;
unsigned long size;
int i;
cpuid(0, &eax, &ebx, &ecx, &edx);
if (eax < SGX_CPUID) {
pr_err("intel_sgx: CPUID is missing the SGX leaf instruction\n");
return -ENODEV;
}
if (!boot_cpu_has(X86_FEATURE_SGX)) {
pr_err("intel_sgx: CPU is missing the SGX feature\n");
return -ENODEV;
}
cpuid_count(SGX_CPUID, 0x0, &eax, &ebx, &ecx, &edx);
if (!(eax & 1)) {
pr_err("intel_sgx: CPU does not support the SGX 1.0 instruction set\n");
return -ENODEV;
}
if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
cpuid_count(SGX_CPUID, 0x1, &eax, &ebx, &ecx, &edx);
sgx_xfrm_mask = (((u64)edx) << 32) + (u64)ecx;
for (i = 2; i < 64; i++) {
cpuid_count(0x0D, i, &eax, &ebx, &ecx, &edx);
if ((1 << i) & sgx_xfrm_mask)
sgx_ssaframesize_tbl[i] =
(168 + eax + ebx + PAGE_SIZE - 1) /
PAGE_SIZE;
}
}
cpuid_count(SGX_CPUID, 0x0, &eax, &ebx, &ecx, &edx);
if (edx & 0xFFFF) {
#ifdef CONFIG_X86_64
sgx_encl_size_max_64 = 1ULL << ((edx >> 8) & 0xFF);
#endif
sgx_encl_size_max_32 = 1ULL << (edx & 0xFF);
}
sgx_nr_epc_banks = 0;
do {
cpuid_count(SGX_CPUID, sgx_nr_epc_banks + 2,
&eax, &ebx, &ecx, &edx);
if (eax & 0xf) {
sgx_epc_banks[sgx_nr_epc_banks].start =
(((u64) (ebx & 0xfffff)) << 32) +
(u64) (eax & 0xfffff000);
size = (((u64) (edx & 0xfffff)) << 32) +
(u64) (ecx & 0xfffff000);
sgx_epc_banks[sgx_nr_epc_banks].end =
sgx_epc_banks[sgx_nr_epc_banks].start + size;
if (!sgx_epc_banks[sgx_nr_epc_banks].start)
return -ENODEV;
sgx_nr_epc_banks++;
} else {
break;
}
} while (sgx_nr_epc_banks < SGX_MAX_EPC_BANKS);
/* There should be at least one EPC area or something is wrong. */
if (!sgx_nr_epc_banks) {
WARN_ON(1);
return 1;
}
return 0;
}
static int sgx_pm_suspend(struct device *dev) static int sgx_pm_suspend(struct device *dev)
{ {
struct sgx_tgid_ctx *ctx; struct sgx_tgid_ctx *ctx;
struct sgx_encl *encl; struct sgx_encl *encl;
kthread_stop(ksgxswapd_tsk);
ksgxswapd_tsk = NULL;
list_for_each_entry(ctx, &sgx_tgid_ctx_list, list) { list_for_each_entry(ctx, &sgx_tgid_ctx_list, list) {
list_for_each_entry(encl, &ctx->encl_list, encl_list) { list_for_each_entry(encl, &ctx->encl_list, encl_list) {
sgx_invalidate(encl, false); sgx_invalidate(encl, false);
@ -255,134 +181,150 @@ static int sgx_pm_suspend(struct device *dev)
return 0; return 0;
} }
static int sgx_pm_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(sgx_drv_pm, sgx_pm_suspend, NULL);
{
ksgxswapd_tsk = kthread_run(ksgxswapd, NULL, "kswapd");
return 0;
}
static SIMPLE_DEV_PM_OPS(sgx_drv_pm, sgx_pm_suspend, sgx_pm_resume); static int sgx_dev_init(struct device *parent)
static int sgx_dev_init(struct device *dev)
{ {
unsigned int wq_flags; unsigned int eax, ebx, ecx, edx;
unsigned long pa;
unsigned long size;
int ret; int ret;
int i; int i;
pr_info("intel_sgx: " DRV_DESCRIPTION " v" DRV_VERSION "\n"); pr_info("intel_sgx: " DRV_DESCRIPTION " v" DRV_VERSION "\n");
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) cpuid_count(SGX_CPUID, SGX_CPUID_CAPABILITIES, &eax, &ebx, &ecx, &edx);
return -ENODEV; /* Only allow misc bits supported by the driver. */
sgx_misc_reserved = ~ebx | SGX_MISC_RESERVED_MASK;
#ifdef CONFIG_X86_64
sgx_encl_size_max_64 = 1ULL << ((edx >> 8) & 0xFF);
#endif
sgx_encl_size_max_32 = 1ULL << (edx & 0xFF);
ret = sgx_init_platform(); if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
if (ret) cpuid_count(SGX_CPUID, SGX_CPUID_ATTRIBUTES, &eax, &ebx, &ecx,
return ret; &edx);
sgx_xfrm_mask = (((u64)edx) << 32) + (u64)ecx;
pr_info("intel_sgx: Number of EPCs %d\n", sgx_nr_epc_banks); for (i = 2; i < 64; i++) {
cpuid_count(0x0D, i, &eax, &ebx, &ecx, &edx);
if ((1 << i) & sgx_xfrm_mask)
sgx_xsave_size_tbl[i] = eax + ebx;
}
}
for (i = 0; i < SGX_MAX_EPC_BANKS; i++) {
cpuid_count(SGX_CPUID, i + SGX_CPUID_EPC_BANKS, &eax, &ebx,
&ecx, &edx);
if (!(eax & 0xf))
break;
pa = ((u64)(ebx & 0xfffff) << 32) + (u64)(eax & 0xfffff000);
size = ((u64)(edx & 0xfffff) << 32) + (u64)(ecx & 0xfffff000);
dev_info(parent, "EPC bank 0x%lx-0x%lx\n", pa, pa + size);
sgx_epc_banks[i].pa = pa;
sgx_epc_banks[i].size = size;
}
sgx_nr_epc_banks = i;
for (i = 0; i < sgx_nr_epc_banks; i++) { for (i = 0; i < sgx_nr_epc_banks; i++) {
pr_info("intel_sgx: EPC memory range 0x%lx-0x%lx\n",
sgx_epc_banks[i].start, sgx_epc_banks[i].end);
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
sgx_epc_banks[i].mem = ioremap_cache(sgx_epc_banks[i].start, sgx_epc_banks[i].va = (unsigned long)
sgx_epc_banks[i].end - sgx_epc_banks[i].start); ioremap_cache(sgx_epc_banks[i].pa,
if (!sgx_epc_banks[i].mem) { sgx_epc_banks[i].size);
if (!sgx_epc_banks[i].va) {
sgx_nr_epc_banks = i; sgx_nr_epc_banks = i;
ret = -ENOMEM; ret = -ENOMEM;
goto out_iounmap; goto out_iounmap;
} }
#endif #endif
ret = sgx_page_cache_init(sgx_epc_banks[i].start, ret = sgx_add_epc_bank(sgx_epc_banks[i].pa,
sgx_epc_banks[i].end - sgx_epc_banks[i].start); sgx_epc_banks[i].size, i);
if (ret) { if (ret) {
sgx_nr_epc_banks = i+1; sgx_nr_epc_banks = i + 1;
goto out_iounmap; goto out_iounmap;
} }
} }
wq_flags = WQ_UNBOUND | WQ_FREEZABLE; ret = sgx_page_cache_init();
#ifdef WQ_NON_REENETRANT if (ret)
wq_flags |= WQ_NON_REENTRANT; goto out_iounmap;
#endif
sgx_add_page_wq = alloc_workqueue("intel_sgx-add-page-wq", wq_flags, 1); sgx_add_page_wq = alloc_workqueue("intel_sgx-add-page-wq",
WQ_UNBOUND | WQ_FREEZABLE, 1);
if (!sgx_add_page_wq) { if (!sgx_add_page_wq) {
pr_err("intel_sgx: alloc_workqueue() failed\n"); pr_err("intel_sgx: alloc_workqueue() failed\n");
ret = -ENOMEM; ret = -ENOMEM;
goto out_iounmap; goto out_iounmap;
} }
sgx_dev.parent = dev; sgx_dev.parent = parent;
ret = misc_register(&sgx_dev); ret = misc_register(&sgx_dev);
if (ret) { if (ret) {
pr_err("intel_sgx: misc_register() failed\n"); pr_err("intel_sgx: misc_register() failed\n");
goto out_workqueue; goto out_workqueue;
} }
if (ret)
goto out_workqueue;
return 0; return 0;
out_workqueue: out_workqueue:
destroy_workqueue(sgx_add_page_wq); destroy_workqueue(sgx_add_page_wq);
out_iounmap: out_iounmap:
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
for (i = 0; i < sgx_nr_epc_banks; i++) for (i = 0; i < sgx_nr_epc_banks; i++)
iounmap(sgx_epc_banks[i].mem); iounmap((void *)sgx_epc_banks[i].va);
#endif #endif
return ret; return ret;
} }
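
The EPC enumeration above comes straight from CPUID leaf 0x12: subleaf n+2 describes bank n, with the base split across EAX/EBX and the size across ECX/EDX, and the low nibble of EAX flagging a valid entry. The same decode as a runnable userspace program; it assumes an SGX-capable CPU and, like the driver, bounds the scan at eight banks:

#include <stdio.h>
#include <stdint.h>
#include <cpuid.h>

#define SGX_CPUID 0x12   /* leaf number used by the driver above */

int main(void)
{
        unsigned int eax, ebx, ecx, edx, subleaf;

        /* SGX_MAX_EPC_BANKS is 8 in the driver. */
        for (subleaf = 2; subleaf < 2 + 8; subleaf++) {
                __cpuid_count(SGX_CPUID, subleaf, eax, ebx, ecx, edx);
                if (!(eax & 0xf))
                        break;
                uint64_t pa   = ((uint64_t)(ebx & 0xfffff) << 32) |
                                (eax & 0xfffff000);
                uint64_t size = ((uint64_t)(edx & 0xfffff) << 32) |
                                (ecx & 0xfffff000);
                printf("EPC bank %u: 0x%llx-0x%llx\n", subleaf - 2,
                       (unsigned long long)pa,
                       (unsigned long long)(pa + size));
        }
        return 0;
}
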
static atomic_t sgx_init_flag = ATOMIC_INIT(0); static atomic_t sgx_init_flag = ATOMIC_INIT(0);
static int sgx_drv_probe(struct platform_device *pdev) static int sgx_drv_probe(struct platform_device *pdev)
{ {
unsigned int eax, ebx, ecx, edx; unsigned int eax, ebx, ecx, edx;
int i; unsigned long fc;
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return -ENODEV;
if (atomic_cmpxchg(&sgx_init_flag, 0, 1)) { if (atomic_cmpxchg(&sgx_init_flag, 0, 1)) {
pr_warn("intel_sgx: second initialization call skipped\n"); pr_warn("intel_sgx: second initialization call skipped\n");
return 0; return 0;
} }
cpuid(0, &eax, &ebx, &ecx, &edx);
if (eax < SGX_CPUID) { if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
pr_err("intel_sgx: CPUID is missing the SGX leaf instruction\n");
return -ENODEV; return -ENODEV;
}
if (!boot_cpu_has(X86_FEATURE_SGX)) { if (!boot_cpu_has(X86_FEATURE_SGX)) {
pr_err("intel_sgx: CPU is missing the SGX feature\n"); pr_err("intel_sgx: the CPU is missing SGX\n");
return -ENODEV; return -ENODEV;
} }
cpuid_count(SGX_CPUID, 0x0, &eax, &ebx, &ecx, &edx); rdmsrl(MSR_IA32_FEATURE_CONTROL, fc);
if (!(fc & FEATURE_CONTROL_LOCKED)) {
pr_err("intel_sgx: the feature control MSR is not locked\n");
return -ENODEV;
}
if (!(fc & FEATURE_CONTROL_SGX_ENABLE)) {
pr_err("intel_sgx: SGX is not enabled\n");
return -ENODEV;
}
cpuid(0, &eax, &ebx, &ecx, &edx);
if (eax < SGX_CPUID) {
pr_err("intel_sgx: CPUID is missing the SGX leaf\n");
return -ENODEV;
}
cpuid_count(SGX_CPUID, SGX_CPUID_CAPABILITIES, &eax, &ebx, &ecx, &edx);
if (!(eax & 1)) { if (!(eax & 1)) {
pr_err("intel_sgx: CPU does not support the SGX 1.0 instruction set\n"); pr_err("intel_sgx: CPU does not support the SGX1 instructions\n");
return -ENODEV; return -ENODEV;
} }
sgx_has_sgx2 = (eax & 2) != 0;
if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
cpuid_count(SGX_CPUID, 0x1, &eax, &ebx, &ecx, &edx);
sgx_xfrm_mask = (((u64)edx) << 32) + (u64)ecx;
for (i = 2; i < 64; i++) {
cpuid_count(0x0D, i, &eax, &ebx, &ecx, &edx);
if ((1 << i) & sgx_xfrm_mask)
sgx_ssaframesize_tbl[i] =
(168 + eax + ebx + PAGE_SIZE - 1) /
PAGE_SIZE;
}
}
cpuid_count(SGX_CPUID, 0x0, &eax, &ebx, &ecx, &edx);
if (edx & 0xFFFF) {
#ifdef CONFIG_X86_64
sgx_encl_size_max_64 = 2ULL << (edx & 0xFF);
#endif
sgx_encl_size_max_32 = 2ULL << ((edx >> 8) & 0xFF);
}
return sgx_dev_init(&pdev->dev); return sgx_dev_init(&pdev->dev);
} }
@ -396,10 +338,11 @@ static int sgx_drv_remove(struct platform_device *pdev)
} }
misc_deregister(&sgx_dev); misc_deregister(&sgx_dev);
destroy_workqueue(sgx_add_page_wq); destroy_workqueue(sgx_add_page_wq);
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
for (i = 0; i < sgx_nr_epc_banks; i++) for (i = 0; i < sgx_nr_epc_banks; i++)
iounmap(sgx_epc_banks[i].mem); iounmap((void *)sgx_epc_banks[i].va);
#endif #endif
sgx_page_cache_teardown(); sgx_page_cache_teardown();
@ -424,9 +367,6 @@ static struct platform_driver sgx_drv = {
}, },
}; };
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0))
module_platform_driver(sgx_drv);
#else
static struct platform_device *pdev; static struct platform_device *pdev;
int init_sgx_module(void) int init_sgx_module(void)
{ {
@ -446,6 +386,5 @@ void cleanup_sgx_module(void)
module_init(init_sgx_module); module_init(init_sgx_module);
module_exit(cleanup_sgx_module); module_exit(cleanup_sgx_module);
#endif
MODULE_LICENSE("Dual BSD/GPL"); MODULE_LICENSE("Dual BSD/GPL");


@ -4,7 +4,7 @@
* *
* GPL LICENSE SUMMARY * GPL LICENSE SUMMARY
* *
* Copyright(c) 2016 Intel Corporation. * Copyright(c) 2016-2017 Intel Corporation.
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as * it under the terms of version 2 of the GNU General Public License as
@ -21,7 +21,7 @@
* *
* BSD LICENSE * BSD LICENSE
* *
* Copyright(c) 2016 Intel Corporation. * Copyright(c) 2016-2017 Intel Corporation.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions * modification, are permitted provided that the following conditions
@ -63,7 +63,7 @@
#include <linux/highmem.h> #include <linux/highmem.h>
#include <linux/kthread.h> #include <linux/kthread.h>
#include <linux/ratelimit.h> #include <linux/ratelimit.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)) #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
#include <linux/sched/signal.h> #include <linux/sched/signal.h>
#else #else
#include <linux/signal.h> #include <linux/signal.h>
@ -78,14 +78,14 @@ static DEFINE_SPINLOCK(sgx_free_list_lock);
LIST_HEAD(sgx_tgid_ctx_list); LIST_HEAD(sgx_tgid_ctx_list);
DEFINE_MUTEX(sgx_tgid_ctx_mutex); DEFINE_MUTEX(sgx_tgid_ctx_mutex);
atomic_t sgx_va_pages_cnt = ATOMIC_INIT(0);
static unsigned int sgx_nr_total_epc_pages; static unsigned int sgx_nr_total_epc_pages;
static unsigned int sgx_nr_free_pages; static unsigned int sgx_nr_free_pages;
static unsigned int sgx_nr_low_pages = SGX_NR_LOW_EPC_PAGES_DEFAULT; static unsigned int sgx_nr_low_pages = SGX_NR_LOW_EPC_PAGES_DEFAULT;
static unsigned int sgx_nr_high_pages; static unsigned int sgx_nr_high_pages;
struct task_struct *ksgxswapd_tsk; static struct task_struct *ksgxswapd_tsk;
static DECLARE_WAIT_QUEUE_HEAD(ksgxswapd_waitq); static DECLARE_WAIT_QUEUE_HEAD(ksgxswapd_waitq);
static int sgx_test_and_clear_young_cb(pte_t *ptep, pgtable_t token, static int sgx_test_and_clear_young_cb(pte_t *ptep, pgtable_t token,
unsigned long addr, void *data) unsigned long addr, void *data)
{ {
@ -112,9 +112,14 @@ static int sgx_test_and_clear_young_cb(pte_t *ptep, pgtable_t token,
*/ */
int sgx_test_and_clear_young(struct sgx_encl_page *page, struct sgx_encl *encl) int sgx_test_and_clear_young(struct sgx_encl_page *page, struct sgx_encl *encl)
{ {
struct vm_area_struct *vma = sgx_find_vma(encl, page->addr); struct vm_area_struct *vma;
int ret;
if (!vma) ret = sgx_encl_find(encl->mm, page->addr, &vma);
if (ret)
return 0;
if (encl != vma->vm_private_data)
return 0; return 0;
return apply_to_page_range(vma->vm_mm, page->addr, PAGE_SIZE, return apply_to_page_range(vma->vm_mm, page->addr, PAGE_SIZE,
@ -197,7 +202,7 @@ static void sgx_isolate_pages(struct sgx_encl *encl,
struct list_head *dst, struct list_head *dst,
unsigned long nr_to_scan) unsigned long nr_to_scan)
{ {
struct sgx_encl_page *entry; struct sgx_epc_page *entry;
int i; int i;
mutex_lock(&encl->lock); mutex_lock(&encl->lock);
@ -210,57 +215,25 @@ static void sgx_isolate_pages(struct sgx_encl *encl,
break; break;
entry = list_first_entry(&encl->load_list, entry = list_first_entry(&encl->load_list,
struct sgx_encl_page, struct sgx_epc_page,
load_list); list);
if (!sgx_test_and_clear_young(entry, encl) && if (!sgx_test_and_clear_young(entry->encl_page, encl) &&
!(entry->flags & SGX_ENCL_PAGE_RESERVED)) { !(entry->encl_page->flags & SGX_ENCL_PAGE_RESERVED)) {
entry->flags |= SGX_ENCL_PAGE_RESERVED; entry->encl_page->flags |= SGX_ENCL_PAGE_RESERVED;
list_move_tail(&entry->load_list, dst); list_move_tail(&entry->list, dst);
} else { } else {
list_move_tail(&entry->load_list, &encl->load_list); list_move_tail(&entry->list, &encl->load_list);
} }
} }
out: out:
mutex_unlock(&encl->lock); mutex_unlock(&encl->lock);
} }
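
The scan above is a second-chance (clock-style) policy: pages whose accessed bit was set since the last pass get the bit cleared and rotate back to the tail, while cold, unreserved pages are claimed for eviction. The policy in miniature, decoupled from the kernel list machinery:

#include <stdio.h>

/* Second-chance scan mirroring sgx_isolate_pages(): recently touched
 * pages get rotated back; untouched, unreserved pages are claimed. */
struct page_s { int young; int reserved; };

static int isolate(struct page_s *pages, int n, int *victims, int max)
{
        int found = 0, i;

        for (i = 0; i < n && found < max; i++) {
                if (pages[i].young) {
                        pages[i].young = 0;   /* test-and-clear: second chance */
                        continue;
                }
                if (!pages[i].reserved) {
                        pages[i].reserved = 1;
                        victims[found++] = i;
                }
        }
        return found;
}

int main(void)
{
        struct page_s p[4] = { {1, 0}, {0, 0}, {0, 1}, {0, 0} };
        int v[4];
        int n = isolate(p, 4, v, 4);

        printf("isolated %d pages (first: %d)\n", n, v[0]);   /* 2, page 1 */
        return 0;
}
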
static void sgx_eblock(struct sgx_encl *encl,
struct sgx_epc_page *epc_page)
{
void *vaddr;
int ret;
vaddr = sgx_get_page(epc_page);
ret = __eblock((unsigned long)vaddr);
sgx_put_page(vaddr);
if (ret) {
sgx_crit(encl, "EBLOCK returned %d\n", ret);
sgx_invalidate(encl, true);
}
}
static void sgx_etrack(struct sgx_encl *encl)
{
void *epc;
int ret;
epc = sgx_get_page(encl->secs_page.epc_page);
ret = __etrack(epc);
sgx_put_page(epc);
if (ret) {
sgx_crit(encl, "ETRACK returned %d\n", ret);
sgx_invalidate(encl, true);
}
}
static int __sgx_ewb(struct sgx_encl *encl, static int __sgx_ewb(struct sgx_encl *encl,
struct sgx_encl_page *encl_page) struct sgx_encl_page *encl_page)
{ {
struct sgx_page_info pginfo; struct sgx_pageinfo pginfo;
struct page *backing; struct page *backing;
struct page *pcmd; struct page *pcmd;
unsigned long pcmd_offset; unsigned long pcmd_offset;
@ -340,25 +313,25 @@ static void sgx_evict_page(struct sgx_encl_page *entry,
static void sgx_write_pages(struct sgx_encl *encl, struct list_head *src) static void sgx_write_pages(struct sgx_encl *encl, struct list_head *src)
{ {
struct sgx_encl_page *entry; struct sgx_epc_page *entry;
struct sgx_encl_page *tmp; struct sgx_epc_page *tmp;
struct vm_area_struct *vma; struct vm_area_struct *vma;
int ret;
if (list_empty(src)) if (list_empty(src))
return; return;
entry = list_first_entry(src, struct sgx_encl_page, load_list); entry = list_first_entry(src, struct sgx_epc_page, list);
mutex_lock(&encl->lock); mutex_lock(&encl->lock);
/* EBLOCK */ /* EBLOCK */
list_for_each_entry_safe(entry, tmp, src, load_list) { list_for_each_entry_safe(entry, tmp, src, list) {
vma = sgx_find_vma(encl, entry->addr); ret = sgx_encl_find(encl->mm, entry->encl_page->addr, &vma);
if (vma) { if (!ret && encl == vma->vm_private_data)
zap_vma_ptes(vma, entry->addr, PAGE_SIZE); zap_vma_ptes(vma, entry->encl_page->addr, PAGE_SIZE);
}
sgx_eblock(encl, entry->epc_page); sgx_eblock(encl, entry);
} }
/* ETRACK */ /* ETRACK */
@ -366,15 +339,14 @@ static void sgx_write_pages(struct sgx_encl *encl, struct list_head *src)
/* EWB */ /* EWB */
while (!list_empty(src)) { while (!list_empty(src)) {
entry = list_first_entry(src, struct sgx_encl_page, entry = list_first_entry(src, struct sgx_epc_page, list);
load_list); list_del(&entry->list);
list_del(&entry->load_list); sgx_evict_page(entry->encl_page, encl);
sgx_evict_page(entry, encl);
encl->secs_child_cnt--; encl->secs_child_cnt--;
} }
if (!encl->secs_child_cnt && (encl->flags & SGX_ENCL_INITIALIZED)) { if (!encl->secs_child_cnt && (encl->flags & SGX_ENCL_INITIALIZED)) {
sgx_evict_page(&encl->secs_page, encl); sgx_evict_page(&encl->secs, encl);
encl->flags |= SGX_ENCL_SECS_EVICTED; encl->flags |= SGX_ENCL_SECS_EVICTED;
} }
@ -405,10 +377,15 @@ out:
kref_put(&ctx->refcount, sgx_tgid_ctx_release); kref_put(&ctx->refcount, sgx_tgid_ctx_release);
} }
int ksgxswapd(void *p) static int ksgxswapd(void *p)
{ {
set_freezable();
while (!kthread_should_stop()) { while (!kthread_should_stop()) {
wait_event_interruptible(ksgxswapd_waitq, if (try_to_freeze())
continue;
wait_event_freezable(ksgxswapd_waitq,
kthread_should_stop() || kthread_should_stop() ||
sgx_nr_free_pages < sgx_nr_high_pages); sgx_nr_free_pages < sgx_nr_high_pages);
@ -420,7 +397,7 @@ int ksgxswapd(void *p)
return 0; return 0;
} }
int sgx_page_cache_init(resource_size_t start, unsigned long size) int sgx_add_epc_bank(resource_size_t start, unsigned long size, int bank)
{ {
unsigned long i; unsigned long i;
struct sgx_epc_page *new_epc_page, *entry; struct sgx_epc_page *new_epc_page, *entry;
@ -430,42 +407,53 @@ int sgx_page_cache_init(resource_size_t start, unsigned long size)
new_epc_page = kzalloc(sizeof(*new_epc_page), GFP_KERNEL); new_epc_page = kzalloc(sizeof(*new_epc_page), GFP_KERNEL);
if (!new_epc_page) if (!new_epc_page)
goto err_freelist; goto err_freelist;
new_epc_page->pa = start + i; new_epc_page->pa = (start + i) | bank;
spin_lock(&sgx_free_list_lock); spin_lock(&sgx_free_list_lock);
list_add_tail(&new_epc_page->free_list, &sgx_free_list); list_add_tail(&new_epc_page->list, &sgx_free_list);
sgx_nr_total_epc_pages++; sgx_nr_total_epc_pages++;
sgx_nr_free_pages++; sgx_nr_free_pages++;
spin_unlock(&sgx_free_list_lock); spin_unlock(&sgx_free_list_lock);
} }
sgx_nr_high_pages = 2 * sgx_nr_low_pages;
ksgxswapd_tsk = kthread_run(ksgxswapd, NULL, "ksgxswapd");
return 0; return 0;
err_freelist: err_freelist:
list_for_each_safe(parser, temp, &sgx_free_list) { list_for_each_safe(parser, temp, &sgx_free_list) {
spin_lock(&sgx_free_list_lock); spin_lock(&sgx_free_list_lock);
entry = list_entry(parser, struct sgx_epc_page, free_list); entry = list_entry(parser, struct sgx_epc_page, list);
list_del(&entry->free_list); list_del(&entry->list);
spin_unlock(&sgx_free_list_lock); spin_unlock(&sgx_free_list_lock);
kfree(entry); kfree(entry);
} }
return -ENOMEM; return -ENOMEM;
} }
int sgx_page_cache_init(void)
{
struct task_struct *tmp;
sgx_nr_high_pages = 2 * sgx_nr_low_pages;
tmp = kthread_run(ksgxswapd, NULL, "ksgxswapd");
if (!IS_ERR(tmp))
ksgxswapd_tsk = tmp;
return PTR_ERR_OR_ZERO(tmp);
}
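
kthread_run() reports failure as an ERR_PTR-encoded pointer rather than NULL, hence the IS_ERR()/PTR_ERR_OR_ZERO() dance above. A userspace model of the encoding (the kernel reserves the top 4 KiB of the address range for errno values):

#include <assert.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095

/* Model of the kernel's ERR_PTR convention used by
 * sgx_page_cache_init() above: errors travel as pointers. */
static void *ERR_PTR(long err)      { return (void *)err; }
static long  PTR_ERR(const void *p) { return (long)p; }
static int   IS_ERR(const void *p)
{
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}
static long PTR_ERR_OR_ZERO(const void *p)
{
        return IS_ERR(p) ? PTR_ERR(p) : 0;
}

int main(void)
{
        int ok;

        assert(IS_ERR(ERR_PTR(-ENOMEM)));
        assert(PTR_ERR_OR_ZERO(ERR_PTR(-ENOMEM)) == -ENOMEM);
        assert(PTR_ERR_OR_ZERO(&ok) == 0);   /* a real pointer decodes to 0 */
        return 0;
}
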
void sgx_page_cache_teardown(void) void sgx_page_cache_teardown(void)
{ {
struct sgx_epc_page *entry; struct sgx_epc_page *entry;
struct list_head *parser, *temp; struct list_head *parser, *temp;
if (ksgxswapd_tsk) if (ksgxswapd_tsk) {
kthread_stop(ksgxswapd_tsk); kthread_stop(ksgxswapd_tsk);
ksgxswapd_tsk = NULL;
}
spin_lock(&sgx_free_list_lock); spin_lock(&sgx_free_list_lock);
list_for_each_safe(parser, temp, &sgx_free_list) { list_for_each_safe(parser, temp, &sgx_free_list) {
entry = list_entry(parser, struct sgx_epc_page, free_list); entry = list_entry(parser, struct sgx_epc_page, list);
list_del(&entry->free_list); list_del(&entry->list);
kfree(entry); kfree(entry);
} }
spin_unlock(&sgx_free_list_lock); spin_unlock(&sgx_free_list_lock);
@ -479,8 +467,8 @@ static struct sgx_epc_page *sgx_alloc_page_fast(void)
if (!list_empty(&sgx_free_list)) { if (!list_empty(&sgx_free_list)) {
entry = list_first_entry(&sgx_free_list, struct sgx_epc_page, entry = list_first_entry(&sgx_free_list, struct sgx_epc_page,
free_list); list);
list_del(&entry->free_list); list_del(&entry->list);
sgx_nr_free_pages--; sgx_nr_free_pages--;
} }
@ -510,6 +498,11 @@ struct sgx_epc_page *sgx_alloc_page(unsigned int flags)
if (entry) if (entry)
break; break;
/* We need at minimum two pages for the #PF handler. */
if (atomic_read(&sgx_va_pages_cnt) >
(sgx_nr_total_epc_pages - 2))
return ERR_PTR(-ENOMEM);
if (flags & SGX_ALLOC_ATOMIC) { if (flags & SGX_ALLOC_ATOMIC) {
entry = ERR_PTR(-EBUSY); entry = ERR_PTR(-EBUSY);
break; break;
@ -529,26 +522,18 @@ struct sgx_epc_page *sgx_alloc_page(unsigned int flags)
return entry; return entry;
} }
EXPORT_SYMBOL(sgx_alloc_page);
/** /**
* sgx_free_page - free an EPC page * sgx_free_page - free an EPC page
* *
* EREMOVE an EPC page and insert it back to the list of free pages. Optionally, * EREMOVE an EPC page and insert it back to the list of free pages.
* an enclave can be given as a parameter. If the enclave is given, the * If EREMOVE fails, the error is printed out loud as a critical error.
* resulting error is printed out loud as a critical error. It is an indicator * It is an indicator of a driver bug if that would happen.
* of a driver bug if that would happen.
* *
* If the enclave is not given as a parameter (like in the case when VMM uses * @entry: any EPC page
* this function)), it is fully up to the caller to deal with the return value, * @encl: enclave that owns the given EPC page
* including printing it to the klog if it wants to do such a thing.
*
* @entry: an EPC page
* @encl: the enclave who owns the EPC page (optional)
*
* Return: SGX error code
*/ */
int sgx_free_page(struct sgx_epc_page *entry, struct sgx_encl *encl) void sgx_free_page(struct sgx_epc_page *entry, struct sgx_encl *encl)
{ {
void *epc; void *epc;
int ret; int ret;
@ -557,41 +542,26 @@ int sgx_free_page(struct sgx_epc_page *entry, struct sgx_encl *encl)
ret = __eremove(epc); ret = __eremove(epc);
sgx_put_page(epc); sgx_put_page(epc);
if (ret) { if (ret)
if (encl)
sgx_crit(encl, "EREMOVE returned %d\n", ret); sgx_crit(encl, "EREMOVE returned %d\n", ret);
return ret;
}
spin_lock(&sgx_free_list_lock); spin_lock(&sgx_free_list_lock);
list_add(&entry->free_list, &sgx_free_list); list_add(&entry->list, &sgx_free_list);
sgx_nr_free_pages++; sgx_nr_free_pages++;
spin_unlock(&sgx_free_list_lock); spin_unlock(&sgx_free_list_lock);
return 0;
} }
EXPORT_SYMBOL(sgx_free_page);
void *sgx_get_page(struct sgx_epc_page *entry) void *sgx_get_page(struct sgx_epc_page *entry)
{ {
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
return kmap_atomic_pfn(PFN_DOWN(entry->pa)); return kmap_atomic_pfn(PFN_DOWN(entry->pa));
#else #else
int i; int i = ((entry->pa) & ~PAGE_MASK);
for (i = 0; i < sgx_nr_epc_banks; i++) { return (void *)(sgx_epc_banks[i].va +
if (entry->pa < sgx_epc_banks[i].end && ((entry->pa & PAGE_MASK) - sgx_epc_banks[i].pa));
entry->pa >= sgx_epc_banks[i].start) {
return sgx_epc_banks[i].mem +
(entry->pa - sgx_epc_banks[i].start);
}
}
return NULL;
#endif #endif
} }
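
The rework threads the bank index through the low 12 bits of the page-aligned EPC physical address: sgx_add_epc_bank() stores pa | bank and sgx_get_page() masks it back out, replacing the old linear search over banks. A round-trip check of the encoding (assuming, as the driver does, that the bank index fits well below PAGE_SIZE):

#include <assert.h>
#include <stdint.h>

#define PAGE_MASK (~0xfffULL)

/* Tag a page-aligned physical address with its bank index, the way
 * sgx_add_epc_bank() does, then peel the tag back off the way
 * sgx_get_page() does. */
static uint64_t tag_pa(uint64_t pa, unsigned int bank)
{
        return (pa & PAGE_MASK) | bank;
}

int main(void)
{
        uint64_t tagged = tag_pa(0x80000000, 3);

        assert((tagged & ~PAGE_MASK) == 3);           /* bank index */
        assert((tagged & PAGE_MASK) == 0x80000000);   /* physical page */
        return 0;
}
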
EXPORT_SYMBOL(sgx_get_page);
void sgx_put_page(void *epc_page_vaddr) void sgx_put_page(void *epc_page_vaddr)
{ {
@ -600,4 +570,3 @@ void sgx_put_page(void *epc_page_vaddr)
#else #else
#endif #endif
} }
EXPORT_SYMBOL(sgx_put_page);


@ -53,8 +53,6 @@
* *
* Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com> * Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
* Suresh Siddha <suresh.b.siddha@intel.com> * Suresh Siddha <suresh.b.siddha@intel.com>
* Serge Ayoun <serge.ayoun@intel.com>
* Shay Katz-zamir <shay.katz-zamir@intel.com>
*/ */
#ifndef _UAPI_ASM_X86_SGX_H #ifndef _UAPI_ASM_X86_SGX_H
@ -89,7 +87,7 @@
#define SGX_CHILD_PRESENT 13 #define SGX_CHILD_PRESENT 13
#define SGX_ENCLAVE_ACT 14 #define SGX_ENCLAVE_ACT 14
#define SGX_ENTRYEPOCH_LOCKED 15 #define SGX_ENTRYEPOCH_LOCKED 15
#define SGX_INVALID_LICENSE 16 #define SGX_INVALID_EINITTOKEN 16
#define SGX_PREV_TRK_INCMPL 17 #define SGX_PREV_TRK_INCMPL 17
#define SGX_PG_IS_SECS 18 #define SGX_PG_IS_SECS 18
#define SGX_INVALID_CPUSVN 32 #define SGX_INVALID_CPUSVN 32
@ -108,7 +106,7 @@
*/ */
struct sgx_enclave_create { struct sgx_enclave_create {
__u64 src; __u64 src;
} __packed; } __attribute__((__packed__));
/** /**
* struct sgx_enclave_add_page - parameter structure for the * struct sgx_enclave_add_page - parameter structure for the
@ -123,23 +121,19 @@ struct sgx_enclave_add_page {
__u64 src; __u64 src;
__u64 secinfo; __u64 secinfo;
__u16 mrmask; __u16 mrmask;
} __packed; } __attribute__((__packed__));
/** /**
* struct sgx_enclave_init - parameter structure for the * struct sgx_enclave_init - parameter structure for the
* %SGX_IOC_ENCLAVE_INIT ioctl * %SGX_IOC_ENCLAVE_INIT ioctl
* @addr: address in the ELRANGE * @addr: address in the ELRANGE
* @sigstruct: address for the page data * @sigstruct: address for the page data
* @einittoken: address for the SECINFO data * @einittoken: EINITTOKEN
*/ */
struct sgx_enclave_init { struct sgx_enclave_init {
__u64 addr; __u64 addr;
__u64 sigstruct; __u64 sigstruct;
__u64 einittoken; __u64 einittoken;
} __packed; } __attribute__((__packed__));
struct sgx_enclave_destroy {
__u64 addr;
} __packed;
#endif /* _UAPI_ASM_X86_SGX_H */ #endif /* _UAPI_ASM_X86_SGX_H */
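
For reference, this is roughly how userspace drives the ioctls declared in this header. The request numbers and the SECS layout are not part of this hunk -- SGX_MAGIC and the _IOW encoding below are assumptions matching this driver generation, and building a valid SECS/sigstruct is the SDK's job -- so treat the sketch as illustrative only:

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>

struct sgx_enclave_create {
        uint64_t src;                   /* userspace address of the SECS */
} __attribute__((__packed__));

/* Assumed request encoding -- not shown in the hunk above. */
#define SGX_MAGIC 0xA4
#define SGX_IOC_ENCLAVE_CREATE \
        _IOW(SGX_MAGIC, 0x00, struct sgx_enclave_create)

static int enclave_create(int sgx_fd, const void *secs)
{
        struct sgx_enclave_create parm = {
                .src = (uint64_t)(uintptr_t)secs,
        };

        /* The driver expects the ELRANGE to be mmap()ed on sgx_fd
         * before this call so it can find the enclave VMA. */
        return ioctl(sgx_fd, SGX_IOC_ENCLAVE_CREATE, &parm);
}

int main(void)
{
        int fd = open("/dev/isgx", O_RDWR);   /* misc device name "isgx" */

        if (fd < 0)
                return 1;
        /* ... mmap the enclave range, build a SECS, call enclave_create(),
         * then SGX_IOC_ENCLAVE_ADD_PAGE per page and SGX_IOC_ENCLAVE_INIT. */
        close(fd);
        return 0;
}
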


@ -4,7 +4,7 @@
* *
* GPL LICENSE SUMMARY * GPL LICENSE SUMMARY
* *
* Copyright(c) 2016 Intel Corporation. * Copyright(c) 2016-2017 Intel Corporation.
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as * it under the terms of version 2 of the GNU General Public License as
@ -21,7 +21,7 @@
* *
* BSD LICENSE * BSD LICENSE
* *
* Copyright(c) 2016 Intel Corporation. * Copyright(c) 2016-2017 Intel Corporation.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions * modification, are permitted provided that the following conditions
@ -61,12 +61,11 @@
#include "sgx.h" #include "sgx.h"
#include <linux/highmem.h> #include <linux/highmem.h>
#include <linux/shmem_fs.h> #include <linux/shmem_fs.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)) #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
#include <linux/sched/signal.h> #include <linux/sched/mm.h>
#else #else
#include <linux/signal.h> #include <linux/mm.h>
#endif #endif
#include "linux/file.h"
struct page *sgx_get_backing(struct sgx_encl *encl, struct page *sgx_get_backing(struct sgx_encl *encl,
struct sgx_encl_page *entry, struct sgx_encl_page *entry,
@ -101,23 +100,13 @@ void sgx_put_backing(struct page *backing_page, bool write)
put_page(backing_page); put_page(backing_page);
} }
struct vm_area_struct *sgx_find_vma(struct sgx_encl *encl, unsigned long addr)
{
struct vm_area_struct *vma;
vma = find_vma(encl->mm, addr);
if (vma && encl == vma->vm_private_data)
return vma;
sgx_dbg(encl, "cannot find VMA at 0x%lx\n", addr);
return NULL;
}
void sgx_zap_tcs_ptes(struct sgx_encl *encl, struct vm_area_struct *vma) void sgx_zap_tcs_ptes(struct sgx_encl *encl, struct vm_area_struct *vma)
{ {
struct sgx_epc_page *tmp;
struct sgx_encl_page *entry; struct sgx_encl_page *entry;
list_for_each_entry(entry, &encl->load_list, load_list) { list_for_each_entry(tmp, &encl->load_list, list) {
entry = tmp->encl_page;
if ((entry->flags & SGX_ENCL_PAGE_TCS) && if ((entry->flags & SGX_ENCL_PAGE_TCS) &&
entry->addr >= vma->vm_start && entry->addr >= vma->vm_start &&
entry->addr < vma->vm_end) entry->addr < vma->vm_end)
@ -129,11 +118,12 @@ void sgx_invalidate(struct sgx_encl *encl, bool flush_cpus)
{ {
struct vm_area_struct *vma; struct vm_area_struct *vma;
unsigned long addr; unsigned long addr;
int ret;
for (addr = encl->base; addr < (encl->base + encl->size); for (addr = encl->base; addr < (encl->base + encl->size);
addr = vma->vm_end) { addr = vma->vm_end) {
vma = sgx_find_vma(encl, addr); ret = sgx_encl_find(encl->mm, addr, &vma);
if (vma) if (!ret && encl == vma->vm_private_data)
sgx_zap_tcs_ptes(encl, vma); sgx_zap_tcs_ptes(encl, vma);
else else
break; break;
@ -154,38 +144,6 @@ void sgx_flush_cpus(struct sgx_encl *encl)
on_each_cpu_mask(mm_cpumask(encl->mm), sgx_ipi_cb, NULL, 1); on_each_cpu_mask(mm_cpumask(encl->mm), sgx_ipi_cb, NULL, 1);
} }
/**
* sgx_find_encl - find an enclave
* @mm: mm struct of the current process
* @addr: address in the ELRANGE
* @vma: the VMA that is located in the given address
*
* Finds an enclave identified by the given address. Gives back the VMA, that is
* part of the enclave, located in that address.
*/
int sgx_find_encl(struct mm_struct *mm, unsigned long addr,
struct vm_area_struct **vma)
{
struct sgx_encl *encl;
*vma = find_vma(mm, addr);
if (!(*vma) || (*vma)->vm_ops != &sgx_vm_ops ||
addr < (*vma)->vm_start)
return -EINVAL;
encl = (*vma)->vm_private_data;
if (!encl) {
pr_debug("%s: VMA exists but there is no enclave at 0x%p\n",
__func__, (void *)addr);
return -EINVAL;
}
if (encl->flags & SGX_ENCL_SUSPEND)
return SGX_POWER_LOST_ENCLAVE;
return 0;
}
static int sgx_eldu(struct sgx_encl *encl, static int sgx_eldu(struct sgx_encl *encl,
struct sgx_encl_page *encl_page, struct sgx_encl_page *encl_page,
struct sgx_epc_page *epc_page, struct sgx_epc_page *epc_page,
@ -194,7 +152,7 @@ static int sgx_eldu(struct sgx_encl *encl,
struct page *backing; struct page *backing;
struct page *pcmd; struct page *pcmd;
unsigned long pcmd_offset; unsigned long pcmd_offset;
struct sgx_page_info pginfo; struct sgx_pageinfo pginfo;
void *secs_ptr = NULL; void *secs_ptr = NULL;
void *epc_ptr; void *epc_ptr;
void *va_ptr; void *va_ptr;
@ -219,7 +177,7 @@ static int sgx_eldu(struct sgx_encl *encl,
} }
if (!is_secs) if (!is_secs)
secs_ptr = sgx_get_page(encl->secs_page.epc_page); secs_ptr = sgx_get_page(encl->secs.epc_page);
epc_ptr = sgx_get_page(epc_page); epc_ptr = sgx_get_page(epc_page);
va_ptr = sgx_get_page(encl_page->va_page->epc_page); va_ptr = sgx_get_page(encl_page->va_page->epc_page);
@ -253,7 +211,8 @@ out:
} }
static struct sgx_encl_page *sgx_do_fault(struct vm_area_struct *vma, static struct sgx_encl_page *sgx_do_fault(struct vm_area_struct *vma,
unsigned long addr, unsigned int flags) unsigned long addr,
unsigned int flags)
{ {
struct sgx_encl *encl = vma->vm_private_data; struct sgx_encl *encl = vma->vm_private_data;
struct sgx_encl_page *entry; struct sgx_encl_page *entry;
@ -317,11 +276,11 @@ static struct sgx_encl_page *sgx_do_fault(struct vm_area_struct *vma,
goto out; goto out;
} }
rc = sgx_eldu(encl, &encl->secs_page, secs_epc_page, true); rc = sgx_eldu(encl, &encl->secs, secs_epc_page, true);
if (rc) if (rc)
goto out; goto out;
encl->secs_page.epc_page = secs_epc_page; encl->secs.epc_page = secs_epc_page;
encl->flags &= ~SGX_ENCL_SECS_EVICTED; encl->flags &= ~SGX_ENCL_SECS_EVICTED;
/* Do not free */ /* Do not free */
@ -340,6 +299,7 @@ static struct sgx_encl_page *sgx_do_fault(struct vm_area_struct *vma,
*/ */
encl->secs_child_cnt++; encl->secs_child_cnt++;
epc_page->encl_page = entry;
entry->epc_page = epc_page; entry->epc_page = epc_page;
if (reserve) if (reserve)
@ -347,7 +307,7 @@ static struct sgx_encl_page *sgx_do_fault(struct vm_area_struct *vma,
/* Do not free */ /* Do not free */
epc_page = NULL; epc_page = NULL;
list_add_tail(&entry->load_list, &encl->load_list); list_add_tail(&entry->epc_page->list, &encl->load_list);
rc = vm_insert_pfn(vma, entry->addr, PFN_DOWN(entry->epc_page->pa)); rc = vm_insert_pfn(vma, entry->addr, PFN_DOWN(entry->epc_page->pa));
if (rc) { if (rc) {
@ -384,54 +344,33 @@ struct sgx_encl_page *sgx_fault_page(struct vm_area_struct *vma,
return entry; return entry;
} }
void sgx_encl_release(struct kref *ref) void sgx_eblock(struct sgx_encl *encl, struct sgx_epc_page *epc_page)
{ {
struct sgx_encl_page *entry; void *vaddr;
struct sgx_va_page *va_page; int ret;
struct sgx_encl *encl = container_of(ref, struct sgx_encl, refcount);
struct radix_tree_iter iter;
void **slot;
mutex_lock(&sgx_tgid_ctx_mutex); vaddr = sgx_get_page(epc_page);
if (!list_empty(&encl->encl_list)) ret = __eblock((unsigned long)vaddr);
list_del(&encl->encl_list); sgx_put_page(vaddr);
mutex_unlock(&sgx_tgid_ctx_mutex);
if (encl->mmu_notifier.ops) if (ret) {
mmu_notifier_unregister_no_release(&encl->mmu_notifier, sgx_crit(encl, "EBLOCK returned %d\n", ret);
encl->mm); sgx_invalidate(encl, true);
radix_tree_for_each_slot(slot, &encl->page_tree, &iter, 0) {
entry = *slot;
if (entry->epc_page) {
list_del(&entry->load_list);
sgx_free_page(entry->epc_page, encl);
}
radix_tree_delete(&encl->page_tree, entry->addr >> PAGE_SHIFT);
kfree(entry);
} }
while (!list_empty(&encl->va_pages)) { }
va_page = list_first_entry(&encl->va_pages,
struct sgx_va_page, list); void sgx_etrack(struct sgx_encl *encl)
list_del(&va_page->list); {
sgx_free_page(va_page->epc_page, encl); void *epc;
kfree(va_page); int ret;
}
epc = sgx_get_page(encl->secs.epc_page);
if (encl->secs_page.epc_page) ret = __etrack(epc);
sgx_free_page(encl->secs_page.epc_page, encl); sgx_put_page(epc);
encl->secs_page.epc_page = NULL; if (ret) {
sgx_crit(encl, "ETRACK returned %d\n", ret);
if (encl->tgid_ctx) sgx_invalidate(encl, true);
kref_put(&encl->tgid_ctx->refcount, sgx_tgid_ctx_release); }
if (encl->backing)
fput(encl->backing);
if (encl->pcmd)
fput(encl->pcmd);
kfree(encl);
} }


@ -72,6 +72,7 @@
static void sgx_vma_open(struct vm_area_struct *vma) static void sgx_vma_open(struct vm_area_struct *vma)
{ {
struct sgx_encl *encl = vma->vm_private_data; struct sgx_encl *encl = vma->vm_private_data;
if (!encl) if (!encl)
return; return;
@ -84,6 +85,7 @@ static void sgx_vma_open(struct vm_area_struct *vma)
static void sgx_vma_close(struct vm_area_struct *vma) static void sgx_vma_close(struct vm_area_struct *vma)
{ {
struct sgx_encl *encl = vma->vm_private_data; struct sgx_encl *encl = vma->vm_private_data;
if (!encl) if (!encl)
return; return;