[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 06/15] xen: x86: add SGX basic EPC management



EPC is a limited resource reserved by the BIOS. Typically the EPC size ranges
from dozens of MB to more than a hundred MB. EPC is reported as reserved memory
in the e820 table, not as normal memory, and must be managed in 4K pages.

From an implementation point of view, we can either manage EPC separately, or
extend the existing memory management code to support EPC. The latter has the
advantage of reusing the existing memory management algorithms, but is more
complicated to implement (and thus more risky), while the former is simpler but
requires its own EPC management algorithm. Currently we choose the former.
Given that the EPC size is small, we simply put all EPC pages into a single
list, so allocation and freeing are very straightforward.

Just as there is one 'struct page_info' for each memory page, a 'struct
epc_page' is added to represent the status of each EPC page, and all 'struct
epc_page' entries live in an array allocated during SGX initialization. The
entire EPC is also mapped into Xen's virtual address space so that each EPC
page's virtual address can be calculated as base virtual address + offset.

Signed-off-by: Kai Huang <kai.huang@xxxxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/vmx/sgx.c        | 154 ++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/hvm/vmx/sgx.h |  19 +++++
 2 files changed, 173 insertions(+)

diff --git a/xen/arch/x86/hvm/vmx/sgx.c b/xen/arch/x86/hvm/vmx/sgx.c
index 6b41469371..f4c9b2f933 100644
--- a/xen/arch/x86/hvm/vmx/sgx.c
+++ b/xen/arch/x86/hvm/vmx/sgx.c
@@ -7,12 +7,89 @@
 #include <asm/cpufeature.h>
 #include <asm/msr-index.h>
 #include <asm/msr.h>
+#include <xen/errno.h>
+#include <xen/mm.h>
 #include <asm/hvm/vmx/sgx.h>
 #include <asm/hvm/vmx/vmcs.h>
 
 static struct sgx_cpuinfo __read_mostly sgx_cpudata[NR_CPUS];
 static struct sgx_cpuinfo __read_mostly boot_sgx_cpudata;
 
+/*
+ * epc_frametable keeps an array of struct epc_page, one entry per EPC page,
+ * so that epc_page_to_mfn() and epc_mfn_to_page() are simple offset
+ * arithmetic. The array is allocated dynamically during SGX initialization
+ * according to the machine's EPC size.
+ */
+static struct epc_page *epc_frametable = NULL;
+/*
+ * The entire EPC is mapped into Xen's virtual address space at once, so that
+ * each EPC page's virtual address is epc_base_vaddr + offset.
+ */
+static void *epc_base_vaddr = NULL;
+
+/* Global free EPC pages list, protected by epc_lock. */
+static struct list_head free_epc_list;
+static spinlock_t epc_lock;
+
+/* Total number of EPC pages / base MFN, from the boot CPU's SGX CPUID data. */
+#define total_epc_npages (boot_sgx_cpudata.epc_size >> PAGE_SHIFT)
+#define epc_base_mfn (boot_sgx_cpudata.epc_base >> PAGE_SHIFT)
+
+/* Current number of free EPC pages in free_epc_list */
+static unsigned long free_epc_npages = 0;
+
+/* Convert an EPC frametable entry to its machine frame number. */
+unsigned long epc_page_to_mfn(struct epc_page *epg)
+{
+    BUG_ON(!epc_base_mfn);
+    BUG_ON(!epc_frametable);
+
+    /* The frametable index of @epg is its MFN offset from the EPC base. */
+    return (epg - epc_frametable) + epc_base_mfn;
+}
+
+/* Convert a machine frame number inside the EPC to its frametable entry. */
+struct epc_page *epc_mfn_to_page(unsigned long mfn)
+{
+    BUG_ON(!epc_base_mfn);
+    BUG_ON(!epc_frametable);
+
+    /* Index into the frametable is the MFN's offset from the EPC base. */
+    return &epc_frametable[mfn - epc_base_mfn];
+}
+
+/*
+ * Allocate one EPC page from the global free list.
+ *
+ * Returns the page's frametable entry, or NULL when no free EPC page is
+ * available.
+ */
+struct epc_page *alloc_epc_page(void)
+{
+    struct epc_page *epg;
+
+    spin_lock(&epc_lock);
+
+    epg = list_first_entry_or_null(&free_epc_list, struct epc_page, list);
+    if ( epg )
+    {
+        list_del(&epg->list);
+        free_epc_npages--;
+    }
+
+    spin_unlock(&epc_lock);
+
+    return epg;
+}
+
+/* Return an EPC page to the global free list. */
+void free_epc_page(struct epc_page *epg)
+{
+    spin_lock(&epc_lock);
+    list_add_tail(&epg->list, &free_epc_list);
+    free_epc_npages++;
+    spin_unlock(&epc_lock);
+}
+
+/*
+ * Return the Xen virtual address of an EPC page. The whole EPC is mapped
+ * contiguously at epc_base_vaddr, so this is pure offset arithmetic and
+ * cannot fail.
+ */
+void *map_epc_page_to_xen(struct epc_page *epg)
+{
+    unsigned long offset;
+
+    BUG_ON(!epc_base_vaddr);
+    BUG_ON(!epc_frametable);
+
+    offset = (epg - epc_frametable) << PAGE_SHIFT;
+
+    return (void *)((unsigned long)epc_base_vaddr + offset);
+}
+
+void unmap_epc_page(void *addr)
+{
+    /*
+     * Nothing to do: the entire EPC stays permanently mapped at
+     * epc_base_vaddr, so there is no per-page mapping to tear down. Kept
+     * as a hook in case a map-on-demand scheme is adopted later.
+     */
+}
+
 static bool_t sgx_enabled_in_bios(void)
 {
     uint64_t val, sgx_enabled = IA32_FEATURE_CONTROL_SGX_ENABLE |
@@ -177,6 +254,80 @@ static bool_t __init check_sgx_consistency(void)
     return true;
 }
 
+/*
+ * Return the smallest allocation order such that (1 << order) pages can
+ * hold @npages pages.
+ */
+static inline int npages_to_order(unsigned long npages)
+{
+    int order = 0;
+
+    /*
+     * Shift 1UL rather than the int literal 1: @npages is unsigned long,
+     * and an int-width shift would overflow (undefined behaviour) before
+     * the comparison could terminate the loop for large page counts.
+     */
+    while ( (1UL << order) < npages )
+        order++;
+
+    return order;
+}
+
+/* Allocation order of the frametable covering @npages EPC pages. */
+static int epc_frametable_order(unsigned long npages)
+{
+    unsigned long table_pages;
+
+    /*
+     * Round up: a trailing partial page of 'struct epc_page' entries still
+     * needs a whole page. Rounding down here would under-allocate the
+     * frametable whenever the table size is not a multiple of PAGE_SIZE.
+     */
+    table_pages = (npages * sizeof(struct epc_page) + PAGE_SIZE - 1) >>
+                  PAGE_SHIFT;
+
+    return npages_to_order(table_pages);
+}
+
+/*
+ * Allocate the EPC frametable for @npages EPC pages and put every page on
+ * the global free list. Returns 0 on success, -ENOMEM on allocation failure.
+ */
+static int __init init_epc_frametable(unsigned long npages)
+{
+    unsigned long i;
+
+    epc_frametable = alloc_xenheap_pages(epc_frametable_order(npages), 0);
+    if ( !epc_frametable )
+        return -ENOMEM;
+
+    for ( i = 0; i < npages; i++ )
+    {
+        struct epc_page *epg = epc_frametable + i;
+
+        list_add_tail(&epg->list, &free_epc_list);
+    }
+
+    return 0;
+}
+
+/* Free the EPC frametable allocated by init_epc_frametable(). */
+static void destroy_epc_frametable(unsigned long npages)
+{
+    if ( !epc_frametable )
+        return;
+
+    /* Same order computation as the allocation, so alloc/free always match. */
+    free_xenheap_pages(epc_frametable, epc_frametable_order(npages));
+    epc_frametable = NULL;
+}
+
+/*
+ * One-time EPC setup: allocate the frametable, populate the global free
+ * list, and map the whole EPC into Xen's virtual address space.
+ *
+ * Returns 0 on success, negative errno on failure (caller disables SGX).
+ */
+static int __init sgx_init_epc(void)
+{
+    int r;
+
+    INIT_LIST_HEAD(&free_epc_list);
+    spin_lock_init(&epc_lock);
+
+    r = init_epc_frametable(total_epc_npages);
+    if ( r )
+    {
+        printk("Failed to allocate EPC frametable. Disable SGX.\n");
+        return r;
+    }
+
+    epc_base_vaddr = ioremap_cache(epc_base_mfn << PAGE_SHIFT,
+            total_epc_npages << PAGE_SHIFT);
+    if ( !epc_base_vaddr )
+    {
+        printk("Failed to ioremap_cache EPC. Disable SGX.\n");
+        /*
+         * The free list points into the frametable being destroyed; reset
+         * it so no stale entries survive. Mapping failure is resource
+         * exhaustion, so report -ENOMEM rather than -EFAULT.
+         */
+        destroy_epc_frametable(total_epc_npages);
+        INIT_LIST_HEAD(&free_epc_list);
+        return -ENOMEM;
+    }
+
+    free_epc_npages = total_epc_npages;
+
+    return 0;
+}
+
 static int __init sgx_init(void)
 {
     /* Assume CPU 0 is always online */
@@ -188,6 +339,9 @@ static int __init sgx_init(void)
     if ( !check_sgx_consistency() )
         goto not_supported;
 
+    if ( sgx_init_epc() )
+        goto not_supported;
+
     print_sgx_cpuinfo(&boot_sgx_cpudata);
 
     return 0;
diff --git a/xen/include/asm-x86/hvm/vmx/sgx.h 
b/xen/include/asm-x86/hvm/vmx/sgx.h
index 5414d8237e..ff420e006e 100644
--- a/xen/include/asm-x86/hvm/vmx/sgx.h
+++ b/xen/include/asm-x86/hvm/vmx/sgx.h
@@ -12,6 +12,7 @@
 #include <xen/types.h>
 #include <xen/init.h>
 #include <asm/processor.h>
+#include <xen/list.h>
 
 #define SGX_CPUID 0x12
 
@@ -42,4 +43,22 @@ struct sgx_cpuinfo {
 /* Detect SGX info for particular CPU via SGX CPUID */
 void detect_sgx(int cpu);
 
+/*
+ * EPC page information structure. Each EPC page has one struct epc_page
+ * keeping its state, just like struct page_info for normal memory.
+ *
+ * So far in reality a machine's EPC size won't exceed 100MB, so currently
+ * all free EPC pages are simply kept in one global free list.
+ */
+struct epc_page {
+    struct list_head list;  /* all free EPC pages are in global free list. */
+};
+
+/* Allocate one free EPC page; returns NULL if none is available. */
+struct epc_page *alloc_epc_page(void);
+/* Return an EPC page to the global free list. */
+void free_epc_page(struct epc_page *epg);
+/* Convert between an EPC frametable entry and its machine frame number. */
+unsigned long epc_page_to_mfn(struct epc_page *epg);
+struct epc_page *epc_mfn_to_page(unsigned long mfn);
+/* Map/unmap an EPC page into Xen's address space (EPC is always mapped). */
+void *map_epc_page_to_xen(struct epc_page *epg);
+void unmap_epc_page(void *addr);
+
 #endif  /* __ASM_X86_HVM_VMX_SGX_H__ */
-- 
2.11.0


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.