
[Xen-devel] [PATCH 15/18] arm/altp2m: Add altp2m paging mechanism.

This commit adds the function p2m_altp2m_lazy_copy, which implements
the altp2m paging mechanism: on a 2nd stage instruction or data access
violation, it lazily copies the hostp2m's mapping of the faulting
address into the currently active altp2m view. Every altp2m access
violation generates a vm_event.

Signed-off-by: Sergej Proskurin <proskurin@xxxxxxxxxxxxx>
---
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Julien Grall <julien.grall@xxxxxxx>
---
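
Note (not part of the patch): for illustration, a minimal user-space
sketch of how a privileged monitor might exercise this lazy-copy path.
It assumes the existing xc_altp2m_* libxc wrappers (introduced for x86)
become usable on ARM once this series is applied; the function name and
error handling below are illustrative only:

    #include <stdbool.h>
    #include <xenctrl.h>

    /* Sketch only: drive the lazy-copy path from a privileged domain. */
    static int monitor_switch_to_empty_view(xc_interface *xch, uint32_t domid)
    {
        uint16_t view_id;

        /* Enable the alternate-p2m machinery for the domain. */
        if ( xc_altp2m_set_domain_state(xch, domid, true) )
            return -1;

        /* Create a new, initially empty altp2m view. */
        if ( xc_altp2m_create_view(xch, domid, XENMEM_access_rwx, &view_id) )
            return -1;

        /*
         * Switch to the empty view: every subsequent 2nd stage
         * translation fault takes the p2m_altp2m_lazy_copy path below,
         * which copies the hostp2m entry into the active view, while
         * access violations generate vm_events for the monitor.
         */
        return xc_altp2m_switch_to_view(xch, domid, view_id);
    }
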
 xen/arch/arm/p2m.c           | 130 ++++++++++++++++++++++++++++++++++++++++++-
 xen/arch/arm/traps.c         | 102 +++++++++++++++++++++++++++------
 xen/include/asm-arm/altp2m.h |   4 +-
 xen/include/asm-arm/p2m.h    |  17 ++++--
 4 files changed, 224 insertions(+), 29 deletions(-)

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 395ea0f..96892a5 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -15,6 +15,7 @@
 #include <asm/hardirq.h>
 #include <asm/page.h>
 
+#include <asm/vm_event.h>
 #include <asm/altp2m.h>
 
 #ifdef CONFIG_ARM_64
@@ -1955,6 +1956,12 @@ void __init setup_virt_paging(void)
     smp_call_function(setup_virt_paging_one, (void *)val, 1);
 }
 
+void p2m_altp2m_check(struct vcpu *v, uint16_t idx)
+{
+    if ( altp2m_active(v->domain) )
+        p2m_switch_vcpu_altp2m_by_id(v, idx);
+}
+
 bool_t p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec)
 {
     int rc;
@@ -1962,13 +1969,14 @@ bool_t p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec)
     xenmem_access_t xma;
     vm_event_request_t *req;
     struct vcpu *v = current;
-    struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
+    struct domain *d = v->domain;
+    struct p2m_domain *p2m = altp2m_active(d) ? p2m_get_altp2m(v) : p2m_get_hostp2m(d);
 
     /* Mem_access is not in use. */
     if ( !p2m->mem_access_enabled )
         return true;
 
-    rc = p2m_get_mem_access(v->domain, _gfn(paddr_to_pfn(gpa)), &xma);
+    rc = p2m_get_mem_access(d, _gfn(paddr_to_pfn(gpa)), &xma);
     if ( rc )
         return true;
 
@@ -2074,6 +2082,14 @@ bool_t p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec)
         req->u.mem_access.flags |= npfec.insn_fetch     ? MEM_ACCESS_X : 0;
         req->vcpu_id = v->vcpu_id;
 
+        vm_event_fill_regs(req);
+
+        if ( altp2m_active(d) )
+        {
+            req->flags |= VM_EVENT_FLAG_ALTERNATE_P2M;
+            req->altp2m_idx = vcpu_altp2m(v).p2midx;
+        }
+
         mem_access_send_req(v->domain, req);
         xfree(req);
     }
@@ -2356,6 +2372,116 @@ struct p2m_domain *p2m_get_altp2m(struct vcpu *v)
     return v->domain->arch.altp2m_p2m[index];
 }
 
+bool_t p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, unsigned int idx)
+{
+    struct domain *d = v->domain;
+    bool_t rc = 0;
+
+    if ( idx >= MAX_ALTP2M )
+        return rc;
+
+    altp2m_lock(d);
+
+    if ( d->arch.altp2m_vttbr[idx] != INVALID_MFN )
+    {
+        if ( idx != vcpu_altp2m(v).p2midx )
+        {
+            atomic_dec(&p2m_get_altp2m(v)->active_vcpus);
+            vcpu_altp2m(v).p2midx = idx;
+            atomic_inc(&p2m_get_altp2m(v)->active_vcpus);
+        }
+        rc = 1;
+    }
+
+    altp2m_unlock(d);
+
+    return rc;
+}
+
+/*
+ * If the fault is for a not present entry:
+ *     if the entry in the host p2m has a valid mfn, copy it and retry
+ *     else indicate that outer handler should handle fault
+ *
+ * If the fault is for a present entry:
+ *     indicate that outer handler should handle fault
+ */
+bool_t p2m_altp2m_lazy_copy(struct vcpu *v, paddr_t gpa,
+                            unsigned long gva, struct npfec npfec,
+                            struct p2m_domain **ap2m)
+{
+    struct domain *d = v->domain;
+    struct p2m_domain *hp2m = p2m_get_hostp2m(d);
+    p2m_type_t p2mt;
+    xenmem_access_t xma;
+    paddr_t maddr, mask = 0;
+    gfn_t gfn = _gfn(paddr_to_pfn(gpa));
+    unsigned int level;
+    unsigned long mattr;
+    int rc = 0;
+
+    static const p2m_access_t memaccess[] = {
+#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
+        ACCESS(n),
+        ACCESS(r),
+        ACCESS(w),
+        ACCESS(rw),
+        ACCESS(x),
+        ACCESS(rx),
+        ACCESS(wx),
+        ACCESS(rwx),
+        ACCESS(rx2rw),
+        ACCESS(n2rwx),
+#undef ACCESS
+    };
+
+    *ap2m = p2m_get_altp2m(v);
+    if ( *ap2m == NULL )
+        return 0;
+
+    /* Check if entry is part of the altp2m view */
+    spin_lock(&(*ap2m)->lock);
+    maddr = __p2m_lookup(*ap2m, gpa, NULL);
+    spin_unlock(&(*ap2m)->lock);
+    if ( maddr != INVALID_PADDR )
+        return 0;
+
+    /* Check if entry is part of the host p2m view */
+    spin_lock(&hp2m->lock);
+    maddr = __p2m_lookup(hp2m, gpa, &p2mt);
+    if ( maddr == INVALID_PADDR )
+        goto out;
+
+    rc = __p2m_get_mem_access(hp2m, gfn, &xma);
+    if ( rc )
+        goto out;
+
+    rc = p2m_get_gfn_level_and_attr(hp2m, gpa, &level, &mattr);
+    if ( rc )
+        goto out;
+    spin_unlock(&hp2m->lock);
+
+    mask = level_masks[level];
+
+    rc = apply_p2m_changes(d, *ap2m, INSERT,
+                           pfn_to_paddr(gfn_x(gfn)) & mask,
+                           (pfn_to_paddr(gfn_x(gfn)) + level_sizes[level]) & mask,
+                           maddr & mask, mattr, 0, p2mt,
+                           memaccess[xma]);
+    if ( rc )
+    {
+        gdprintk(XENLOG_ERR, "failed to set entry for %lx -> %lx p2m %lx\n",
+                (unsigned long)pfn_to_paddr(gfn_x(gfn)), (unsigned long)(maddr), (unsigned long)*ap2m);
+        domain_crash(hp2m->domain);
+    }
+
+    return 1;
+
+out:
+    spin_unlock(&hp2m->lock);
+    return 0;
+}
+
 static void p2m_init_altp2m_helper(struct domain *d, unsigned int i)
 {
     struct p2m_domain *p2m = d->arch.altp2m_p2m[i];
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 6995971..78db2cf 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -48,6 +48,8 @@
 #include <asm/gic.h>
 #include <asm/vgic.h>
 
+#include <asm/altp2m.h>
+
 /* The base of the stack must always be double-word aligned, which means
  * that both the kernel half of struct cpu_user_regs (which is pushed in
  * entry.S) and struct cpu_info (which lives at the bottom of a Xen
@@ -2383,35 +2385,64 @@ static void do_trap_instr_abort_guest(struct cpu_user_regs *regs,
 {
     int rc;
     register_t gva = READ_SYSREG(FAR_EL2);
+    struct vcpu *v = current;
+    struct domain *d = v->domain;
+    struct p2m_domain *p2m = NULL;
+    paddr_t gpa;
+
+    if ( hsr.iabt.s1ptw )
+        gpa = get_faulting_ipa();
+    else
+    {
+        /*
+         * Flush the TLB to make sure the DTLB is clear before
+         * doing GVA->IPA translation. If we got here because of
+         * an entry only present in the ITLB, this translation may
+         * still be inaccurate.
+         */
+        flush_tlb_local();
+
+        rc = gva_to_ipa(gva, &gpa, GV2M_READ);
+        if ( rc == -EFAULT )
+            goto bad_insn_abort;
+    }
 
     switch ( hsr.iabt.ifsc & 0x3f )
     {
+    case FSC_FLT_TRANS ... FSC_FLT_TRANS + 3:
+    {
+        if ( altp2m_active(d) )
+        {
+            const struct npfec npfec = {
+                .insn_fetch = 1,
+                .gla_valid = 1,
+                .kind = hsr.iabt.s1ptw ? npfec_kind_in_gpt : npfec_kind_with_gla
+            };
+
+            /*
+             * Copy the entire page of the failing instruction into the
+             * currently active altp2m view.
+             */
+            if ( p2m_altp2m_lazy_copy(v, gpa, gva, npfec, &p2m) )
+                return;
+
+            rc = p2m_mem_access_check(gpa, gva, npfec);
+
+            /* Trap was triggered by mem_access, work here is done */
+            if ( !rc )
+                return;
+        }
+
+        break;
+    }
     case FSC_FLT_PERM ... FSC_FLT_PERM + 3:
     {
-        paddr_t gpa;
         const struct npfec npfec = {
             .insn_fetch = 1,
             .gla_valid = 1,
             .kind = hsr.iabt.s1ptw ? npfec_kind_in_gpt : npfec_kind_with_gla
         };
 
-        if ( hsr.iabt.s1ptw )
-            gpa = get_faulting_ipa();
-        else
-        {
-            /*
-             * Flush the TLB to make sure the DTLB is clear before
-             * doing GVA->IPA translation. If we got here because of
-             * an entry only present in the ITLB, this translation may
-             * still be inaccurate.
-             */
-            flush_tlb_local();
-
-            rc = gva_to_ipa(gva, &gpa, GV2M_READ);
-            if ( rc == -EFAULT )
-                goto bad_insn_abort;
-        }
-
         rc = p2m_mem_access_check(gpa, gva, npfec);
 
         /* Trap was triggered by mem_access, work here is done */
@@ -2429,6 +2460,8 @@ static void do_trap_data_abort_guest(struct cpu_user_regs *regs,
                                      const union hsr hsr)
 {
     const struct hsr_dabt dabt = hsr.dabt;
+    struct vcpu *v = current;
+    struct p2m_domain *p2m = NULL;
     int rc;
     mmio_info_t info;
 
@@ -2449,6 +2482,12 @@ static void do_trap_data_abort_guest(struct cpu_user_regs *regs,
         info.gpa = get_faulting_ipa();
     else
     {
+        /*
+         * When using altp2m, this flush is required to get rid of old TLB
+         * entries and use the new, lazily copied, ap2m entries.
+         */
+        flush_tlb_local();
+
         rc = gva_to_ipa(info.gva, &info.gpa, GV2M_READ);
         if ( rc == -EFAULT )
             goto bad_data_abort;
@@ -2456,6 +2495,33 @@ static void do_trap_data_abort_guest(struct cpu_user_regs *regs,
 
     switch ( dabt.dfsc & 0x3f )
     {
+    case FSC_FLT_TRANS ... FSC_FLT_TRANS + 3:
+    {
+        if ( altp2m_active(v->domain) )
+        {
+            const struct npfec npfec = {
+                .read_access = !dabt.write,
+                .write_access = dabt.write,
+                .gla_valid = 1,
+                .kind = dabt.s1ptw ? npfec_kind_in_gpt : npfec_kind_with_gla
+            };
+
+            /*
+             * Copy the entire page of the failing data access into the
+             * currently active altp2m view.
+             */
+            if ( p2m_altp2m_lazy_copy(v, info.gpa, info.gva, npfec, &p2m) )
+                return;
+
+            rc = p2m_mem_access_check(info.gpa, info.gva, npfec);
+
+            /* Trap was triggered by mem_access, work here is done */
+            if ( !rc )
+                return;
+        }
+
+        break;
+    }
     case FSC_FLT_PERM ... FSC_FLT_PERM + 3:
     {
         const struct npfec npfec = {
diff --git a/xen/include/asm-arm/altp2m.h b/xen/include/asm-arm/altp2m.h
index ec4aa09..2a87d14 100644
--- a/xen/include/asm-arm/altp2m.h
+++ b/xen/include/asm-arm/altp2m.h
@@ -31,9 +31,7 @@ static inline bool_t altp2m_active(const struct domain *d)
 /* Alternate p2m VCPU */
 static inline uint16_t altp2m_vcpu_idx(const struct vcpu *v)
 {
-    /* Not implemented on ARM, should not be reached. */
-    BUG();
-    return 0;
+    return vcpu_altp2m(v).p2midx;
 }
 
 void altp2m_vcpu_initialise(struct vcpu *v);
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index 451b097..b82e4b9 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -115,12 +115,6 @@ void p2m_mem_access_emulate_check(struct vcpu *v,
     /* Not supported on ARM. */
 }
 
-static inline
-void p2m_altp2m_check(struct vcpu *v, uint16_t idx)
-{
-    /* Not supported on ARM. */
-}
-
 /*
  * Alternate p2m: shadow p2m tables used for alternate memory views.
  */
@@ -131,12 +125,23 @@ void p2m_altp2m_check(struct vcpu *v, uint16_t idx)
 /* Get current alternate p2m table */
 struct p2m_domain *p2m_get_altp2m(struct vcpu *v);
 
+/* Switch alternate p2m for a single vcpu */
+bool_t p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, unsigned int idx);
+
+/* Check to see if vcpu should be switched to a different p2m. */
+void p2m_altp2m_check(struct vcpu *v, uint16_t idx);
+
 /* Flush all the alternate p2m's for a domain */
 void p2m_flush_altp2m(struct domain *d);
 
 /* Make a specific alternate p2m valid */
 int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx);
 
+/* Alternate p2m paging */
+bool_t p2m_altp2m_lazy_copy(struct vcpu *v, paddr_t gpa,
+                            unsigned long gla, struct npfec npfec,
+                            struct p2m_domain **ap2m);
+
 /* Find an available alternate p2m and make it valid */
 int p2m_init_next_altp2m(struct domain *d, uint16_t *idx);
 
-- 
2.8.3

