
[Xen-devel] [PATCH v2 2/2] x86/mm: factor out the code for shattering an l2 PTE



map_pages_to_xen and modify_xen_mappings perform almost exactly the same
operations when shattering an l2 PTE; the only difference is whether we
want to flush.

Signed-off-by: Hongyan Xia <hongyxia@xxxxxxxxxx>

---
Changes in v2:
- improve asm.
- re-read pl2e from memory when taking the lock (see the sketch below).
- move the allocation of l1t inside the shatter function.
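
As context for the re-read-under-lock change above, here is a minimal,
self-contained sketch (plain C, not Xen code; a pthread mutex stands in
for map_pgdir_lock, and every fake_*/FAKE_* name is an illustrative
stand-in rather than a Xen API) of the pattern the helper follows:
populate the new table outside the lock, re-read the entry under the
lock, install it only if the entry is still a superpage, and free the
table if another CPU won the race.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define FAKE_ENTRIES   512        /* entries in the fake l1 table */
    #define FAKE_PAGE_SIZE 4096u
    #define FAKE_PSE       (1u << 7)  /* stand-in for _PAGE_PSE */

    typedef uint64_t fake_pte_t;

    static pthread_mutex_t fake_pgdir_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Shatter one fake superpage entry into a table of small entries. */
    static int fake_shatter(fake_pte_t *pl2e, bool locking)
    {
        fake_pte_t e = *pl2e & ~(fake_pte_t)FAKE_PSE;
        fake_pte_t *l1t = calloc(FAKE_ENTRIES, sizeof(*l1t));
        unsigned int i;

        if ( !l1t )
            return -1;

        /* The new table is still private, so no lock is needed here. */
        for ( i = 0; i < FAKE_ENTRIES; i++, e += FAKE_PAGE_SIZE )
            l1t[i] = e;

        if ( locking )
            pthread_mutex_lock(&fake_pgdir_lock);
        /* Re-read the entry under the lock; install only if still a
         * superpage. */
        if ( *pl2e & FAKE_PSE )
        {
            /* The real code does this with l2e_write_atomic(). */
            *pl2e = (fake_pte_t)(uintptr_t)l1t;
            l1t = NULL;  /* ownership moved into the page table */
        }
        if ( locking )
            pthread_mutex_unlock(&fake_pgdir_lock);

        /* Frees the table if another CPU shattered first; no-op otherwise. */
        free(l1t);
        return 0;
    }

    int main(void)
    {
        fake_pte_t l2e = 0x200000u | FAKE_PSE;  /* fake 2MiB superpage entry */
        return fake_shatter(&l2e, true);
    }

The conditional flush in the real helper has no analogue in the sketch,
since there is no TLB here; it only demonstrates the locking/ownership
pattern.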
---
 xen/arch/x86/mm.c | 95 ++++++++++++++++++++++++-----------------------
 1 file changed, 48 insertions(+), 47 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 6188a968ff..103c97b903 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5151,6 +5151,51 @@ l1_pgentry_t *virt_to_xen_l1e(unsigned long v)
                          flush_area_local((const void *)v, f) : \
                          flush_area_all((const void *)v, f))
 
+/* Shatter an l2 entry and populate l1. If virt is passed in, also do flush. */
+static int shatter_l2e(l2_pgentry_t *pl2e, unsigned long virt, bool locking)
+{
+    unsigned int i;
+    l2_pgentry_t ol2e;
+    l1_pgentry_t ol1e, *l1t = alloc_xen_pagetable();
+
+    if ( l1t == NULL )
+        return -1;
+
+    ol2e = *pl2e;
+    ol1e = l1e_from_paddr(l2e_get_paddr(ol2e),
+                          lNf_to_l1f(l2e_get_flags(ol2e)));
+
+    for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
+    {
+        l1e_write(l1t + i, ol1e);
+        ol1e = l1e_from_intpte(
+                l1e_get_intpte(ol1e) + (1 << PAGE_SHIFT));
+    }
+    if ( locking )
+        spin_lock(&map_pgdir_lock);
+    if ( (l2e_get_flags(*pl2e) & _PAGE_PRESENT) &&
+         (l2e_get_flags(*pl2e) & _PAGE_PSE) )
+    {
+        l2e_write_atomic(pl2e,
+                l2e_from_paddr((paddr_t)virt_to_maddr(l1t),
+                               __PAGE_HYPERVISOR));
+        l1t = NULL;
+    }
+    if ( locking )
+        spin_unlock(&map_pgdir_lock);
+    if ( virt )
+    {
+        unsigned int flush_flags =
+            FLUSH_TLB | FLUSH_ORDER(PAGETABLE_ORDER);
+
+        if ( l2e_get_flags(ol2e) & _PAGE_GLOBAL )
+            flush_flags |= FLUSH_TLB_GLOBAL;
+        flush_area(virt, flush_flags);
+    }
+    if ( l1t )
+        free_xen_pagetable(l1t);
+
+    return 0;
+}
+
 /* Shatter an l3 entry and populate l2. If virt is passed in, also do flush. */
 static int shatter_l3e(l3_pgentry_t *pl3e, unsigned long virt, bool locking)
 {
@@ -5363,9 +5408,6 @@ int map_pages_to_xen(
             }
             else if ( l2e_get_flags(*pl2e) & _PAGE_PSE )
             {
-                unsigned int flush_flags =
-                    FLUSH_TLB | FLUSH_ORDER(PAGETABLE_ORDER);
-
                 /* Skip this PTE if there is no change. */
                 if ( (((l2e_get_pfn(*pl2e) & ~(L1_PAGETABLE_ENTRIES - 1)) +
                        l1_table_offset(virt)) == mfn_x(mfn)) &&
@@ -5384,32 +5426,9 @@ int map_pages_to_xen(
                     goto check_l3;
                 }
 
-                pl1e = alloc_xen_pagetable();
-                if ( pl1e == NULL )
+                /* Pass virt to indicate we need to flush. */
+                if ( shatter_l2e(pl2e, virt, locking) )
                     return -ENOMEM;
-
-                for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
-                    l1e_write(&pl1e[i],
-                              l1e_from_pfn(l2e_get_pfn(*pl2e) + i,
-                                           lNf_to_l1f(l2e_get_flags(*pl2e))));
-
-                if ( l2e_get_flags(*pl2e) & _PAGE_GLOBAL )
-                    flush_flags |= FLUSH_TLB_GLOBAL;
-
-                if ( locking )
-                    spin_lock(&map_pgdir_lock);
-                if ( (l2e_get_flags(*pl2e) & _PAGE_PRESENT) &&
-                     (l2e_get_flags(*pl2e) & _PAGE_PSE) )
-                {
-                    l2e_write_atomic(pl2e, l2e_from_mfn(virt_to_mfn(pl1e),
-                                                        __PAGE_HYPERVISOR));
-                    pl1e = NULL;
-                }
-                if ( locking )
-                    spin_unlock(&map_pgdir_lock);
-                flush_area(virt, flush_flags);
-                if ( pl1e )
-                    free_xen_pagetable(pl1e);
             }
 
             pl1e  = l2e_to_l1e(*pl2e) + l1_table_offset(virt);
@@ -5632,26 +5651,8 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
             else
             {
                 /* PSE: shatter the superpage and try again. */
-                pl1e = alloc_xen_pagetable();
-                if ( !pl1e )
+                if ( shatter_l2e(pl2e, 0, locking) )
                     return -ENOMEM;
-                for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
-                    l1e_write(&pl1e[i],
-                              l1e_from_pfn(l2e_get_pfn(*pl2e) + i,
-                                           l2e_get_flags(*pl2e) & ~_PAGE_PSE));
-                if ( locking )
-                    spin_lock(&map_pgdir_lock);
-                if ( (l2e_get_flags(*pl2e) & _PAGE_PRESENT) &&
-                     (l2e_get_flags(*pl2e) & _PAGE_PSE) )
-                {
-                    l2e_write_atomic(pl2e, l2e_from_mfn(virt_to_mfn(pl1e),
-                                                        __PAGE_HYPERVISOR));
-                    pl1e = NULL;
-                }
-                if ( locking )
-                    spin_unlock(&map_pgdir_lock);
-                if ( pl1e )
-                    free_xen_pagetable(pl1e);
             }
         }
         else
-- 
2.17.1

