[Xen-devel] [RFC PATCH 18/84] x86/mm: drop lXe_to_lYe invocations in map_pages_to_xen



From: Wei Liu <wei.liu2@xxxxxxxxxx>

Map and unmap page tables where necessary: replace the l3e_to_l2e() /
l2e_to_l1e() invocations in map_pages_to_xen() with
map_xen_pagetable_new() / UNMAP_XEN_PAGETABLE_NEW() pairs, and free page
tables by MFN via free_xen_pagetable_new().

Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
 xen/arch/x86/mm.c | 40 +++++++++++++++++++++++++++++-----------
 1 file changed, 29 insertions(+), 11 deletions(-)
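
For reference (not part of the patch): the converted call sites follow
roughly the sketch below. It assumes the map_xen_pagetable_new() /
UNMAP_XEN_PAGETABLE_NEW() / free_xen_pagetable_new() helpers introduced
earlier in this series, which map and free a page table by MFN instead of
going through the direct map:

    l2_pgentry_t *l2t;
    mfn_t l2t_mfn = l3e_get_mfn(ol3e);      /* remember the MFN of the L2 table */

    l2t = map_xen_pagetable_new(l2t_mfn);   /* replaces l3e_to_l2e(ol3e) */

    /* ... read or modify l2t[i] ... */

    UNMAP_XEN_PAGETABLE_NEW(l2t);           /* always drop the temporary mapping */
    free_xen_pagetable_new(l2t_mfn);        /* free by MFN, only if the table is dead */

The MFN is kept in a local variable because it is still needed for the
free after the temporary mapping has been torn down, whereas the old code
simply passed the direct-map virtual address to free_xen_pagetable().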

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 267e4ceef5..2333e93151 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5141,8 +5141,10 @@ int map_pages_to_xen(
                 else
                 {
                     l2_pgentry_t *l2t;
+                    mfn_t l2t_mfn = l3e_get_mfn(ol3e);
+
+                    l2t = map_xen_pagetable_new(l2t_mfn);
 
-                    l2t = l3e_to_l2e(ol3e);
                     for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
                     {
                         ol2e = l2t[i];
@@ -5154,10 +5156,12 @@ int map_pages_to_xen(
                         {
                             unsigned int j;
                             l1_pgentry_t *l1t;
+                            mfn_t l1t_mfn = l2e_get_mfn(ol2e);
 
-                            l1t = l2e_to_l1e(ol2e);
+                            l1t = map_xen_pagetable_new(l1t_mfn);
                             for ( j = 0; j < L1_PAGETABLE_ENTRIES; j++ )
                                 flush_flags(l1e_get_flags(l1t[j]));
+                            UNMAP_XEN_PAGETABLE_NEW(l1t);
                         }
                     }
                     flush_area(virt, flush_flags);
@@ -5166,9 +5170,9 @@ int map_pages_to_xen(
                         ol2e = l2t[i];
                         if ( (l2e_get_flags(ol2e) & _PAGE_PRESENT) &&
                              !(l2e_get_flags(ol2e) & _PAGE_PSE) )
-                            free_xen_pagetable(l2e_to_l1e(ol2e));
+                            free_xen_pagetable_new(l2e_get_mfn(ol2e));
                     }
-                    free_xen_pagetable(l2t);
+                    free_xen_pagetable_new(l2t_mfn);
                 }
             }
 
@@ -5274,12 +5278,14 @@ int map_pages_to_xen(
                 else
                 {
                     l1_pgentry_t *l1t;
+                    mfn_t l1t_mfn = l2e_get_mfn(ol2e);
 
-                    l1t = l2e_to_l1e(ol2e);
+                    l1t = map_xen_pagetable_new(l1t_mfn);
                     for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
                         flush_flags(l1e_get_flags(l1t[i]));
                     flush_area(virt, flush_flags);
-                    free_xen_pagetable(l1t);
+                    UNMAP_XEN_PAGETABLE_NEW(l1t);
+                    free_xen_pagetable_new(l1t_mfn);
                 }
             }
 
@@ -5293,12 +5299,14 @@ int map_pages_to_xen(
             /* Normal page mapping. */
             if ( !(l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
             {
+                /* XXX This forces page table to be populated */
                 pl1e = virt_to_xen_l1e(virt);
                 if ( pl1e == NULL )
                 {
                     ASSERT(rc == -ENOMEM);
                     goto out;
                 }
+                UNMAP_XEN_PAGETABLE_NEW(pl1e);
             }
             else if ( l2e_get_flags(*pl2e) & _PAGE_PSE )
             {
@@ -5362,9 +5370,11 @@ int map_pages_to_xen(
                 }
             }
 
-            pl1e  = l2e_to_l1e(*pl2e) + l1_table_offset(virt);
+            pl1e  = map_xen_pagetable_new(l2e_get_mfn((*pl2e)));
+            pl1e += l1_table_offset(virt);
             ol1e  = *pl1e;
             l1e_write_atomic(pl1e, l1e_from_mfn(mfn, flags));
+            UNMAP_XEN_PAGETABLE_NEW(pl1e);
             if ( (l1e_get_flags(ol1e) & _PAGE_PRESENT) )
             {
                 unsigned int flush_flags = FLUSH_TLB | FLUSH_ORDER(0);
@@ -5385,6 +5395,7 @@ int map_pages_to_xen(
             {
                 unsigned long base_mfn;
                 l1_pgentry_t *l1t;
+                mfn_t l1t_mfn;
 
                 if ( locking )
                     spin_lock(&map_pgdir_lock);
@@ -5408,12 +5419,15 @@ int map_pages_to_xen(
                     goto check_l3;
                 }
 
-                l1t = l2e_to_l1e(ol2e);
+                l1t_mfn = l2e_get_mfn(ol2e);
+                l1t = map_xen_pagetable_new(l1t_mfn);
+
                 base_mfn = l1e_get_pfn(l1t[0]) & ~(L1_PAGETABLE_ENTRIES - 1);
                 for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
                     if ( (l1e_get_pfn(l1t[i]) != (base_mfn + i)) ||
                          (l1e_get_flags(l1t[i]) != flags) )
                         break;
+                UNMAP_XEN_PAGETABLE_NEW(l1t);
                 if ( i == L1_PAGETABLE_ENTRIES )
                 {
                     l2e_write_atomic(pl2e, l2e_from_pfn(base_mfn,
@@ -5423,7 +5437,7 @@ int map_pages_to_xen(
                     flush_area(virt - PAGE_SIZE,
                                FLUSH_TLB_GLOBAL |
                                FLUSH_ORDER(PAGETABLE_ORDER));
-                    free_xen_pagetable(l2e_to_l1e(ol2e));
+                    free_xen_pagetable_new(l1t_mfn);
                 }
                 else if ( locking )
                     spin_unlock(&map_pgdir_lock);
@@ -5439,6 +5453,7 @@ int map_pages_to_xen(
         {
             unsigned long base_mfn;
             l2_pgentry_t *l2t;
+            mfn_t l2t_mfn;
 
             if ( locking )
                 spin_lock(&map_pgdir_lock);
@@ -5456,7 +5471,9 @@ int map_pages_to_xen(
                 goto end_of_loop;
             }
 
-            l2t = l3e_to_l2e(ol3e);
+            l2t_mfn = l3e_get_mfn(ol3e);
+            l2t = map_xen_pagetable_new(l2t_mfn);
+
             base_mfn = l2e_get_pfn(l2t[0]) & ~(L2_PAGETABLE_ENTRIES *
                                               L1_PAGETABLE_ENTRIES - 1);
             for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
@@ -5464,6 +5481,7 @@ int map_pages_to_xen(
                       (base_mfn + (i << PAGETABLE_ORDER))) ||
                      (l2e_get_flags(l2t[i]) != l1f_to_lNf(flags)) )
                     break;
+            UNMAP_XEN_PAGETABLE_NEW(l2t);
             if ( i == L2_PAGETABLE_ENTRIES )
             {
                 l3e_write_atomic(pl3e, l3e_from_pfn(base_mfn,
@@ -5473,7 +5491,7 @@ int map_pages_to_xen(
                 flush_area(virt - PAGE_SIZE,
                            FLUSH_TLB_GLOBAL |
                            FLUSH_ORDER(2*PAGETABLE_ORDER));
-                free_xen_pagetable(l3e_to_l2e(ol3e));
+                free_xen_pagetable_new(l2t_mfn);
             }
             else if ( locking )
                 spin_unlock(&map_pgdir_lock);
-- 
2.17.1

