
[PATCH v3 4/4] x86_64/mm: map and unmap page tables in destroy_m2p_mapping



From: Wei Liu <wei.liu2@xxxxxxxxxx>

Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
Signed-off-by: Hongyan Xia <hongyxia@xxxxxxxxxx>

---
Changed in v3:
- rename l2_ro_mpt to pl2e to avoid confusion.

Changed in v2:
- no point in re-mapping l2t because it is exactly the same as
  l2_ro_mpt.
- point l2_ro_mpt to the entry instead of doing l2_table_offset() all
  the time (a standalone sketch of this pattern follows the changelog).
---
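For reference only, not part of the patch: the v2/v3 simplification boils
down to computing a pointer to the affected L2 entry once and then releasing
the mapping through that same pointer, relying on the unmap helper to accept
any address inside the mapped page (which is what the hunks below do when
passing pl2e to UNMAP_DOMAIN_PAGE()). A minimal, self-contained C sketch of
that pattern is below; map_pt_page()/unmap_pt_page() are hypothetical
stand-ins for map_l2t_from_l3e()/UNMAP_DOMAIN_PAGE(), not the Xen API.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint64_t pt_entry_t;

#define PT_ENTRIES   512
#define PT_PAGE_SIZE (PT_ENTRIES * sizeof(pt_entry_t))   /* 4096 bytes */

/* Stand-in for map_l2t_from_l3e(): hand out a page-aligned "mapping" of
 * one page-table page so the example is self-contained. */
static pt_entry_t *map_pt_page(void)
{
    pt_entry_t *pt = aligned_alloc(PT_PAGE_SIZE, PT_PAGE_SIZE);

    if ( !pt )
        exit(EXIT_FAILURE);
    return pt;
}

/* Stand-in for UNMAP_DOMAIN_PAGE(): accept any pointer landing inside the
 * mapped page by masking it down to the start of the page. */
static void unmap_pt_page(void *ptr)
{
    free((void *)((uintptr_t)ptr & ~(uintptr_t)(PT_PAGE_SIZE - 1)));
}

int main(void)
{
    unsigned int idx = 42;      /* what l2_table_offset(va) would yield */

    /* Compute a pointer to the entry once... */
    pt_entry_t *pl2e = map_pt_page() + idx;

    *pl2e = 0;                  /* stands in for l2e_write(pl2e, l2e_empty()) */

    /* ...and release the mapping through that same pointer, without
     * re-deriving the table base or the offset. */
    unmap_pt_page(pl2e);

    printf("cleared entry %u\n", idx);
    return 0;
}

On the Xen side this matches the loop below: pl2e is set once per iteration
and handed to UNMAP_DOMAIN_PAGE() on every exit path of the loop body.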
 xen/arch/x86/x86_64/mm.c | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)

diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index cfaeae84e9..30ed4e98dd 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -263,7 +263,8 @@ static void destroy_m2p_mapping(struct mem_hotadd_info *info)
     unsigned long i, va, rwva;
     unsigned long smap = info->spfn, emap = info->epfn;
 
-    l3_ro_mpt = l4e_to_l3e(idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)]);
+    l3_ro_mpt = map_l3t_from_l4e(
+                    idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)]);
 
     /*
      * No need to clean m2p structure existing before the hotplug
@@ -271,7 +272,7 @@ static void destroy_m2p_mapping(struct mem_hotadd_info *info)
     for (i = smap; i < emap;)
     {
         unsigned long pt_pfn;
-        l2_pgentry_t *l2_ro_mpt;
+        l2_pgentry_t *pl2e;
 
         va = RO_MPT_VIRT_START + i * sizeof(*machine_to_phys_mapping);
         rwva = RDWR_MPT_VIRT_START + i * sizeof(*machine_to_phys_mapping);
@@ -285,26 +286,30 @@ static void destroy_m2p_mapping(struct mem_hotadd_info *info)
             continue;
         }
 
-        l2_ro_mpt = l3e_to_l2e(l3_ro_mpt[l3_table_offset(va)]);
-        if (!(l2e_get_flags(l2_ro_mpt[l2_table_offset(va)]) & _PAGE_PRESENT))
+        pl2e = map_l2t_from_l3e(l3_ro_mpt[l3_table_offset(va)]) +
+                    l2_table_offset(va);
+        if ( !(l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
         {
             i = ( i & ~((1UL << (L2_PAGETABLE_SHIFT - 3)) - 1)) +
                     (1UL << (L2_PAGETABLE_SHIFT - 3)) ;
+            UNMAP_DOMAIN_PAGE(pl2e);
             continue;
         }
 
-        pt_pfn = l2e_get_pfn(l2_ro_mpt[l2_table_offset(va)]);
+        pt_pfn = l2e_get_pfn(*pl2e);
         if ( hotadd_mem_valid(pt_pfn, info) )
         {
             destroy_xen_mappings(rwva, rwva + (1UL << L2_PAGETABLE_SHIFT));
 
-            l2_ro_mpt = l3e_to_l2e(l3_ro_mpt[l3_table_offset(va)]);
-            l2e_write(&l2_ro_mpt[l2_table_offset(va)], l2e_empty());
+            l2e_write(pl2e, l2e_empty());
         }
         i = ( i & ~((1UL << (L2_PAGETABLE_SHIFT - 3)) - 1)) +
               (1UL << (L2_PAGETABLE_SHIFT - 3));
+        UNMAP_DOMAIN_PAGE(pl2e);
     }
 
+    UNMAP_DOMAIN_PAGE(l3_ro_mpt);
+
     destroy_compat_m2p_mapping(info);
 
     /* Brute-Force flush all TLB */
-- 
2.24.1.AMZN




 

