[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] memory hotadd 2/7: Destroy m2p table for hot-added memory when hot-add failed.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1260521677 0
# Node ID adb62ca21d31fa3d2bbdae76493c86348fc48019
# Parent  b7cf749e14fceb3ec283b57a5898820ba978275e
memory hotadd 2/7: Destroy m2p table for hot-added memory when hot-add failed.

As the m2p table should not be in use when we destroy it, we don't
need to consider cleaning the head/tail mappings that may have existed before the hot-add.

Signed-off-by: Jiang, Yunhong <yunhong.jiang@xxxxxxxxx>
---
 xen/arch/x86/x86_64/mm.c |   98 +++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 98 insertions(+)

diff -r b7cf749e14fc -r adb62ca21d31 xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c  Fri Dec 11 08:53:57 2009 +0000
+++ b/xen/arch/x86/x86_64/mm.c  Fri Dec 11 08:54:37 2009 +0000
@@ -247,6 +247,104 @@ static int m2p_mapped(unsigned long spfn
         return M2P_2M_MAPPED;
 
     return M2P_NO_MAPPED;
+}
+
+/*
+ * Tear down the 32-bit-compat read-only M2P mappings created for a
+ * failed hot-add.  Only L2 entries whose backing page-table page lies
+ * inside the hot-added range [info->spfn, info->epfn) are removed.
+ */
+static void destroy_compat_m2p_mapping(struct mem_hotadd_info *info)
+{
+    unsigned long i, va, rwva, pt_pfn;
+    unsigned long smap = info->spfn, emap = info->epfn;
+
+    l3_pgentry_t *l3_ro_mpt;
+    l2_pgentry_t *l2_ro_mpt;
+
+    /*
+     * The compat M2P only covers the first (END - START) >> 2 frames
+     * (entries are 4 bytes); frames above that have no compat mapping.
+     */
+    if ( smap > ((RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START) >> 2) )
+        return;
+
+    if ( emap > ((RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START) >> 2) )
+        emap = (RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START) >> 2;
+
+    l3_ro_mpt = l4e_to_l3e(idle_pg_table[l4_table_offset(HIRO_COMPAT_MPT_VIRT_START)]);
+
+    ASSERT(l3e_get_flags(l3_ro_mpt[l3_table_offset(HIRO_COMPAT_MPT_VIRT_START)]) & _PAGE_PRESENT);
+
+    l2_ro_mpt = l3e_to_l2e(l3_ro_mpt[l3_table_offset(HIRO_COMPAT_MPT_VIRT_START)]);
+
+    for ( i = smap; i < emap; )
+    {
+        va = HIRO_COMPAT_MPT_VIRT_START +
+              i * sizeof(*compat_machine_to_phys_mapping);
+        rwva = RDWR_COMPAT_MPT_VIRT_START +
+             i * sizeof(*compat_machine_to_phys_mapping);
+        if ( l2e_get_flags(l2_ro_mpt[l2_table_offset(va)]) & _PAGE_PRESENT )
+        {
+            pt_pfn = l2e_get_pfn(l2_ro_mpt[l2_table_offset(va)]);
+            /* Only destroy table pages allocated from the hot-added range. */
+            if ( hotadd_mem_valid(pt_pfn, info) )
+            {
+                destroy_xen_mappings(rwva, rwva +
+                        (1UL << L2_PAGETABLE_SHIFT));
+                l2e_write(&l2_ro_mpt[l2_table_offset(va)], l2e_empty());
+            }
+        }
+
+        /* Step one 2MiB superpage's worth of 4-byte compat entries. */
+        i += 1UL << (L2_PAGETABLE_SHIFT - 2);
+    }
+
+    return;
+}
+
+/*
+ * Tear down the 64-bit read-only M2P mappings created for a failed
+ * hot-add.  Only L2 entries whose backing page-table page lies inside
+ * the hot-added range [info->spfn, info->epfn) are torn down, so any
+ * mapping that existed before the hot-add is left intact.  Finishes by
+ * removing the compat M2P mappings and flushing all TLBs.
+ */
+void destroy_m2p_mapping(struct mem_hotadd_info *info)
+{
+    l3_pgentry_t *l3_ro_mpt;
+    unsigned long i, va, rwva;
+    unsigned long smap = info->spfn, emap = info->epfn;
+
+    l3_ro_mpt = l4e_to_l3e(idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)]);
+
+    /*
+     * No need to clean m2p structure existing before the hotplug
+     */
+    for (i = smap; i < emap;)
+    {
+        unsigned long pt_pfn;
+        l2_pgentry_t *l2_ro_mpt;
+
+        /* Read-only view (guest-visible) and the writable alias used
+         * for teardown of the same M2P slot. */
+        va = RO_MPT_VIRT_START + i * sizeof(*machine_to_phys_mapping);
+        rwva = RDWR_MPT_VIRT_START + i * sizeof(*machine_to_phys_mapping);
+
+        /* 1G mapping should not be created by mem hotadd */
+        if (!(l3e_get_flags(l3_ro_mpt[l3_table_offset(va)]) & _PAGE_PRESENT) ||
+            (l3e_get_flags(l3_ro_mpt[l3_table_offset(va)]) & _PAGE_PSE))
+        {
+            /* Skip to the start of the next 1GiB-aligned run of 8-byte
+             * entries: round i down to a 1GiB boundary, then step one. */
+            i = ( i & ~((1UL << (L3_PAGETABLE_SHIFT - 3)) - 1)) +
+                (1UL << (L3_PAGETABLE_SHIFT - 3) );
+            continue;
+        }
+
+        l2_ro_mpt = l3e_to_l2e(l3_ro_mpt[l3_table_offset(va)]);
+        if (!(l2e_get_flags(l2_ro_mpt[l2_table_offset(va)]) & _PAGE_PRESENT))
+        {
+            /* Nothing mapped here; skip to the next 2MiB-aligned run. */
+            i = ( i & ~((1UL << (L2_PAGETABLE_SHIFT - 3)) - 1)) +
+                    (1UL << (L2_PAGETABLE_SHIFT - 3)) ;
+            continue;
+        }
+
+        pt_pfn = l2e_get_pfn(l2_ro_mpt[l2_table_offset(va)]);
+        /* Only destroy table pages allocated from the hot-added range;
+         * pre-existing mappings must survive. */
+        if ( hotadd_mem_valid(pt_pfn, info) )
+        {
+            destroy_xen_mappings(rwva, rwva + (1UL << L2_PAGETABLE_SHIFT));
+
+            /* Re-derive the L2 table pointer before clearing the slot. */
+            l2_ro_mpt = l3e_to_l2e(l3_ro_mpt[l3_table_offset(va)]);
+            l2e_write(&l2_ro_mpt[l2_table_offset(va)], l2e_empty());
+        }
+        /* Advance to the next 2MiB-aligned chunk of 8-byte entries. */
+        i = ( i & ~((1UL << (L2_PAGETABLE_SHIFT - 3)) - 1)) +
+              (1UL << (L2_PAGETABLE_SHIFT - 3));
+    }
+
+    destroy_compat_m2p_mapping(info);
+
+    /* Brute-Force flush all TLB */
+    flush_tlb_all();
+    return;
 }
 
 /*

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.