
[Xen-changelog] [xen-unstable] memory hotadd 3/7: Function to share m2p tables with guest.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1260521708 0
# Node ID 0ca5a5f477be011814ee35949dfcefd53fb4390b
# Parent  adb62ca21d31fa3d2bbdae76493c86348fc48019
memory hotadd 3/7: Function to share m2p tables with guest.

The M2P tables must be shared with the guest, since the guest maps
them read-only. The logic is similar to that in subarch_init_memory(),
but here we must check that the mapping was set up by this hot-add
operation.

Signed-off-by: Jiang, Yunhong <yunhong.jiang@xxxxxxxxx>
---
 xen/arch/x86/x86_64/mm.c |   61 +++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 61 insertions(+)
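
[Note: hotadd_mem_valid() and struct mem_hotadd_info are introduced by
an earlier patch in this series and are not part of this one. Below is
a minimal sketch of the range check relied on by
share_hotadd_m2p_table(), assuming the info structure carries the
start and end PFNs of the hot-added region (field names here are
illustrative, not taken from this patch):

    struct mem_hotadd_info {
        unsigned long spfn;    /* first PFN of the hot-added range */
        unsigned long epfn;    /* one past the last PFN of the range */
    };

    static int hotadd_mem_valid(unsigned long pfn,
                                struct mem_hotadd_info *info)
    {
        /* An MFN counts as hot-added iff it lies in [spfn, epfn). */
        return (pfn >= info->spfn) && (pfn < info->epfn);
    }

Only pages whose MFNs pass this check are shared here; ranges mapped
at boot were already shared in subarch_init_memory().]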

diff -r adb62ca21d31 -r 0ca5a5f477be xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c  Fri Dec 11 08:54:37 2009 +0000
+++ b/xen/arch/x86/x86_64/mm.c  Fri Dec 11 08:55:08 2009 +0000
@@ -247,6 +247,67 @@ static int m2p_mapped(unsigned long spfn
         return M2P_2M_MAPPED;
 
     return M2P_NO_MAPPED;
+}
+
+int share_hotadd_m2p_table(struct mem_hotadd_info *info)
+{
+    unsigned long i, n, v, m2p_start_mfn = 0;
+    l3_pgentry_t l3e;
+    l2_pgentry_t l2e;
+
+    /* M2P table is mappable read-only by privileged domains. */
+    for ( v  = RDWR_MPT_VIRT_START;
+          v != RDWR_MPT_VIRT_END;
+          v += n << PAGE_SHIFT )
+    {
+        n = L2_PAGETABLE_ENTRIES * L1_PAGETABLE_ENTRIES;
+        l3e = l4e_to_l3e(idle_pg_table[l4_table_offset(v)])[
+            l3_table_offset(v)];
+        if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
+            continue;
+        if ( !(l3e_get_flags(l3e) & _PAGE_PSE) )
+        {
+            n = L1_PAGETABLE_ENTRIES;
+            l2e = l3e_to_l2e(l3e)[l2_table_offset(v)];
+            if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
+                continue;
+            m2p_start_mfn = l2e_get_pfn(l2e);
+        }
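+        /* Hot-added M2P ranges are mapped with 2MB pages; a 1GB (PSE)
+         * mapping predates this hot-add and was shared at boot. */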
+        else
+            continue;
+
+        for ( i = 0; i < n; i++ )
+        {
+            struct page_info *page = mfn_to_page(m2p_start_mfn + i);
+            if ( hotadd_mem_valid(m2p_start_mfn + i, info) )
+                share_xen_page_with_privileged_guests(page, XENSHARE_readonly);
+        }
+    }
+
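+    /* The compat M2P table uses only 2MB mappings, so walk L2 entries. */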
+    for ( v  = RDWR_COMPAT_MPT_VIRT_START;
+          v != RDWR_COMPAT_MPT_VIRT_END;
+          v += 1 << L2_PAGETABLE_SHIFT )
+    {
+        l3e = l4e_to_l3e(idle_pg_table[l4_table_offset(v)])[
+            l3_table_offset(v)];
+        if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
+            continue;
+        l2e = l3e_to_l2e(l3e)[l2_table_offset(v)];
+        if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
+            continue;
+        m2p_start_mfn = l2e_get_pfn(l2e);
+
+        for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
+        {
+            struct page_info *page = mfn_to_page(m2p_start_mfn + i);
+            if ( hotadd_mem_valid(m2p_start_mfn + i, info) )
+            {
+                printk(XENLOG_DEBUG "sharing m2p page %lx\n", m2p_start_mfn + i);
+                share_xen_page_with_privileged_guests(page, XENSHARE_readonly);
+            }
+        }
+    }
+    return 0;
 }
 
 static void destroy_compat_m2p_mapping(struct mem_hotadd_info *info)
