
[Xen-changelog] [xen-unstable] Add memory paging support for MMU updates (mapping a domain's memory).



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1261031275 0
# Node ID 33d97bf8a376046feb5941698be997a958b64f79
# Parent  3b29ed4ffb15372ce6fea41dec1b796e2f6fec81
Add memory paging support for MMU updates (mapping a domain's memory).

If Domain-0 tries to map a page that has been paged out, propagate an
error so that it knows to try again. If the page is paged out, request that
it be paged back in. If the page is in the process of being paged in, just
keep returning the error until the page-in completes.
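
As a caller-side illustration (not part of this patch), the retry protocol
can be sketched in a few lines of C. try_map_foreign_page() below is a
hypothetical stand-in for the real privcmd/libxc mapping path; the point is
only that -ENOENT means "paged out, populate has been requested", so the
caller backs off and retries:

    #include <errno.h>
    #include <unistd.h>

    /* Hypothetical stand-in for the real mapping call; returns 0 on
     * success, -ENOENT while the target page is paged out or still
     * being paged back in, or another negative errno on failure. */
    extern int try_map_foreign_page(unsigned long gfn);

    int map_with_retry(unsigned long gfn)
    {
        int rc;

        while ( (rc = try_map_foreign_page(gfn)) == -ENOENT )
        {
            /* Paged out: the hypervisor has already asked the pager
             * to bring the page back in.  Sleep briefly so the pager
             * (a Domain-0 user-space process) can run, then retry. */
            usleep(1000);
        }

        return rc;
    }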

This requires the cooperation of the Domain-0 kernel's privcmd mmap
functions. The kernel cannot simply spin waiting for the page, as that
would deadlock: the paging tool lives in Domain-0 user space, so a kernel
spinning in kernel space never returns to user space to allow the page to
be paged back in. There is a complementary Linux patch which looks for
ENOENT (which no other part of this code returns) and marks the PFN of
that page specially to indicate it was paged out (much as it does for PFNs
that are within the range of a domain's memory but are not presently
mapped).
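
The kernel-side handling can be sketched roughly as follows. This is an
illustration, not the actual Linux patch: PAGED_OUT_MARK and remap_status()
are invented names, standing in for however the real privcmd code tags
un-mappable frames in the caller's MFN array:

    /* Illustrative only: tag a frame slot instead of blocking in the
     * kernel when the hypervisor reports the page is paged out. */
    #define PAGED_OUT_MARK  (1UL << 63)  /* hypothetical marker bit */

    static int remap_status(unsigned long *mfnp, int hypercall_rc)
    {
        if ( hypercall_rc == -ENOENT )
        {
            /* The frame is paged out and a populate request has been
             * queued.  Mark the slot and return to user space so the
             * pager can run; user space will retry the mapping. */
            *mfnp |= PAGED_OUT_MARK;
            return 0;
        }

        return hypercall_rc;
    }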

Signed-off-by: Patrick Colp <Patrick.Colp@xxxxxxxxxx>
---
 xen/arch/x86/mm.c |   80 ++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 files changed, 78 insertions(+), 2 deletions(-)

diff -r 3b29ed4ffb15 -r 33d97bf8a376 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Thu Dec 17 06:27:55 2009 +0000
+++ b/xen/arch/x86/mm.c Thu Dec 17 06:27:55 2009 +0000
@@ -3083,13 +3083,24 @@ int do_mmu_update(
              */
         case MMU_NORMAL_PT_UPDATE:
         case MMU_PT_UPDATE_PRESERVE_AD:
+        {
+            p2m_type_t p2mt;
+
             rc = xsm_mmu_normal_update(d, pg_owner, req.val);
             if ( rc )
                 break;
 
             req.ptr -= cmd;
             gmfn = req.ptr >> PAGE_SHIFT;
-            mfn = gmfn_to_mfn(pt_owner, gmfn);
+            mfn = mfn_x(gfn_to_mfn(pt_owner, gmfn, &p2mt));
+
+            if ( p2m_is_paged(p2mt) )
+            {
+                p2m_mem_paging_populate(pg_owner, gmfn);
+
+                rc = -ENOENT;
+                break;
+            }
 
             if ( unlikely(!get_page_from_pagenr(mfn, pt_owner)) )
             {
@@ -3109,6 +3120,22 @@ int do_mmu_update(
                 case PGT_l1_page_table:
                 {
                     l1_pgentry_t l1e = l1e_from_intpte(req.val);
+                    p2m_type_t l1e_p2mt;
+                    gfn_to_mfn(pg_owner, l1e_get_pfn(l1e), &l1e_p2mt);
+
+                    if ( p2m_is_paged(l1e_p2mt) )
+                    {
+                        p2m_mem_paging_populate(pg_owner, l1e_get_pfn(l1e));
+
+                        rc = -ENOENT;
+                        break;
+                    }
+                    else if ( p2m_ram_paging_in_start == l1e_p2mt )
+                    {
+                        rc = -ENOENT;
+                        break;
+                    }
+
                     okay = mod_l1_entry(va, l1e, mfn,
                                         cmd == MMU_PT_UPDATE_PRESERVE_AD, v,
                                         pg_owner);
@@ -3117,6 +3144,22 @@ int do_mmu_update(
                 case PGT_l2_page_table:
                 {
                     l2_pgentry_t l2e = l2e_from_intpte(req.val);
+                    p2m_type_t l2e_p2mt;
+                    gfn_to_mfn(pg_owner, l2e_get_pfn(l2e), &l2e_p2mt);
+
+                    if ( p2m_is_paged(l2e_p2mt) )
+                    {
+                        p2m_mem_paging_populate(pg_owner, l2e_get_pfn(l2e));
+
+                        rc = -ENOENT;
+                        break;
+                    }
+                    else if ( p2m_ram_paging_in_start == l2e_p2mt )
+                    {
+                        rc = -ENOENT;
+                        break;
+                    }
+
                     okay = mod_l2_entry(va, l2e, mfn,
                                         cmd == MMU_PT_UPDATE_PRESERVE_AD, v);
                 }
@@ -3124,6 +3167,22 @@ int do_mmu_update(
                 case PGT_l3_page_table:
                 {
                     l3_pgentry_t l3e = l3e_from_intpte(req.val);
+                    p2m_type_t l3e_p2mt;
+                    gfn_to_mfn(pg_owner, l3e_get_pfn(l3e), &l3e_p2mt);
+
+                    if ( p2m_is_paged(l3e_p2mt) )
+                    {
+                        p2m_mem_paging_populate(pg_owner, l3e_get_pfn(l3e));
+
+                        rc = -ENOENT;
+                        break;
+                    }
+                    else if ( p2m_ram_paging_in_start == l3e_p2mt )
+                    {
+                        rc = -ENOENT;
+                        break;
+                    }
+
                     rc = mod_l3_entry(va, l3e, mfn,
                                       cmd == MMU_PT_UPDATE_PRESERVE_AD, 1, v);
                     okay = !rc;
@@ -3133,6 +3192,22 @@ int do_mmu_update(
                 case PGT_l4_page_table:
                 {
                     l4_pgentry_t l4e = l4e_from_intpte(req.val);
+                    p2m_type_t l4e_p2mt;
+                    gfn_to_mfn(pg_owner, l4e_get_pfn(l4e), &l4e_p2mt);
+
+                    if ( p2m_is_paged(l4e_p2mt) )
+                    {
+                        p2m_mem_paging_populate(pg_owner, l4e_get_pfn(l4e));
+
+                        rc = -ENOENT;
+                        break;
+                    }
+                    else if ( p2m_ram_paging_in_start == l4e_p2mt )
+                    {
+                        rc = -ENOENT;
+                        break;
+                    }
+
                     rc = mod_l4_entry(va, l4e, mfn,
                                       cmd == MMU_PT_UPDATE_PRESERVE_AD, 1, v);
                     okay = !rc;
@@ -3159,7 +3234,8 @@ int do_mmu_update(
 
             unmap_domain_page_with_cache(va, &mapcache);
             put_page(page);
-            break;
+        }
+        break;
 
         case MMU_MACHPHYS_UPDATE:
 
