
[Xen-changelog] Clean up shadow-translate xen patches. Add abstractions



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID ce057aa33cadc2a71ed5ef715217e577fc867408
# Parent  491a8798945e634976f05a69d6291b30674a172d
Clean up shadow-translate xen patches. Add abstractions
for adding/removing pages from a translated guest's
physmap. Define dummy functions so other architectures
will continue to build without errors.
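
As a rough sketch, the intended call pattern for the new helpers
looks like this (illustrative only; the real call sites are in the
hunks below, and page_to_pfn is assumed as the usual inverse of
pfn_to_page):

    /* Wire a freshly allocated machine frame into a translated
     * guest's physmap (p2m plus m2p).  Both helpers are no-ops for
     * non-translate guests, whose callers update the M2P directly. */
    unsigned long mfn = page_to_pfn(page);
    guest_physmap_add_page(d, gpfn, mfn);
    /* ... later, on a decrease-reservation or transfer ... */
    guest_physmap_remove_page(d, gpfn, mfn);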

Remove setting of XENFEAT_writable_mmu_structures. This
should be set only if the hypervisor supports writable
mappings of all MMU structures (all page tables and
descriptor tables). If we want a mode where only PTEs
can be writable, we should add a feature flag for that
(but I don't think this is a useful mode to support).
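
For context, a guest discovers such flags through the version
hypercall's feature bitmap, along these lines (a guest-side sketch
only, matching the fi.submap_idx/fi.submap interface in the
kernel.c hunk below):

    /* xen_feature_info_t and XENVER_get_features are from
     * public/version.h; submap 0 carries the low feature bits. */
    xen_feature_info_t fi;
    fi.submap_idx = 0;
    if ( (HYPERVISOR_xen_version(XENVER_get_features, &fi) == 0) &&
         (fi.submap & (1U << XENFEAT_writable_mmu_structures)) )
    {
        /* All MMU structures (page tables and descriptor tables)
         * may be mapped writable. */
    }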

TODO: The call to get the pfn hole should be a
XENMEM_ function, not another MMUEXT_OP (hopefully the
latter hypercall is not going to grow any more as it's
gross enough already).
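
Something like the following, perhaps (entirely hypothetical: the
subop name, number and struct below are invented for illustration):

    #define XENMEM_machphys_pfn_hole 7   /* placeholder subop number */
    struct xen_machphys_pfn_hole {
        /* OUT: first pfn of the hole and the number of pfns it
         * spans. */
        unsigned long start_pfn;
        unsigned long nr_pfns;
    };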

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>

diff -r 491a8798945e -r ce057aa33cad xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Sat Jan 28 11:09:45 2006
+++ b/xen/arch/x86/domain.c     Sat Jan 28 12:01:19 2006
@@ -348,7 +348,6 @@
     struct domain *d = v->domain;
     unsigned long phys_basetab;
     int i, rc;
-    unsigned got_basetab_type;
 
     /*
      * This is sufficient! If the descriptor DPL differs from CS RPL then we'll
@@ -408,27 +407,25 @@
 
     v->arch.guest_table = mk_pagetable(phys_basetab);
 
-    got_basetab_type = 0;
+    if ( (rc = (int)set_gdt(v, c->gdt_frames, c->gdt_ents)) != 0 )
+        return rc;
+
     if ( shadow_mode_refcounts(d) )
     {
         if ( !get_page(pfn_to_page(phys_basetab>>PAGE_SHIFT), d) )
+        {
+            destroy_gdt(v);
             return -EINVAL;
+        }
     }
     else if ( !(c->flags & VGCF_VMX_GUEST) )
     {
         if ( !get_page_and_type(pfn_to_page(phys_basetab>>PAGE_SHIFT), d,
                                 PGT_base_page_table) )
+        {
+            destroy_gdt(v);
             return -EINVAL;
-        got_basetab_type = 1;
-    }
-
-    if ( (rc = (int)set_gdt(v, c->gdt_frames, c->gdt_ents)) != 0 )
-    {
-        if (got_basetab_type)
-            put_page_and_type(pfn_to_page(phys_basetab>>PAGE_SHIFT));
-        else
-            put_page(pfn_to_page(phys_basetab>>PAGE_SHIFT));
-        return rc;
+        }
     }
 
     if ( c->flags & VGCF_VMX_GUEST )
diff -r 491a8798945e -r ce057aa33cad xen/common/grant_table.c
--- a/xen/common/grant_table.c  Sat Jan 28 11:09:45 2006
+++ b/xen/common/grant_table.c  Sat Jan 28 12:01:19 2006
@@ -521,7 +521,8 @@
     {
         ASSERT(d->grant_table != NULL);
         (void)put_user(GNTST_okay, &uop->status);
-        for ( i = 0; i < op.nr_frames; i++ ) {
+        for ( i = 0; i < op.nr_frames; i++ )
+        {
             mfn = __mfn_to_gpfn(d, gnttab_shared_mfn(d, d->grant_table, i));
             (void)put_user(mfn, &op.frame_list[i]);
         }
@@ -709,7 +710,7 @@
     int i;
     grant_entry_t *sha;
     gnttab_transfer_t gop;
-    unsigned long real_mfn;
+    unsigned long mfn;
 
     for ( i = 0; i < count; i++ )
     {
@@ -730,8 +731,8 @@
             continue;
         }
 
-        real_mfn = __gpfn_to_mfn(d, gop.mfn);
-        page = pfn_to_page(real_mfn);
+        mfn = __gpfn_to_mfn(d, gop.mfn);
+        page = pfn_to_page(mfn);
         if ( unlikely(IS_XEN_HEAP_FRAME(page)) )
         { 
             DPRINTK("gnttab_transfer: xen frame %lx\n",
@@ -792,21 +793,8 @@
 
         /* Tell the guest about its new page frame. */
         sha = &e->grant_table->shared[gop.ref];
-        if (shadow_mode_translate(e)) {
-            struct domain_mmap_cache c1, c2;
-            unsigned long pfn = sha->frame;
-            domain_mmap_cache_init(&c1);
-            domain_mmap_cache_init(&c2);
-            shadow_lock(e);
-            shadow_sync_and_drop_references(e, page);
-            set_p2m_entry(e, pfn, real_mfn, &c1, &c2);
-            set_pfn_from_mfn(real_mfn, pfn);
-            shadow_unlock(e);
-            domain_mmap_cache_destroy(&c1);
-            domain_mmap_cache_destroy(&c2);
-        } else {
-            sha->frame = real_mfn;
-        }
+        guest_physmap_add_page(e, sha->frame, mfn);
+        sha->frame = mfn;
         wmb();
         sha->flags |= GTF_transfer_completed;
 
diff -r 491a8798945e -r ce057aa33cad xen/common/kernel.c
--- a/xen/common/kernel.c       Sat Jan 28 11:09:45 2006
+++ b/xen/common/kernel.c       Sat Jan 28 12:01:19 2006
@@ -13,7 +13,6 @@
 #include <asm/current.h>
 #include <public/nmi.h>
 #include <public/version.h>
-#include <asm/shadow.h>
 
 void cmdline_parse(char *cmdline)
 {
@@ -156,10 +155,7 @@
         switch ( fi.submap_idx )
         {
         case 0:
-            if (shadow_mode_wr_pt_pte(current->domain))
-                fi.submap = XENFEAT_writable_mmu_structures;
-            else
-                fi.submap = 0;
+            fi.submap = 0;
             break;
         default:
             return -EINVAL;
diff -r 491a8798945e -r ce057aa33cad xen/common/memory.c
--- a/xen/common/memory.c       Sat Jan 28 11:09:45 2006
+++ b/xen/common/memory.c       Sat Jan 28 12:01:19 2006
@@ -75,9 +75,8 @@
     unsigned int   flags,
     int           *preempted)
 {
-    struct pfn_info         *page;
-    unsigned long            i, j, pfn, mfn;
-    struct domain_mmap_cache cache1, cache2;
+    struct pfn_info *page;
+    unsigned long    i, j, pfn, mfn;
 
     if ( !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
         return 0;
@@ -85,12 +84,6 @@
     if ( (extent_order != 0) &&
          !multipage_allocation_permitted(current->domain) )
         return 0;
-
-    if (shadow_mode_translate(d)) {
-        domain_mmap_cache_init(&cache1);
-        domain_mmap_cache_init(&cache2);
-        shadow_lock(d);
-    }
 
     for ( i = 0; i < nr_extents; i++ )
     {
@@ -114,13 +107,16 @@
         if ( unlikely(__get_user(pfn, &extent_list[i]) != 0) )
             goto out;
 
-        for ( j = 0; j < (1 << extent_order); j++ ) {
-            if (shadow_mode_translate(d))
-                set_p2m_entry(d, pfn + j, mfn + j, &cache1, &cache2);
-            set_pfn_from_mfn(mfn + j, pfn + j);
-        }
-
-        if (!shadow_mode_translate(d)) {
+        if ( unlikely(shadow_mode_translate(d)) )
+        {
+            for ( j = 0; j < (1 << extent_order); j++ )
+                guest_physmap_add_page(d, pfn + j, mfn + j);
+        }
+        else
+        {
+            for ( j = 0; j < (1 << extent_order); j++ )
+                set_pfn_from_mfn(mfn + j, pfn + j);
+
             /* Inform the domain of the new page's machine address. */ 
             if ( __put_user(mfn, &extent_list[i]) != 0 )
                 goto out;
@@ -128,12 +124,6 @@
     }
 
  out:
-    if (shadow_mode_translate(d)) {
-        shadow_unlock(d);
-        domain_mmap_cache_destroy(&cache1);
-        domain_mmap_cache_destroy(&cache2);
-    }
-
     return i;
 }
     
@@ -168,8 +158,8 @@
             mfn = __gpfn_to_mfn(d, gpfn + j);
             if ( unlikely(mfn >= max_page) )
             {
-                DPRINTK("Domain %u page number out of range (%lx(%lx) >= %lx)\n",
-                        d->domain_id, mfn, gpfn, max_page);
+                DPRINTK("Domain %u page number out of range (%lx >= %lx)\n",
+                        d->domain_id, mfn, max_page);
                 return i;
             }
             
@@ -186,18 +176,8 @@
             if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
                 put_page(page);
 
-            if (shadow_mode_translate(d)) {
-                struct domain_mmap_cache c1, c2;
-                domain_mmap_cache_init(&c1);
-                domain_mmap_cache_init(&c2);
-                shadow_lock(d);
-                shadow_sync_and_drop_references(d, page);
-                set_p2m_entry(d, gpfn + j, -1, &c1, &c2);
-                set_pfn_from_mfn(mfn + j, INVALID_M2P_ENTRY);
-                shadow_unlock(d);
-                domain_mmap_cache_destroy(&c1);
-                domain_mmap_cache_destroy(&c2);
-            }
+            guest_physmap_remove_page(d, gpfn + j, mfn);
+
             put_page(page);
         }
     }
diff -r 491a8798945e -r ce057aa33cad xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h      Sat Jan 28 11:09:45 2006
+++ b/xen/include/asm-x86/shadow.h      Sat Jan 28 12:01:19 2006
@@ -636,6 +636,44 @@
 }
 #endif
 
+static inline void guest_physmap_add_page(
+    struct domain *d, unsigned long gpfn, unsigned long mfn)
+{
+    struct domain_mmap_cache c1, c2;
+
+    if ( likely(!shadow_mode_translate(d)) )
+        return;
+
+    domain_mmap_cache_init(&c1);
+    domain_mmap_cache_init(&c2);
+    shadow_lock(d);
+    shadow_sync_and_drop_references(d, pfn_to_page(mfn));
+    set_p2m_entry(d, gpfn, mfn, &c1, &c2);
+    set_pfn_from_mfn(mfn, gpfn);
+    shadow_unlock(d);
+    domain_mmap_cache_destroy(&c1);
+    domain_mmap_cache_destroy(&c2);
+}
+
+static inline void guest_physmap_remove_page(
+    struct domain *d, unsigned long gpfn, unsigned long mfn)
+{
+    struct domain_mmap_cache c1, c2;
+
+    if ( likely(!shadow_mode_translate(d)) )
+        return;
+
+    domain_mmap_cache_init(&c1);
+    domain_mmap_cache_init(&c2);
+    shadow_lock(d);
+    shadow_sync_and_drop_references(d, pfn_to_page(mfn));
+    set_p2m_entry(d, gpfn, -1, &c1, &c2);
+    set_pfn_from_mfn(mfn, INVALID_M2P_ENTRY);
+    shadow_unlock(d);
+    domain_mmap_cache_destroy(&c1);
+    domain_mmap_cache_destroy(&c2);
+}
+
 /************************************************************************/
 
 /*
diff -r 491a8798945e -r ce057aa33cad xen/include/xen/shadow.h
--- a/xen/include/xen/shadow.h  Sat Jan 28 11:09:45 2006
+++ b/xen/include/xen/shadow.h  Sat Jan 28 12:01:19 2006
@@ -10,8 +10,15 @@
 
 #else
 
-#define shadow_drop_references(_d, _p)          ((void)0)
-#define shadow_sync_and_drop_references(_d, _p) ((void)0)
+#define shadow_drop_references(d, p)          ((void)0)
+#define shadow_sync_and_drop_references(d, p) ((void)0)
+
+#define shadow_mode_translate(d)              (0)
+
+#define __gpfn_to_mfn(d, p)                   (p)
+#define __mfn_to_gpfn(d, p)                   (p)
+#define guest_physmap_add_page(d, p, m)       ((void)0)
+#define guest_physmap_remove_page(d, p, m)    ((void)0)
 
 #endif
 
