[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] [XEN] Fix p2m->shadow callback to pass the mfn being written to



# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
# Date 1183471079 -3600
# Node ID b8e8061c5a9862d11f95e717c12c451182a0dc76
# Parent  842e085dbb778ef848d58daaadfdd2b78f941ca3
[XEN] Fix p2m->shadow callback to pass the mfn being written to
as well as the pointer and contents.  This was being calculated but
got disconnected from its use when the p2m and shadow functions were
separated.
Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
---
 xen/arch/x86/mm/hap/hap.c        |    2 +-
 xen/arch/x86/mm/p2m.c            |   23 +++++++++++++++--------
 xen/arch/x86/mm/shadow/common.c  |    4 ++--
 xen/arch/x86/mm/shadow/private.h |    4 ++--
 xen/include/asm-x86/paging.h     |   16 +++++++++++-----
 5 files changed, 31 insertions(+), 18 deletions(-)

diff -r 842e085dbb77 -r b8e8061c5a98 xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c Tue Jul 03 13:44:04 2007 +0100
+++ b/xen/arch/x86/mm/hap/hap.c Tue Jul 03 14:57:59 2007 +0100
@@ -672,7 +672,7 @@ static void p2m_install_entry_in_monitor
 
 void 
 hap_write_p2m_entry(struct vcpu *v, unsigned long gfn, l1_pgentry_t *p,
-                    l1_pgentry_t new, unsigned int level)
+                    mfn_t table_mfn, l1_pgentry_t new, unsigned int level)
 {
     hap_lock(v->domain);
 
diff -r 842e085dbb77 -r b8e8061c5a98 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Tue Jul 03 13:44:04 2007 +0100
+++ b/xen/arch/x86/mm/p2m.c     Tue Jul 03 14:57:59 2007 +0100
@@ -146,17 +146,20 @@ p2m_next_level(struct domain *d, mfn_t *
 
         switch ( type ) {
         case PGT_l3_page_table:
-            paging_write_p2m_entry(d, gfn, p2m_entry, new_entry, 4);
+            paging_write_p2m_entry(d, gfn, 
+                                   p2m_entry, *table_mfn, new_entry, 4);
             break;
         case PGT_l2_page_table:
 #if CONFIG_PAGING_LEVELS == 3
             /* for PAE mode, PDPE only has PCD/PWT/P bits available */
             new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)), _PAGE_PRESENT);
 #endif
-            paging_write_p2m_entry(d, gfn, p2m_entry, new_entry, 3);
+            paging_write_p2m_entry(d, gfn, 
+                                   p2m_entry, *table_mfn, new_entry, 3);
             break;
         case PGT_l1_page_table:
-            paging_write_p2m_entry(d, gfn, p2m_entry, new_entry, 2);
+            paging_write_p2m_entry(d, gfn, 
+                                   p2m_entry, *table_mfn, new_entry, 2);
             break;
         default:
             BUG();
@@ -222,7 +225,7 @@ set_p2m_entry(struct domain *d, unsigned
         entry_content = l1e_empty();
 
     /* level 1 entry */
-    paging_write_p2m_entry(d, gfn, p2m_entry, entry_content, 1);
+    paging_write_p2m_entry(d, gfn, p2m_entry, table_mfn, entry_content, 1);
 
     /* Success */
     rv = 1;
@@ -707,6 +710,7 @@ void p2m_set_flags_global(struct domain 
     l1_pgentry_t l1e_content;
     l1_pgentry_t *l1e;
     l2_pgentry_t *l2e;
+    mfn_t l1mfn;
     int i1, i2;
 #if CONFIG_PAGING_LEVELS >= 3
     l3_pgentry_t *l3e;
@@ -741,7 +745,7 @@ void p2m_set_flags_global(struct domain 
        {
            continue;
        }
-       l3e = map_domain_page(mfn_x(_mfn(l4e_get_pfn(l4e[i4]))));
+       l3e = map_domain_page(l4e_get_pfn(l4e[i4]));
 #endif /* now at levels 3 or 4... */
        for ( i3 = 0; 
              i3 < ((CONFIG_PAGING_LEVELS==4) ? L3_PAGETABLE_ENTRIES : 8); 
@@ -751,7 +755,7 @@ void p2m_set_flags_global(struct domain 
            {
                continue;
            }
-           l2e = map_domain_page(mfn_x(_mfn(l3e_get_pfn(l3e[i3]))));
+           l2e = map_domain_page(l3e_get_pfn(l3e[i3]));
 #endif /* all levels... */
            for ( i2 = 0; i2 < L2_PAGETABLE_ENTRIES; i2++ )
            {
@@ -759,7 +763,9 @@ void p2m_set_flags_global(struct domain 
                {
                    continue;
                }
-               l1e = map_domain_page(mfn_x(_mfn(l2e_get_pfn(l2e[i2]))));
+
+                l1mfn = _mfn(l2e_get_pfn(l2e[i2]));
+               l1e = map_domain_page(mfn_x(l1mfn));
                
                for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++, gfn++ )
                {
@@ -769,7 +775,8 @@ void p2m_set_flags_global(struct domain 
                    gfn = get_gpfn_from_mfn(mfn);
                    /* create a new 1le entry using l1e_flags */
                    l1e_content = l1e_from_pfn(mfn, l1e_flags);
-                   paging_write_p2m_entry(d, gfn, &l1e[i1], l1e_content, 1);
+                   paging_write_p2m_entry(d, gfn, &l1e[i1], 
+                                           l1mfn, l1e_content, 1);
                }
                unmap_domain_page(l1e);
            }
diff -r 842e085dbb77 -r b8e8061c5a98 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Tue Jul 03 13:44:04 2007 +0100
+++ b/xen/arch/x86/mm/shadow/common.c   Tue Jul 03 14:57:59 2007 +0100
@@ -2733,11 +2733,11 @@ static int shadow_test_disable(struct do
  * shadow processing jobs.
  */
 void
-shadow_write_p2m_entry(struct vcpu *v, unsigned long gfn, l1_pgentry_t *p, 
+shadow_write_p2m_entry(struct vcpu *v, unsigned long gfn, 
+                       l1_pgentry_t *p, mfn_t table_mfn, 
                        l1_pgentry_t new, unsigned int level)
 {
     struct domain *d = v->domain;
-    mfn_t table_mfn = pagetable_get_mfn(d->arch.phys_table);
     mfn_t mfn;
     
     shadow_lock(d);
diff -r 842e085dbb77 -r b8e8061c5a98 xen/arch/x86/mm/shadow/private.h
--- a/xen/arch/x86/mm/shadow/private.h  Tue Jul 03 13:44:04 2007 +0100
+++ b/xen/arch/x86/mm/shadow/private.h  Tue Jul 03 14:57:59 2007 +0100
@@ -392,8 +392,8 @@ void shadow_free_p2m_page(struct domain 
 
 /* Functions that atomically write PT/P2M entries and update state */
 void shadow_write_p2m_entry(struct vcpu *v, unsigned long gfn, 
-                            l1_pgentry_t *p, l1_pgentry_t new, 
-                            unsigned int level);
+                            l1_pgentry_t *p, mfn_t table_mfn,
+                            l1_pgentry_t new, unsigned int level);
 int shadow_write_guest_entry(struct vcpu *v, intpte_t *p,
                              intpte_t new, mfn_t gmfn);
 int shadow_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p,
diff -r 842e085dbb77 -r b8e8061c5a98 xen/include/asm-x86/paging.h
--- a/xen/include/asm-x86/paging.h      Tue Jul 03 13:44:04 2007 +0100
+++ b/xen/include/asm-x86/paging.h      Tue Jul 03 14:57:59 2007 +0100
@@ -122,7 +122,8 @@ struct paging_mode {
     void          (*update_cr3            )(struct vcpu *v, int do_locking);
     void          (*update_paging_modes   )(struct vcpu *v);
     void          (*write_p2m_entry       )(struct vcpu *v, unsigned long gfn,
-                                            l1_pgentry_t *p, l1_pgentry_t new, 
+                                            l1_pgentry_t *p, mfn_t table_mfn, 
+                                            l1_pgentry_t new, 
                                             unsigned int level);
     int           (*write_guest_entry     )(struct vcpu *v, intpte_t *p,
                                             intpte_t new, mfn_t gmfn);
@@ -291,17 +292,22 @@ static inline void safe_write_pte(l1_pge
 }
 
 /* Atomically write a P2M entry and update the paging-assistance state 
- * appropriately. */
+ * appropriately. 
+ * Arguments: the domain in question, the GFN whose mapping is being updated, 
+ * a pointer to the entry to be written, the MFN in which the entry resides, 
+ * the new contents of the entry, and the level in the p2m tree at which 
+ * we are writing. */
 static inline void paging_write_p2m_entry(struct domain *d, unsigned long gfn, 
-                                          l1_pgentry_t *p, l1_pgentry_t new, 
-                                          unsigned int level)
+                                          l1_pgentry_t *p, mfn_t table_mfn,
+                                          l1_pgentry_t new, unsigned int level)
 {
     struct vcpu *v = current;
     if ( v->domain != d )
         v = d->vcpu[0];
     if ( likely(v && paging_mode_enabled(d) && v->arch.paging.mode != NULL) )
     {
-        return v->arch.paging.mode->write_p2m_entry(v, gfn, p, new, level);
+        return v->arch.paging.mode->write_p2m_entry(v, gfn, p, table_mfn,
+                                                    new, level);
     }
     else 
         safe_write_pte(p, new);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, who monitor our
servers 24x7x365 and back them with RackSpace's Fanatical Support®.