[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] merge



# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID 2d734ac9ec85de6c3363091f72c8e4a9cda0c75d
# Parent  487fe5006064d516274efaf494f16dae9e9833b4
# Parent  0882db2677b05d6193eda01b77afad99150c6d03
merge
---
 xen/arch/x86/shadow2-common.c         |    8 +++---
 xen/arch/x86/shadow2.c                |   41 ++++++++++++++++++++++++++--------
 xen/include/asm-x86/shadow2-private.h |   19 ---------------
 3 files changed, 36 insertions(+), 32 deletions(-)

diff -r 487fe5006064 -r 2d734ac9ec85 xen/arch/x86/shadow2-common.c
--- a/xen/arch/x86/shadow2-common.c     Thu Aug 17 16:28:47 2006 +0100
+++ b/xen/arch/x86/shadow2-common.c     Thu Aug 17 16:29:21 2006 +0100
@@ -2165,9 +2165,6 @@ void sh2_remove_shadows(struct vcpu *v, 
         0  /* unused  */
     };
 
-    SHADOW2_PRINTK("d=%d, v=%d, gmfn=%05lx\n",
-                   v->domain->domain_id, v->vcpu_id, mfn_x(gmfn));
-
     ASSERT(shadow2_lock_is_acquired(v->domain));
 
     pg = mfn_to_page(gmfn);
@@ -2175,6 +2172,9 @@ void sh2_remove_shadows(struct vcpu *v, 
     /* Bale out now if the page is not shadowed */
     if ( (pg->count_info & PGC_page_table) == 0 )
         return;
+
+    SHADOW2_PRINTK("d=%d, v=%d, gmfn=%05lx\n",
+                   v->domain->domain_id, v->vcpu_id, mfn_x(gmfn));
 
     /* Search for this shadow in all appropriate shadows */
     perfc_incrc(shadow2_unshadow);
@@ -2843,7 +2843,7 @@ sh2_p2m_remove_page(struct domain *d, un
         v = d->vcpu[0];
 
 
-    SHADOW2_PRINTK("removing gfn=%#lx mfn=%#lx\n", gfn, mfn);
+    SHADOW2_DEBUG(P2M, "removing gfn=%#lx mfn=%#lx\n", gfn, mfn);
 
     ASSERT(mfn_x(sh2_gfn_to_mfn(d, gfn)) == mfn);
     //ASSERT(sh2_mfn_to_gfn(d, mfn) == gfn);
diff -r 487fe5006064 -r 2d734ac9ec85 xen/arch/x86/shadow2.c
--- a/xen/arch/x86/shadow2.c    Thu Aug 17 16:28:47 2006 +0100
+++ b/xen/arch/x86/shadow2.c    Thu Aug 17 16:29:21 2006 +0100
@@ -234,6 +234,28 @@ delete_shadow2_status(struct vcpu *v, mf
     put_page(mfn_to_page(gmfn));
 }
 
+/**************************************************************************/
+/* CPU feature support querying */
+
+static inline int
+guest_supports_superpages(struct vcpu *v)
+{
+    /* The _PAGE_PSE bit must be honoured in HVM guests, whenever
+     * CR4.PSE is set or the guest is in PAE or long mode */
+    return (hvm_guest(v) && (GUEST_PAGING_LEVELS != 2 
+                             || (hvm_get_guest_ctrl_reg(v, 4) & X86_CR4_PSE)));
+}
+
+static inline int
+guest_supports_nx(struct vcpu *v)
+{
+    if ( !hvm_guest(v) )
+        return cpu_has_nx;
+
+    // XXX - fix this!
+    return 1;
+}
+
 
 /**************************************************************************/
 /* Functions for walking the guest page tables */
@@ -482,9 +504,11 @@ static u32 guest_set_ad_bits(struct vcpu
     if ( unlikely(GUEST_PAGING_LEVELS == 3 && level == 3) )
         return flags;
 
-    /* Need the D bit as well for writes, in l1es and PSE l2es. */
+    /* Need the D bit as well for writes, in l1es and 32bit/PAE PSE l2es. */
     if ( ft == ft_demand_write  
-         && (level == 1 || (level == 2 && (flags & _PAGE_PSE))) )
+         && (level == 1 || 
+             (level == 2 && GUEST_PAGING_LEVELS < 4 
+              && (flags & _PAGE_PSE) && guest_supports_superpages(v))) )
     {
         if ( (flags & (_PAGE_DIRTY | _PAGE_ACCESSED)) 
              == (_PAGE_DIRTY | _PAGE_ACCESSED) )
@@ -709,7 +733,6 @@ sh2_propagate_flags(struct vcpu *v, mfn_
     struct domain *d = v->domain;
     u32 pass_thru_flags;
     u32 sflags;
-    int lowest_level_guest_mapping;
 
     // XXX -- might want to think about PAT support for HVM guests...
 
@@ -782,10 +805,6 @@ sh2_propagate_flags(struct vcpu *v, mfn_
     if ( (level > 1) && !((SHADOW_PAGING_LEVELS == 3) && (level == 3)) )
         sflags |= _PAGE_ACCESSED | _PAGE_DIRTY;
 
-    lowest_level_guest_mapping =
-        ((level == 1) ||
-         ((level == 2) && guest_supports_superpages(v) &&
-          (gflags & _PAGE_PSE)));
 
     // Set the A and D bits in the guest entry, if we need to.
     if ( guest_entry_ptr && (ft & FETCH_TYPE_DEMAND) )
@@ -798,8 +817,12 @@ sh2_propagate_flags(struct vcpu *v, mfn_
                   !(gflags & _PAGE_ACCESSED)) )
         sflags &= ~_PAGE_PRESENT;
 
-    if ( unlikely(lowest_level_guest_mapping &&
-                  !(gflags & _PAGE_DIRTY)) )
+    /* D bits exist in l1es, and 32bit/PAE PSE l2es, but not 64bit PSE l2es */
+    if ( unlikely( ((level == 1) 
+                    || ((level == 2) && (GUEST_PAGING_LEVELS < 4) 
+                        && guest_supports_superpages(v) &&
+                        (gflags & _PAGE_PSE)))
+                   && !(gflags & _PAGE_DIRTY)) )
         sflags &= ~_PAGE_RW;
 
     // MMIO caching
diff -r 487fe5006064 -r 2d734ac9ec85 xen/include/asm-x86/shadow2-private.h
--- a/xen/include/asm-x86/shadow2-private.h     Thu Aug 17 16:28:47 2006 +0100
+++ b/xen/include/asm-x86/shadow2-private.h     Thu Aug 17 16:29:21 2006 +0100
@@ -530,25 +530,6 @@ static inline void sh2_unpin(struct vcpu
         page->count_info &= ~PGC_SH2_pinned;
         sh2_put_ref(v, smfn, 0);
     }
-}
-
-/**************************************************************************/
-/* CPU feature support querying */
-
-static inline int
-guest_supports_superpages(struct vcpu *v)
-{
-    return hvm_guest(v) && (hvm_get_guest_ctrl_reg(v, 4) & X86_CR4_PSE);
-}
-
-static inline int
-guest_supports_nx(struct vcpu *v)
-{
-    if ( !hvm_guest(v) )
-        return cpu_has_nx;
-
-    // XXX - fix this!
-    return 1;
 }
 
 /**************************************************************************/

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted by Rackspace, which monitors our
servers 24x7x365, backed by Rackspace's Fanatical Support®.