
[Xen-changelog] [xen master] x86/shadow: whitespace cleanup



commit a9533e536cc9ed75f424a935ab5bfc52f30e1aa4
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Tue Feb 17 14:29:48 2015 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Feb 17 14:29:48 2015 +0100

    x86/shadow: whitespace cleanup
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Tim Deegan <tim@xxxxxxx>
---
 xen/arch/x86/mm/shadow/common.c  |  490 ++++++++++++------------
 xen/arch/x86/mm/shadow/multi.c   |  770 +++++++++++++++++++-------------------
 xen/arch/x86/mm/shadow/multi.h   |   36 +-
 xen/arch/x86/mm/shadow/private.h |  106 +++---
 xen/arch/x86/mm/shadow/types.h   |   42 +-
 5 files changed, 722 insertions(+), 722 deletions(-)

diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 3630ae0..502e0d8 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -5,7 +5,7 @@
  * Parts of this code are Copyright (c) 2006 by XenSource Inc.
  * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
  * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -252,7 +252,7 @@ hvm_emulate_write(enum x86_segment seg,
         v, addr, p_data, bytes, sh_ctxt);
 }
 
-static int 
+static int
 hvm_emulate_cmpxchg(enum x86_segment seg,
                     unsigned long offset,
                     void *p_old,
@@ -329,7 +329,7 @@ pv_emulate_write(enum x86_segment seg,
         v, offset, p_data, bytes, sh_ctxt);
 }
 
-static int 
+static int
 pv_emulate_cmpxchg(enum x86_segment seg,
                    unsigned long offset,
                    void *p_old,
@@ -409,9 +409,9 @@ const struct x86_emulate_ops *shadow_init_emulation(
     return &hvm_shadow_emulator_ops;
 }
 
-/* Update an initialized emulation context to prepare for the next 
+/* Update an initialized emulation context to prepare for the next
  * instruction */
-void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt, 
+void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt,
                                struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
@@ -437,17 +437,17 @@ void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt,
         }
     }
 }
- 
+
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
 /**************************************************************************/
-/* Out-of-sync shadows. */ 
+/* Out-of-sync shadows. */
 
-/* From time to time, we let a shadowed pagetable page go out of sync 
- * with its shadow: the guest is allowed to write directly to the page, 
+/* From time to time, we let a shadowed pagetable page go out of sync
+ * with its shadow: the guest is allowed to write directly to the page,
  * and those writes are not synchronously reflected in the shadow.
- * This lets us avoid many emulations if the guest is writing a lot to a 
- * pagetable, but it relaxes a pretty important invariant in the shadow 
+ * This lets us avoid many emulations if the guest is writing a lot to a
+ * pagetable, but it relaxes a pretty important invariant in the shadow
  * pagetable design.  Therefore, some rules:
  *
  * 1. Only L1 pagetables may go out of sync: any page that is shadowed
@@ -455,21 +455,21 @@ void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt,
  *    using linear shadow pagetables much less dangerous.
  *    That means that: (a) unsyncing code needs to check for higher-level
  *    shadows, and (b) promotion code needs to resync.
- * 
+ *
  * 2. All shadow operations on a guest page require the page to be brought
  *    back into sync before proceeding.  This must be done under the
  *    paging lock so that the page is guaranteed to remain synced until
  *    the operation completes.
  *
- *    Exceptions to this rule: the pagefault and invlpg handlers may 
- *    update only one entry on an out-of-sync page without resyncing it. 
+ *    Exceptions to this rule: the pagefault and invlpg handlers may
+ *    update only one entry on an out-of-sync page without resyncing it.
  *
  * 3. Operations on shadows that do not start from a guest page need to
  *    be aware that they may be handling an out-of-sync shadow.
  *
- * 4. Operations that do not normally take the paging lock (fast-path 
- *    #PF handler, INVLPG) must fall back to a locking, syncing version 
- *    if they see an out-of-sync table. 
+ * 4. Operations that do not normally take the paging lock (fast-path
+ *    #PF handler, INVLPG) must fall back to a locking, syncing version
+ *    if they see an out-of-sync table.
  *
  * 5. Operations corresponding to guest TLB flushes (MOV CR3, INVLPG)
  *    must explicitly resync all relevant pages or update their
@@ -488,26 +488,26 @@ void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt,
 
 
 #if SHADOW_AUDIT & SHADOW_AUDIT_ENTRIES_FULL
-static void sh_oos_audit(struct domain *d) 
+static void sh_oos_audit(struct domain *d)
 {
     int idx, expected_idx, expected_idx_alt;
     struct page_info *pg;
     struct vcpu *v;
-    
-    for_each_vcpu(d, v) 
+
+    for_each_vcpu(d, v)
     {
         for ( idx = 0; idx < SHADOW_OOS_PAGES; idx++ )
         {
             mfn_t *oos = v->arch.paging.shadow.oos;
             if ( !mfn_valid(oos[idx]) )
                 continue;
-            
+
             expected_idx = mfn_x(oos[idx]) % SHADOW_OOS_PAGES;
             expected_idx_alt = ((expected_idx + 1) % SHADOW_OOS_PAGES);
             if ( idx != expected_idx && idx != expected_idx_alt )
             {
                 printk("%s: idx %d contains gmfn %lx, expected at %d or %d.\n",
-                       __func__, idx, mfn_x(oos[idx]), 
+                       __func__, idx, mfn_x(oos[idx]),
                        expected_idx, expected_idx_alt);
                 BUG();
             }
@@ -536,21 +536,21 @@ static void sh_oos_audit(struct domain *d)
 #endif
 
 #if SHADOW_AUDIT & SHADOW_AUDIT_ENTRIES
-void oos_audit_hash_is_present(struct domain *d, mfn_t gmfn) 
+void oos_audit_hash_is_present(struct domain *d, mfn_t gmfn)
 {
     int idx;
     struct vcpu *v;
     mfn_t *oos;
 
     ASSERT(mfn_is_out_of_sync(gmfn));
-    
-    for_each_vcpu(d, v) 
+
+    for_each_vcpu(d, v)
     {
         oos = v->arch.paging.shadow.oos;
         idx = mfn_x(gmfn) % SHADOW_OOS_PAGES;
         if ( mfn_x(oos[idx]) != mfn_x(gmfn) )
             idx = (idx + 1) % SHADOW_OOS_PAGES;
-        
+
         if ( mfn_x(oos[idx]) == mfn_x(gmfn) )
             return;
     }
@@ -593,7 +593,7 @@ static inline int oos_fixup_flush_gmfn(struct vcpu *v, mfn_t gmfn,
         if ( mfn_x(fixup->smfn[i]) != INVALID_MFN )
         {
             sh_remove_write_access_from_sl1p(v, gmfn,
-                                             fixup->smfn[i], 
+                                             fixup->smfn[i],
                                              fixup->off[i]);
             fixup->smfn[i] = _mfn(INVALID_MFN);
         }
@@ -612,8 +612,8 @@ void oos_fixup_add(struct vcpu *v, mfn_t gmfn,
     struct domain *d = v->domain;
 
     perfc_incr(shadow_oos_fixup_add);
-    
-    for_each_vcpu(d, v) 
+
+    for_each_vcpu(d, v)
     {
         oos = v->arch.paging.shadow.oos;
         oos_fixup = v->arch.paging.shadow.oos_fixup;
@@ -638,7 +638,7 @@ void oos_fixup_add(struct vcpu *v, mfn_t gmfn,
                 TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_OOS_FIXUP_EVICT);
 
                 /* Reuse this slot and remove current writable mapping. */
-                sh_remove_write_access_from_sl1p(v, gmfn, 
+                sh_remove_write_access_from_sl1p(v, gmfn,
                                                  oos_fixup[idx].smfn[next],
                                                  oos_fixup[idx].off[next]);
                 perfc_incr(shadow_oos_fixup_evict);
@@ -681,7 +681,7 @@ static int oos_remove_write_access(struct vcpu *v, mfn_t gmfn,
 
     case -1:
         /* An unfindable writeable typecount has appeared, probably via a
-         * grant table entry: can't shoot the mapping, so try to unshadow 
+         * grant table entry: can't shoot the mapping, so try to unshadow
          * the page.  If that doesn't work either, the guest is granting
          * his pagetables and must be killed after all.
          * This will flush the tlb, so we can return with no worries. */
@@ -715,7 +715,7 @@ static void _sh_resync(struct vcpu *v, mfn_t gmfn,
     ASSERT(paging_locked_by_me(v->domain));
     ASSERT(mfn_is_out_of_sync(gmfn));
     /* Guest page must be shadowed *only* as L1 when out of sync. */
-    ASSERT(!(mfn_to_page(gmfn)->shadow_flags & SHF_page_type_mask 
+    ASSERT(!(mfn_to_page(gmfn)->shadow_flags & SHF_page_type_mask
              & ~SHF_L1_ANY));
     ASSERT(!sh_page_has_multiple_shadows(mfn_to_page(gmfn)));
 
@@ -751,14 +751,14 @@ static void oos_hash_add(struct vcpu *v, mfn_t gmfn)
     mfn_t *oos_snapshot = v->arch.paging.shadow.oos_snapshot;
     struct oos_fixup *oos_fixup = v->arch.paging.shadow.oos_fixup;
     struct oos_fixup fixup = { .next = 0 };
-    
+
     for (i = 0; i < SHADOW_OOS_FIXUPS; i++ )
         fixup.smfn[i] = _mfn(INVALID_MFN);
 
     idx = mfn_x(gmfn) % SHADOW_OOS_PAGES;
     oidx = idx;
 
-    if ( mfn_valid(oos[idx]) 
+    if ( mfn_valid(oos[idx])
          && (mfn_x(oos[idx]) % SHADOW_OOS_PAGES) == idx )
     {
         /* Punt the current occupant into the next slot */
@@ -795,7 +795,7 @@ static void oos_hash_remove(struct vcpu *v, mfn_t gmfn)
 
     SHADOW_PRINTK("%pv gmfn %lx\n", v, mfn_x(gmfn));
 
-    for_each_vcpu(d, v) 
+    for_each_vcpu(d, v)
     {
         oos = v->arch.paging.shadow.oos;
         idx = mfn_x(gmfn) % SHADOW_OOS_PAGES;
@@ -818,8 +818,8 @@ mfn_t oos_snapshot_lookup(struct vcpu *v, mfn_t gmfn)
     mfn_t *oos;
     mfn_t *oos_snapshot;
     struct domain *d = v->domain;
-    
-    for_each_vcpu(d, v) 
+
+    for_each_vcpu(d, v)
     {
         oos = v->arch.paging.shadow.oos;
         oos_snapshot = v->arch.paging.shadow.oos_snapshot;
@@ -846,7 +846,7 @@ void sh_resync(struct vcpu *v, mfn_t gmfn)
     struct oos_fixup *oos_fixup;
     struct domain *d = v->domain;
 
-    for_each_vcpu(d, v) 
+    for_each_vcpu(d, v)
     {
         oos = v->arch.paging.shadow.oos;
         oos_fixup = v->arch.paging.shadow.oos_fixup;
@@ -854,7 +854,7 @@ void sh_resync(struct vcpu *v, mfn_t gmfn)
         idx = mfn_x(gmfn) % SHADOW_OOS_PAGES;
         if ( mfn_x(oos[idx]) != mfn_x(gmfn) )
             idx = (idx + 1) % SHADOW_OOS_PAGES;
-        
+
         if ( mfn_x(oos[idx]) == mfn_x(gmfn) )
         {
             _sh_resync(v, gmfn, &oos_fixup[idx], oos_snapshot[idx]);
@@ -905,7 +905,7 @@ void sh_resync_all(struct vcpu *v, int skip, int this, int others)
         goto resync_others;
 
     /* First: resync all of this vcpu's oos pages */
-    for ( idx = 0; idx < SHADOW_OOS_PAGES; idx++ ) 
+    for ( idx = 0; idx < SHADOW_OOS_PAGES; idx++ )
         if ( mfn_valid(oos[idx]) )
         {
             /* Write-protect and sync contents */
@@ -920,14 +920,14 @@ void sh_resync_all(struct vcpu *v, int skip, int this, int others)
     /* Second: make all *other* vcpus' oos pages safe. */
     for_each_vcpu(v->domain, other)
     {
-        if ( v == other ) 
+        if ( v == other )
             continue;
 
         oos = other->arch.paging.shadow.oos;
         oos_fixup = other->arch.paging.shadow.oos_fixup;
         oos_snapshot = other->arch.paging.shadow.oos_snapshot;
 
-        for ( idx = 0; idx < SHADOW_OOS_PAGES; idx++ ) 
+        for ( idx = 0; idx < SHADOW_OOS_PAGES; idx++ )
         {
             if ( !mfn_valid(oos[idx]) )
                 continue;
@@ -946,7 +946,7 @@ void sh_resync_all(struct vcpu *v, int skip, int this, int others)
                 _sh_resync(other, oos[idx], &oos_fixup[idx], oos_snapshot[idx]);
                 oos[idx] = _mfn(INVALID_MFN);
             }
-        }        
+        }
     }
 }
 
@@ -955,19 +955,19 @@ void sh_resync_all(struct vcpu *v, int skip, int this, int others)
 int sh_unsync(struct vcpu *v, mfn_t gmfn)
 {
     struct page_info *pg;
-    
+
     ASSERT(paging_locked_by_me(v->domain));
 
     SHADOW_PRINTK("d=%d, v=%d, gmfn=%05lx\n",
                   v->domain->domain_id, v->vcpu_id, mfn_x(gmfn));
 
     pg = mfn_to_page(gmfn);
- 
+
     /* Guest page must be shadowed *only* as L1 and *only* once when out
-     * of sync.  Also, get out now if it's already out of sync. 
+     * of sync.  Also, get out now if it's already out of sync.
      * Also, can't safely unsync if some vcpus have paging disabled.*/
-    if ( pg->shadow_flags & 
-         ((SHF_page_type_mask & ~SHF_L1_ANY) | SHF_out_of_sync) 
+    if ( pg->shadow_flags &
+         ((SHF_page_type_mask & ~SHF_L1_ANY) | SHF_out_of_sync)
          || sh_page_has_multiple_shadows(pg)
          || is_pv_domain(v->domain)
          || !v->domain->arch.paging.shadow.oos_active )
@@ -995,9 +995,9 @@ void shadow_promote(struct vcpu *v, mfn_t gmfn, unsigned int type)
 
     ASSERT(mfn_valid(gmfn));
 
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
     /* Is the page already shadowed and out of sync? */
-    if ( page_is_out_of_sync(page) ) 
+    if ( page_is_out_of_sync(page) )
         sh_resync(v, gmfn);
 #endif
 
@@ -1026,13 +1026,13 @@ void shadow_demote(struct vcpu *v, mfn_t gmfn, u32 type)
 
     if ( (page->shadow_flags & SHF_page_type_mask) == 0 )
     {
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
         /* Was the page out of sync? */
-        if ( page_is_out_of_sync(page) ) 
+        if ( page_is_out_of_sync(page) )
         {
             oos_hash_remove(v, gmfn);
         }
-#endif 
+#endif
         clear_bit(_PGC_page_table, &page->count_info);
     }
 
@@ -1050,11 +1050,11 @@ sh_validate_guest_entry(struct vcpu *v, mfn_t gmfn, void *entry, u32 size)
     struct page_info *page = mfn_to_page(gmfn);
 
     paging_mark_dirty(v->domain, mfn_x(gmfn));
-    
+
     // Determine which types of shadows are affected, and update each.
     //
     // Always validate L1s before L2s to prevent another cpu with a linear
-    // mapping of this gmfn from seeing a walk that results from 
+    // mapping of this gmfn from seeing a walk that results from
     // using the new L2 value and the old L1 value.  (It is OK for such a
     // guest to see a walk that uses the old L2 value with the new L1 value,
     // as hardware could behave this way if one level of the pagewalk occurs
@@ -1067,40 +1067,40 @@ sh_validate_guest_entry(struct vcpu *v, mfn_t gmfn, void *entry, u32 size)
     if ( !(page->count_info & PGC_page_table) )
         return 0;  /* Not shadowed at all */
 
-    if ( page->shadow_flags & SHF_L1_32 ) 
+    if ( page->shadow_flags & SHF_L1_32 )
         result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, 2)
             (v, gmfn, entry, size);
-    if ( page->shadow_flags & SHF_L2_32 ) 
+    if ( page->shadow_flags & SHF_L2_32 )
         result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, 2)
             (v, gmfn, entry, size);
 
-    if ( page->shadow_flags & SHF_L1_PAE ) 
+    if ( page->shadow_flags & SHF_L1_PAE )
         result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, 3)
             (v, gmfn, entry, size);
-    if ( page->shadow_flags & SHF_L2_PAE ) 
+    if ( page->shadow_flags & SHF_L2_PAE )
         result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, 3)
             (v, gmfn, entry, size);
-    if ( page->shadow_flags & SHF_L2H_PAE ) 
+    if ( page->shadow_flags & SHF_L2H_PAE )
         result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2he, 3)
             (v, gmfn, entry, size);
 
-    if ( page->shadow_flags & SHF_L1_64 ) 
+    if ( page->shadow_flags & SHF_L1_64 )
         result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, 4)
             (v, gmfn, entry, size);
-    if ( page->shadow_flags & SHF_L2_64 ) 
+    if ( page->shadow_flags & SHF_L2_64 )
         result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, 4)
             (v, gmfn, entry, size);
-    if ( page->shadow_flags & SHF_L2H_64 ) 
+    if ( page->shadow_flags & SHF_L2H_64 )
         result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2he, 4)
             (v, gmfn, entry, size);
-    if ( page->shadow_flags & SHF_L3_64 ) 
+    if ( page->shadow_flags & SHF_L3_64 )
         result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl3e, 4)
             (v, gmfn, entry, size);
-    if ( page->shadow_flags & SHF_L4_64 ) 
+    if ( page->shadow_flags & SHF_L4_64 )
         result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl4e, 4)
             (v, gmfn, entry, size);
 
-    this_cpu(trace_shadow_path_flags) |= (result<<(TRCE_SFLAG_SET_CHANGED)); 
+    this_cpu(trace_shadow_path_flags) |= (result<<(TRCE_SFLAG_SET_CHANGED));
 
     return result;
 }
@@ -1121,12 +1121,12 @@ sh_validate_guest_pt_write(struct vcpu *v, mfn_t gmfn,
     if ( rc & SHADOW_SET_FLUSH )
         /* Need to flush TLBs to pick up shadow PT changes */
         flush_tlb_mask(d->domain_dirty_cpumask);
-    if ( rc & SHADOW_SET_ERROR ) 
+    if ( rc & SHADOW_SET_ERROR )
     {
-        /* This page is probably not a pagetable any more: tear it out of the 
-         * shadows, along with any tables that reference it.  
-         * Since the validate call above will have made a "safe" (i.e. zero) 
-         * shadow entry, we can let the domain live even if we can't fully 
+        /* This page is probably not a pagetable any more: tear it out of the
+         * shadows, along with any tables that reference it.
+         * Since the validate call above will have made a "safe" (i.e. zero)
+         * shadow entry, we can let the domain live even if we can't fully
          * unshadow the page. */
         sh_remove_shadows(v, gmfn, 0, 0);
     }
@@ -1134,7 +1134,7 @@ sh_validate_guest_pt_write(struct vcpu *v, mfn_t gmfn,
 
 int shadow_write_guest_entry(struct vcpu *v, intpte_t *p,
                              intpte_t new, mfn_t gmfn)
-/* Write a new value into the guest pagetable, and update the shadows 
+/* Write a new value into the guest pagetable, and update the shadows
  * appropriately.  Returns 0 if we page-faulted, 1 for success. */
 {
     int failed;
@@ -1148,7 +1148,7 @@ int shadow_write_guest_entry(struct vcpu *v, intpte_t *p,
 
 int shadow_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p,
                                intpte_t *old, intpte_t new, mfn_t gmfn)
-/* Cmpxchg a new value into the guest pagetable, and update the shadows 
+/* Cmpxchg a new value into the guest pagetable, and update the shadows
  * appropriately. Returns 0 if we page-faulted, 1 if not.
  * N.B. caller should check the value of "old" to see if the
  * cmpxchg itself was successful. */
@@ -1166,7 +1166,7 @@ int shadow_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p,
 
 
 /**************************************************************************/
-/* Memory management for shadow pages. */ 
+/* Memory management for shadow pages. */
 
 /* Allocating shadow pages
  * -----------------------
@@ -1180,12 +1180,12 @@ int shadow_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p,
  * PAE/64-bit l2 tables (1GB va each).  These multi-page shadows are
  * not contiguous in memory; functions for handling offsets into them are
  * defined in shadow/multi.c (shadow_l1_index() etc.)
- *    
+ *
  * This table shows the allocation behaviour of the different modes:
  *
  * Xen paging      64b  64b  64b
  * Guest paging    32b  pae  64b
- * PV or HVM       HVM  HVM   * 
+ * PV or HVM       HVM  HVM   *
  * Shadow paging   pae  pae  64b
  *
  * sl1 size         8k   4k   4k
@@ -1193,8 +1193,8 @@ int shadow_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p,
  * sl3 size         -    -    4k
  * sl4 size         -    -    4k
  *
- * In HVM guests, the p2m table is built out of shadow pages, and we provide 
- * a function for the p2m management to steal pages, in max-order chunks, from 
+ * In HVM guests, the p2m table is built out of shadow pages, and we provide
+ * a function for the p2m management to steal pages, in max-order chunks, from
  * the free pool.
  */
 
@@ -1221,15 +1221,15 @@ const u8 sh_type_to_size[] = {
 /* Figure out the least acceptable quantity of shadow memory.
  * The minimum memory requirement for always being able to free up a
  * chunk of memory is very small -- only three max-order chunks per
- * vcpu to hold the top level shadows and pages with Xen mappings in them.  
+ * vcpu to hold the top level shadows and pages with Xen mappings in them.
  *
  * But for a guest to be guaranteed to successfully execute a single
  * instruction, we must be able to map a large number (about thirty) VAs
  * at the same time, which means that to guarantee progress, we must
  * allow for more than ninety allocated pages per vcpu.  We round that
- * up to 128 pages, or half a megabyte per vcpu, and add 1 more vcpu's 
+ * up to 128 pages, or half a megabyte per vcpu, and add 1 more vcpu's
  * worth to make sure we never return zero. */
-static unsigned int shadow_min_acceptable_pages(struct domain *d) 
+static unsigned int shadow_min_acceptable_pages(struct domain *d)
 {
     u32 vcpu_count = 1;
     struct vcpu *v;
@@ -1238,7 +1238,7 @@ static unsigned int shadow_min_acceptable_pages(struct domain *d)
         vcpu_count++;
 
     return (vcpu_count * 128);
-} 
+}
 
 /* Dispatcher function: call the per-mode function that will unhook the
  * non-Xen mappings in this top-level shadow mfn.  With user_only == 1,
@@ -1290,7 +1290,7 @@ static void _shadow_prealloc(
     int i;
 
     if ( d->arch.paging.shadow.free_pages >= pages ) return;
-    
+
     v = current;
     if ( v->domain != d )
         v = d->vcpu[0];
@@ -1315,13 +1315,13 @@ static void _shadow_prealloc(
      * mappings. */
     perfc_incr(shadow_prealloc_2);
 
-    for_each_vcpu(d, v2) 
+    for_each_vcpu(d, v2)
         for ( i = 0 ; i < 4 ; i++ )
         {
             if ( !pagetable_is_null(v2->arch.shadow_table[i]) )
             {
                 TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_PREALLOC_UNHOOK);
-                shadow_unhook_mappings(v, 
+                shadow_unhook_mappings(v,
                                pagetable_get_mfn(v2->arch.shadow_table[i]), 0);
 
                 /* See if that freed up enough space */
@@ -1332,7 +1332,7 @@ static void _shadow_prealloc(
                 }
             }
         }
-    
+
     /* Nothing more we can do: all remaining shadows are of pages that
      * hold Xen mappings for some vcpu.  This can never happen. */
     SHADOW_ERROR("Can't pre-allocate %u shadow pages!\n"
@@ -1356,7 +1356,7 @@ void shadow_prealloc(struct domain *d, u32 type, unsigned int count)
 
 /* Deliberately free all the memory we can: this will tear down all of
  * this domain's shadows */
-static void shadow_blow_tables(struct domain *d) 
+static void shadow_blow_tables(struct domain *d)
 {
     struct page_info *sp, *t;
     struct vcpu *v = d->vcpu[0];
@@ -1371,12 +1371,12 @@ static void shadow_blow_tables(struct domain *d)
         smfn = page_to_mfn(sp);
         sh_unpin(v, smfn);
     }
-        
+
     /* Second pass: unhook entries of in-use shadows */
-    for_each_vcpu(d, v) 
+    for_each_vcpu(d, v)
         for ( i = 0 ; i < 4 ; i++ )
             if ( !pagetable_is_null(v->arch.shadow_table[i]) )
-                shadow_unhook_mappings(v, 
+                shadow_unhook_mappings(v,
                                pagetable_get_mfn(v->arch.shadow_table[i]), 0);
 
     /* Make sure everyone sees the unshadowings */
@@ -1441,9 +1441,9 @@ set_next_shadow(struct page_info *sp, struct page_info *next)
 }
 
 /* Allocate another shadow's worth of (contiguous, aligned) pages,
- * and fill in the type and backpointer fields of their page_infos. 
+ * and fill in the type and backpointer fields of their page_infos.
  * Never fails to allocate. */
-mfn_t shadow_alloc(struct domain *d,  
+mfn_t shadow_alloc(struct domain *d,
                     u32 shadow_type,
                     unsigned long backpointer)
 {
@@ -1485,10 +1485,10 @@ mfn_t shadow_alloc(struct domain *d,
     INIT_PAGE_LIST_HEAD(&tmp_list);
 
     /* Init page info fields and clear the pages */
-    for ( i = 0; i < pages ; i++ ) 
+    for ( i = 0; i < pages ; i++ )
     {
         sp = page_list_remove_head(&d->arch.paging.shadow.freelist);
-        /* Before we overwrite the old contents of this page, 
+        /* Before we overwrite the old contents of this page,
          * we need to be sure that no TLB holds a pointer to it. */
         cpumask_copy(&mask, d->domain_dirty_cpumask);
         tlbflush_filter(mask, sp->tlbflush_timestamp);
@@ -1512,7 +1512,7 @@ mfn_t shadow_alloc(struct domain *d,
         set_next_shadow(sp, NULL);
         perfc_incr(shadow_alloc_count);
     }
-    if ( shadow_type >= SH_type_min_shadow 
+    if ( shadow_type >= SH_type_min_shadow
          && shadow_type <= SH_type_max_shadow )
         sp->u.sh.head = 1;
 
@@ -1525,7 +1525,7 @@ mfn_t shadow_alloc(struct domain *d,
 /* Return some shadow pages to the pool. */
 void shadow_free(struct domain *d, mfn_t smfn)
 {
-    struct page_info *next = NULL, *sp = mfn_to_page(smfn); 
+    struct page_info *next = NULL, *sp = mfn_to_page(smfn);
     struct page_list_head *pin_list;
     unsigned int pages;
     u32 shadow_type;
@@ -1540,16 +1540,16 @@ void shadow_free(struct domain *d, mfn_t smfn)
     pages = shadow_size(shadow_type);
     pin_list = &d->arch.paging.shadow.pinned_shadows;
 
-    for ( i = 0; i < pages; i++ ) 
+    for ( i = 0; i < pages; i++ )
     {
 #if SHADOW_OPTIMIZATIONS & (SHOPT_WRITABLE_HEURISTIC | SHOPT_FAST_EMULATION)
         struct vcpu *v;
-        for_each_vcpu(d, v) 
+        for_each_vcpu(d, v)
         {
 #if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
             /* No longer safe to look for a writeable mapping in this shadow */
-            if ( v->arch.paging.shadow.last_writeable_pte_smfn 
-                 == mfn_x(page_to_mfn(sp)) ) 
+            if ( v->arch.paging.shadow.last_writeable_pte_smfn
+                 == mfn_x(page_to_mfn(sp)) )
                 v->arch.paging.shadow.last_writeable_pte_smfn = 0;
 #endif
 #if SHADOW_OPTIMIZATIONS & SHOPT_FAST_EMULATION
@@ -1562,7 +1562,7 @@ void shadow_free(struct domain *d, mfn_t smfn)
             next = page_list_next(sp, pin_list);
         /* Strip out the type: this is now a free shadow page */
         sp->u.sh.type = sp->u.sh.head = 0;
-        /* Remember the TLB timestamp so we will know whether to flush 
+        /* Remember the TLB timestamp so we will know whether to flush
          * TLBs when we reuse the page.  Because the destructors leave the
          * contents of the pages in place, we can delay TLB flushes until
          * just before the allocator hands the page out again. */
@@ -1584,11 +1584,11 @@ shadow_alloc_p2m_page(struct domain *d)
 {
     struct page_info *pg;
 
-    /* This is called both from the p2m code (which never holds the 
+    /* This is called both from the p2m code (which never holds the
      * paging lock) and the log-dirty code (which always does). */
     paging_lock_recursive(d);
 
-    if ( d->arch.paging.shadow.total_pages 
+    if ( d->arch.paging.shadow.total_pages
          < shadow_min_acceptable_pages(d) + 1 )
     {
         if ( !d->arch.paging.p2m_alloc_failed )
@@ -1630,9 +1630,9 @@ shadow_free_p2m_page(struct domain *d, struct page_info *pg)
     }
     pg->count_info &= ~PGC_count_mask;
     pg->u.sh.type = SH_type_p2m_table; /* p2m code reuses type-info */
-    page_set_owner(pg, NULL); 
+    page_set_owner(pg, NULL);
 
-    /* This is called both from the p2m code (which never holds the 
+    /* This is called both from the p2m code (which never holds the
      * paging lock) and the log-dirty code (which always does). */
     paging_lock_recursive(d);
 
@@ -1647,7 +1647,7 @@ shadow_free_p2m_page(struct domain *d, struct page_info *pg)
  * Input will be rounded up to at least shadow_min_acceptable_pages(),
  * plus space for the p2m table.
  * Returns 0 for success, non-zero for failure. */
-static unsigned int sh_set_allocation(struct domain *d, 
+static unsigned int sh_set_allocation(struct domain *d,
                                       unsigned int pages,
                                       int *preempted)
 {
@@ -1663,7 +1663,7 @@ static unsigned int sh_set_allocation(struct domain *d,
             pages = 0;
         else
             pages -= d->arch.paging.shadow.p2m_pages;
-        
+
         /* Don't allocate less than the minimum acceptable, plus one page per
          * megabyte of RAM (for the p2m table) */
         lower_bound = shadow_min_acceptable_pages(d) + (d->tot_pages / 256);
@@ -1671,18 +1671,18 @@ static unsigned int sh_set_allocation(struct domain *d,
             pages = lower_bound;
     }
 
-    SHADOW_PRINTK("current %i target %i\n", 
+    SHADOW_PRINTK("current %i target %i\n",
                    d->arch.paging.shadow.total_pages, pages);
 
     for ( ; ; )
     {
-        if ( d->arch.paging.shadow.total_pages < pages ) 
+        if ( d->arch.paging.shadow.total_pages < pages )
         {
             /* Need to allocate more memory from domheap */
             sp = (struct page_info *)
                 alloc_domheap_page(NULL, MEMF_node(domain_to_node(d)));
-            if ( sp == NULL ) 
-            { 
+            if ( sp == NULL )
+            {
                 SHADOW_PRINTK("failed to allocate shadow pages.\n");
                 return -ENOMEM;
             }
@@ -1693,8 +1693,8 @@ static unsigned int sh_set_allocation(struct domain *d,
             sp->u.sh.count = 0;
             sp->tlbflush_timestamp = 0; /* Not in any TLB */
             page_list_add_tail(sp, &d->arch.paging.shadow.freelist);
-        } 
-        else if ( d->arch.paging.shadow.total_pages > pages ) 
+        }
+        else if ( d->arch.paging.shadow.total_pages > pages )
         {
             /* Need to return memory to domheap */
             _shadow_prealloc(d, 1);
@@ -1734,7 +1734,7 @@ static unsigned int shadow_get_allocation(struct domain *d)
 
 /**************************************************************************/
 /* Hash table for storing the guest->shadow mappings.
- * The table itself is an array of pointers to shadows; the shadows are then 
+ * The table itself is an array of pointers to shadows; the shadows are then
  * threaded on a singly-linked list of shadows with the same hash value */
 
 #define SHADOW_HASH_BUCKETS 251
@@ -1742,7 +1742,7 @@ static unsigned int shadow_get_allocation(struct domain *d)
 
 /* Hash function that takes a gfn or mfn, plus another byte of type info */
 typedef u32 key_t;
-static inline key_t sh_hash(unsigned long n, unsigned int t) 
+static inline key_t sh_hash(unsigned long n, unsigned int t)
 {
     unsigned char *p = (unsigned char *)&n;
     key_t k = t;
@@ -1801,7 +1801,7 @@ static void sh_hash_audit_bucket(struct domain *d, int bucket)
                         SHADOW_ERROR("MFN %#"PRI_mfn" shadowed (by %#"PRI_mfn")"
                                      " and not OOS but has typecount %#lx\n",
                                      __backpointer(sp),
-                                     mfn_x(page_to_mfn(sp)), 
+                                     mfn_x(page_to_mfn(sp)),
                                      gpg->u.inuse.type_info);
                         BUG();
                     }
@@ -1809,7 +1809,7 @@ static void sh_hash_audit_bucket(struct domain *d, int bucket)
             }
             else /* Not an l1 */
 #endif
-            if ( (gpg->u.inuse.type_info & PGT_type_mask) == PGT_writable_page 
+            if ( (gpg->u.inuse.type_info & PGT_type_mask) == PGT_writable_page
                  && (gpg->u.inuse.type_info & PGT_count_mask) != 0 )
             {
                 SHADOW_ERROR("MFN %#"PRI_mfn" shadowed (by %#"PRI_mfn")"
@@ -1839,7 +1839,7 @@ static void sh_hash_audit(struct domain *d)
     if ( !(SHADOW_AUDIT_ENABLE) )
         return;
 
-    for ( i = 0; i < SHADOW_HASH_BUCKETS; i++ ) 
+    for ( i = 0; i < SHADOW_HASH_BUCKETS; i++ )
     {
         sh_hash_audit_bucket(d, i);
     }
@@ -1849,7 +1849,7 @@ static void sh_hash_audit(struct domain *d)
 #define sh_hash_audit(_d) do {} while(0)
 #endif /* Hashtable bucket audit */
 
-/* Allocate and initialise the table itself.  
+/* Allocate and initialise the table itself.
  * Returns 0 for success, 1 for error. */
 static int shadow_hash_alloc(struct domain *d)
 {
@@ -1906,11 +1906,11 @@ mfn_t shadow_hash_lookup(struct vcpu *v, unsigned long n, unsigned int t)
                 if ( unlikely(d->arch.paging.shadow.hash_walking != 0) )
                     /* Can't reorder: someone is walking the hash chains */
                     return page_to_mfn(sp);
-                else 
+                else
                 {
                     ASSERT(prev);
                     /* Delete sp from the list */
-                    prev->next_shadow = sp->next_shadow;                    
+                    prev->next_shadow = sp->next_shadow;
                     /* Re-insert it at the head of the list */
                     set_next_shadow(sp, d->arch.paging.shadow.hash_table[key]);
                     d->arch.paging.shadow.hash_table[key] = sp;
@@ -1930,14 +1930,14 @@ mfn_t shadow_hash_lookup(struct vcpu *v, unsigned long n, unsigned int t)
     return _mfn(INVALID_MFN);
 }
 
-void shadow_hash_insert(struct vcpu *v, unsigned long n, unsigned int t, 
+void shadow_hash_insert(struct vcpu *v, unsigned long n, unsigned int t,
                         mfn_t smfn)
 /* Put a mapping (n,t)->smfn into the hash table */
 {
     struct domain *d = v->domain;
     struct page_info *sp;
     key_t key;
-    
+
     ASSERT(paging_locked_by_me(d));
     ASSERT(d->arch.paging.shadow.hash_table);
     ASSERT(t);
@@ -1947,16 +1947,16 @@ void shadow_hash_insert(struct vcpu *v, unsigned long n, unsigned int t,
     perfc_incr(shadow_hash_inserts);
     key = sh_hash(n, t);
     sh_hash_audit_bucket(d, key);
-    
+
     /* Insert this shadow at the top of the bucket */
     sp = mfn_to_page(smfn);
     set_next_shadow(sp, d->arch.paging.shadow.hash_table[key]);
     d->arch.paging.shadow.hash_table[key] = sp;
-    
+
     sh_hash_audit_bucket(d, key);
 }
 
-void shadow_hash_delete(struct vcpu *v, unsigned long n, unsigned int t, 
+void shadow_hash_delete(struct vcpu *v, unsigned long n, unsigned int t,
                         mfn_t smfn)
 /* Excise the mapping (n,t)->smfn from the hash table */
 {
@@ -1973,12 +1973,12 @@ void shadow_hash_delete(struct vcpu *v, unsigned long n, unsigned int t,
     perfc_incr(shadow_hash_deletes);
     key = sh_hash(n, t);
     sh_hash_audit_bucket(d, key);
-    
+
     sp = mfn_to_page(smfn);
-    if ( d->arch.paging.shadow.hash_table[key] == sp ) 
+    if ( d->arch.paging.shadow.hash_table[key] == sp )
         /* Easy case: we're deleting the head item. */
         d->arch.paging.shadow.hash_table[key] = next_shadow(sp);
-    else 
+    else
     {
         /* Need to search for the one we want */
         x = d->arch.paging.shadow.hash_table[key];
@@ -2001,17 +2001,17 @@ void shadow_hash_delete(struct vcpu *v, unsigned long n, unsigned int t,
 
 typedef int (*hash_callback_t)(struct vcpu *v, mfn_t smfn, mfn_t other_mfn);
 
-static void hash_foreach(struct vcpu *v, 
-                         unsigned int callback_mask, 
+static void hash_foreach(struct vcpu *v,
+                         unsigned int callback_mask,
                          const hash_callback_t callbacks[],
                          mfn_t callback_mfn)
-/* Walk the hash table looking at the types of the entries and 
- * calling the appropriate callback function for each entry. 
+/* Walk the hash table looking at the types of the entries and
+ * calling the appropriate callback function for each entry.
  * The mask determines which shadow types we call back for, and the array
  * of callbacks tells us which function to call.
- * Any callback may return non-zero to let us skip the rest of the scan. 
+ * Any callback may return non-zero to let us skip the rest of the scan.
  *
- * WARNING: Callbacks MUST NOT add or remove hash entries unless they 
+ * WARNING: Callbacks MUST NOT add or remove hash entries unless they
  * then return non-zero to terminate the scan. */
 {
     int i, done = 0;
@@ -2028,7 +2028,7 @@ static void hash_foreach(struct vcpu *v,
     ASSERT(d->arch.paging.shadow.hash_walking == 0);
     d->arch.paging.shadow.hash_walking = 1;
 
-    for ( i = 0; i < SHADOW_HASH_BUCKETS; i++ ) 
+    for ( i = 0; i < SHADOW_HASH_BUCKETS; i++ )
     {
         /* WARNING: This is not safe against changes to the hash table.
          * The callback *must* return non-zero if it has inserted or
@@ -2044,15 +2044,15 @@ static void hash_foreach(struct vcpu *v,
                 if ( done ) break;
             }
         }
-        if ( done ) break; 
+        if ( done ) break;
     }
-    d->arch.paging.shadow.hash_walking = 0; 
+    d->arch.paging.shadow.hash_walking = 0;
 }
 
 
 /**************************************************************************/
 /* Destroy a shadow page: simple dispatcher to call the per-type destructor
- * which will decrement refcounts appropriately and return memory to the 
+ * which will decrement refcounts appropriately and return memory to the
  * free pool. */
 
 void sh_destroy_shadow(struct vcpu *v, mfn_t smfn)
@@ -2065,13 +2065,13 @@ void sh_destroy_shadow(struct vcpu *v, mfn_t smfn)
 
     /* Double-check, if we can, that the shadowed page belongs to this
      * domain, (by following the back-pointer). */
-    ASSERT(t == SH_type_fl1_32_shadow  ||  
-           t == SH_type_fl1_pae_shadow ||  
-           t == SH_type_fl1_64_shadow  || 
-           t == SH_type_monitor_table  || 
+    ASSERT(t == SH_type_fl1_32_shadow  ||
+           t == SH_type_fl1_pae_shadow ||
+           t == SH_type_fl1_64_shadow  ||
+           t == SH_type_monitor_table  ||
            (is_pv_32on64_vcpu(v) && t == SH_type_l4_64_shadow) ||
            (page_get_owner(mfn_to_page(backpointer(sp)))
-            == v->domain)); 
+            == v->domain));
 
     /* The down-shifts here are so that the switch statement is on nice
      * small numbers that the compiler will enjoy */
@@ -2115,7 +2115,7 @@ void sh_destroy_shadow(struct vcpu *v, mfn_t smfn)
         SHADOW_ERROR("tried to destroy shadow of bad type %08lx\n",
                      (unsigned long)t);
         BUG();
-    }    
+    }
 }
 
 static inline void trace_shadow_wrmap_bf(mfn_t gmfn)
@@ -2129,13 +2129,13 @@ static inline void trace_shadow_wrmap_bf(mfn_t gmfn)
 }
 
 /**************************************************************************/
-/* Remove all writeable mappings of a guest frame from the shadow tables 
- * Returns non-zero if we need to flush TLBs. 
+/* Remove all writeable mappings of a guest frame from the shadow tables
+ * Returns non-zero if we need to flush TLBs.
  * level and fault_addr desribe how we found this to be a pagetable;
  * level==0 means we have some other reason for revoking write access.
  * If level==0 we are allowed to fail, returning -1. */
 
-int sh_remove_write_access(struct vcpu *v, mfn_t gmfn, 
+int sh_remove_write_access(struct vcpu *v, mfn_t gmfn,
                            unsigned int level,
                            unsigned long fault_addr)
 {
@@ -2180,7 +2180,7 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn,
 
     /* Early exit if it's already a pagetable, or otherwise not writeable */
     if ( (sh_mfn_is_a_page_table(gmfn)
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
          /* Unless they've been allowed to go out of sync with their shadows */
            && !mfn_oos_may_write(gmfn)
 #endif
@@ -2192,11 +2192,11 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn,
 
     perfc_incr(shadow_writeable);
 
-    /* If this isn't a "normal" writeable page, the domain is trying to 
+    /* If this isn't a "normal" writeable page, the domain is trying to
      * put pagetables in special memory of some kind.  We can't allow that. */
     if ( (pg->u.inuse.type_info & PGT_type_mask) != PGT_writable_page )
     {
-        SHADOW_ERROR("can't remove write access to mfn %lx, type_info is %" 
+        SHADOW_ERROR("can't remove write access to mfn %lx, type_info is %"
                       PRtype_info "\n",
                       mfn_x(gmfn), mfn_to_page(gmfn)->u.inuse.type_info);
         domain_crash(v->domain);
@@ -2219,7 +2219,7 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn,
                 return 1;                                               \
             }                                                           \
         } while (0)
-        
+
         if ( v->arch.paging.mode->guest_levels == 2 )
         {
             if ( level == 1 )
@@ -2227,27 +2227,27 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn,
                 GUESS(0xC0000000UL + (fault_addr >> 10), 1);
 
             /* Linux lowmem: first 896MB is mapped 1-to-1 above 0xC0000000 */
-            if ((gfn = mfn_to_gfn(v->domain, gmfn)) < 0x38000 ) 
+            if ((gfn = mfn_to_gfn(v->domain, gmfn)) < 0x38000 )
                 GUESS(0xC0000000UL + (gfn << PAGE_SHIFT), 4);
 
             /* FreeBSD: Linear map at 0xBFC00000 */
             if ( level == 1 )
-                GUESS(0xBFC00000UL 
+                GUESS(0xBFC00000UL
                       + ((fault_addr & VADDR_MASK) >> 10), 6);
         }
         else if ( v->arch.paging.mode->guest_levels == 3 )
         {
             /* 32bit PAE w2k3: linear map at 0xC0000000 */
-            switch ( level ) 
+            switch ( level )
             {
             case 1: GUESS(0xC0000000UL + (fault_addr >> 9), 2); break;
             case 2: GUESS(0xC0600000UL + (fault_addr >> 18), 2); break;
             }
 
             /* Linux lowmem: first 896MB is mapped 1-to-1 above 0xC0000000 */
-            if ((gfn = mfn_to_gfn(v->domain, gmfn)) < 0x38000 ) 
+            if ((gfn = mfn_to_gfn(v->domain, gmfn)) < 0x38000 )
                 GUESS(0xC0000000UL + (gfn << PAGE_SHIFT), 4);
-            
+
             /* FreeBSD PAE: Linear map at 0xBF800000 */
             switch ( level )
             {
@@ -2260,20 +2260,20 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn,
         else if ( v->arch.paging.mode->guest_levels == 4 )
         {
             /* 64bit w2k3: linear map at 0xfffff68000000000 */
-            switch ( level ) 
+            switch ( level )
             {
-            case 1: GUESS(0xfffff68000000000UL 
+            case 1: GUESS(0xfffff68000000000UL
                           + ((fault_addr & VADDR_MASK) >> 9), 3); break;
             case 2: GUESS(0xfffff6fb40000000UL
                           + ((fault_addr & VADDR_MASK) >> 18), 3); break;
-            case 3: GUESS(0xfffff6fb7da00000UL 
+            case 3: GUESS(0xfffff6fb7da00000UL
                           + ((fault_addr & VADDR_MASK) >> 27), 3); break;
             }
 
             /* 64bit Linux direct map at 0xffff880000000000; older kernels
              * had it at 0xffff810000000000, and older kernels yet had it
              * at 0x0000010000000000UL */
-            gfn = mfn_to_gfn(v->domain, gmfn); 
+            gfn = mfn_to_gfn(v->domain, gmfn);
             GUESS(0xffff880000000000UL + (gfn << PAGE_SHIFT), 4);
             GUESS(0xffff810000000000UL + (gfn << PAGE_SHIFT), 4);
             GUESS(0x0000010000000000UL + (gfn << PAGE_SHIFT), 4);
@@ -2283,7 +2283,7 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn,
              * kpm_vbase; 0xfffffe0000000000UL
              */
             GUESS(0xfffffe0000000000UL + (gfn << PAGE_SHIFT), 4);
- 
+
              /* FreeBSD 64bit: linear map 0xffff800000000000 */
              switch ( level )
              {
@@ -2316,7 +2316,7 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn,
         mfn_t last_smfn = _mfn(v->arch.paging.shadow.last_writeable_pte_smfn);
         int shtype = mfn_to_page(last_smfn)->u.sh.type;
 
-        if ( callbacks[shtype] ) 
+        if ( callbacks[shtype] )
             callbacks[shtype](v, last_smfn, gmfn);
 
         if ( (pg->u.inuse.type_info & PGT_count_mask) != old_count )
@@ -2327,7 +2327,7 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn,
         return 1;
 
 #endif /* SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC */
-    
+
     /* Brute-force search of all the shadows, by walking the hash */
     trace_shadow_wrmap_bf(gmfn);
     if ( level == 0 )
@@ -2348,20 +2348,20 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn,
                       (mfn_to_page(gmfn)->u.inuse.type_info&PGT_count_mask));
         domain_crash(v->domain);
     }
-    
+
     /* We killed at least one writeable mapping, so must flush TLBs. */
     return 1;
 }
 
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
 int sh_remove_write_access_from_sl1p(struct vcpu *v, mfn_t gmfn,
                                      mfn_t smfn, unsigned long off)
 {
     struct page_info *sp = mfn_to_page(smfn);
-    
+
     ASSERT(mfn_valid(smfn));
     ASSERT(mfn_valid(gmfn));
-    
+
     if ( sp->u.sh.type == SH_type_l1_32_shadow
          || sp->u.sh.type == SH_type_fl1_32_shadow )
     {
@@ -2379,7 +2379,7 @@ int sh_remove_write_access_from_sl1p(struct vcpu *v, mfn_t gmfn,
 
     return 0;
 }
-#endif 
+#endif
 
 /**************************************************************************/
 /* Remove all mappings of a guest frame from the shadow tables.
@@ -2427,9 +2427,9 @@ static int sh_remove_all_mappings(struct vcpu *v, mfn_t gmfn)
      * can be called via put_page_type when we clear a shadow l1e).*/
     paging_lock_recursive(v->domain);
 
-    /* XXX TODO: 
+    /* XXX TODO:
      * Heuristics for finding the (probably) single mapping of this gmfn */
-    
+
     /* Brute-force search of all the shadows, by walking the hash */
     perfc_incr(shadow_mappings_bf);
     hash_foreach(v, callback_mask, callbacks, gmfn);
@@ -2437,8 +2437,8 @@ static int sh_remove_all_mappings(struct vcpu *v, mfn_t gmfn)
     /* If that didn't catch the mapping, something is very wrong */
     if ( !sh_check_page_has_no_refs(page) )
     {
-        /* Don't complain if we're in HVM and there are some extra mappings: 
-         * The qemu helper process has an untyped mapping of this dom's RAM 
+        /* Don't complain if we're in HVM and there are some extra mappings:
+         * The qemu helper process has an untyped mapping of this dom's RAM
          * and the HVM restore program takes another.
          * Also allow one typed refcount for xenheap pages, to match
          * share_xen_page_with_guest(). */
@@ -2448,7 +2448,7 @@ static int sh_remove_all_mappings(struct vcpu *v, mfn_t gmfn)
                    == !!is_xen_heap_page(page))) )
         {
             SHADOW_ERROR("can't find all mappings of mfn %lx: "
-                          "c=%08lx t=%08lx\n", mfn_x(gmfn), 
+                          "c=%08lx t=%08lx\n", mfn_x(gmfn),
                           page->count_info, page->u.inuse.type_info);
         }
     }
@@ -2475,7 +2475,7 @@ static int sh_remove_shadow_via_pointer(struct vcpu *v, mfn_t smfn)
     ASSERT(sp->u.sh.type > 0);
     ASSERT(sp->u.sh.type < SH_type_max_shadow);
     ASSERT(sh_type_has_up_pointer(v, sp->u.sh.type));
-    
+
     if (sp->up == 0) return 0;
     pmfn = _mfn(sp->up >> PAGE_SHIFT);
     ASSERT(mfn_valid(pmfn));
@@ -2483,7 +2483,7 @@ static int sh_remove_shadow_via_pointer(struct vcpu *v, mfn_t smfn)
     ASSERT(vaddr);
     vaddr += sp->up & (PAGE_SIZE-1);
     ASSERT(l1e_get_pfn(*(l1_pgentry_t *)vaddr) == mfn_x(smfn));
-    
+
     /* Is this the only reference to this shadow? */
     rc = (sp->u.sh.count == 1) ? 1 : 0;
 
@@ -2508,7 +2508,7 @@ static int sh_remove_shadow_via_pointer(struct vcpu *v, mfn_t smfn)
         break;
     default: BUG(); /* Some wierd unknown shadow type */
     }
-    
+
     sh_unmap_domain_page(vaddr);
     if ( rc )
         perfc_incr(shadow_up_pointer);
@@ -2519,8 +2519,8 @@ static int sh_remove_shadow_via_pointer(struct vcpu *v, mfn_t smfn)
 }
 
 void sh_remove_shadows(struct vcpu *v, mfn_t gmfn, int fast, int all)
-/* Remove the shadows of this guest page.  
- * If fast != 0, just try the quick heuristic, which will remove 
+/* Remove the shadows of this guest page.
+ * If fast != 0, just try the quick heuristic, which will remove
  * at most one reference to each shadow of the page.  Otherwise, walk
  * all the shadow tables looking for refs to shadows of this gmfn.
  * If all != 0, kill the domain if we can't find all the shadows.
@@ -2530,7 +2530,7 @@ void sh_remove_shadows(struct vcpu *v, mfn_t gmfn, int fast, int all)
     struct page_info *pg = mfn_to_page(gmfn);
     mfn_t smfn;
     unsigned char t;
-    
+
     /* Dispatch table for getting per-type functions: each level must
      * be called with the function to remove a lower-level shadow. */
     static const hash_callback_t callbacks[SH_type_unused] = {
@@ -2642,7 +2642,7 @@ void sh_remove_shadows(struct vcpu *v, mfn_t gmfn, int fast, int all)
         domain_crash(v->domain);
     }
 
-    /* Need to flush TLBs now, so that linear maps are safe next time we 
+    /* Need to flush TLBs now, so that linear maps are safe next time we
      * take a fault. */
     flush_tlb_mask(v->domain->domain_dirty_cpumask);
 
@@ -2656,18 +2656,18 @@ sh_remove_all_shadows_and_parents(struct vcpu *v, mfn_t gmfn)
 {
     sh_remove_shadows(v, gmfn, 0, 1);
     /* XXX TODO:
-     * Rework this hashtable walker to return a linked-list of all 
-     * the shadows it modified, then do breadth-first recursion 
-     * to find the way up to higher-level tables and unshadow them too. 
+     * Rework this hashtable walker to return a linked-list of all
+     * the shadows it modified, then do breadth-first recursion
+     * to find the way up to higher-level tables and unshadow them too.
      *
      * The current code (just tearing down each page's shadows as we
-     * detect that it is not a pagetable) is correct, but very slow. 
+     * detect that it is not a pagetable) is correct, but very slow.
      * It means extra emulated writes and slows down removal of mappings. */
 }
 
 /**************************************************************************/
 
-/* Reset the up-pointers of every L3 shadow to 0. 
+/* Reset the up-pointers of every L3 shadow to 0.
  * This is called when l3 shadows stop being pinnable, to clear out all
  * the list-head bits so the up-pointer field is properly inititalised. */
 static int sh_clear_up_pointer(struct vcpu *v, mfn_t smfn, mfn_t unused)
@@ -2711,7 +2711,7 @@ static void sh_update_paging_modes(struct vcpu *v)
 
     ASSERT(paging_locked_by_me(d));
 
-#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
     /* Make sure this vcpu has a virtual TLB array allocated */
     if ( unlikely(!v->arch.paging.vtlb) )
     {
@@ -2727,7 +2727,7 @@ static void sh_update_paging_modes(struct vcpu *v)
     }
 #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
 
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
     if ( mfn_x(v->arch.paging.shadow.oos_snapshot[0]) == INVALID_MFN )
     {
         int i;
@@ -2768,7 +2768,7 @@ static void sh_update_paging_modes(struct vcpu *v)
         ASSERT(shadow_mode_translate(d));
         ASSERT(shadow_mode_external(d));
 
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
         /* Need to resync all our pages now, because if a page goes out
          * of sync with paging enabled and is resynced with paging
          * disabled, the resync will go wrong. */
@@ -2827,7 +2827,7 @@ static void sh_update_paging_modes(struct vcpu *v)
                 /* Need to make a new monitor table for the new mode */
                 mfn_t new_mfn, old_mfn;
 
-                if ( v != current && vcpu_runnable(v) ) 
+                if ( v != current && vcpu_runnable(v) )
                 {
                     SHADOW_ERROR("Some third party (d=%u v=%u) is changing "
                                  "this HVM vcpu's (d=%u v=%u) paging mode "
@@ -2847,7 +2847,7 @@ static void sh_update_paging_modes(struct vcpu *v)
                 SHADOW_PRINTK("new monitor table %"PRI_mfn "\n",
                                mfn_x(new_mfn));
 
-                /* Don't be running on the old monitor table when we 
+                /* Don't be running on the old monitor table when we
                  * pull it down!  Switch CR3, and warn the HVM code that
                  * its host cr3 has changed. */
                 make_cr3(v, mfn_x(new_mfn));
@@ -2914,9 +2914,9 @@ static void sh_new_mode(struct domain *d, u32 new_mode)
 int shadow_enable(struct domain *d, u32 mode)
 /* Turn on "permanent" shadow features: external, translate, refcount.
  * Can only be called once on a domain, and these features cannot be
- * disabled. 
+ * disabled.
  * Returns 0 for success, -errno for failure. */
-{    
+{
     unsigned int old_pages;
     struct page_info *pg = NULL;
     uint32_t *e;
@@ -2942,14 +2942,14 @@ int shadow_enable(struct domain *d, u32 mode)
     if ( old_pages == 0 )
     {
         unsigned int r;
-        paging_lock(d);                
+        paging_lock(d);
         r = sh_set_allocation(d, 1024, NULL); /* Use at least 4MB */
         if ( r != 0 )
         {
             sh_set_allocation(d, 0, NULL);
             rv = -ENOMEM;
             goto out_locked;
-        }        
+        }
         paging_unlock(d);
     }
 
@@ -2957,7 +2957,7 @@ int shadow_enable(struct domain *d, u32 mode)
     d->arch.paging.alloc_page = shadow_alloc_p2m_page;
     d->arch.paging.free_page = shadow_free_p2m_page;
 
-    /* Init the P2M table.  Must be done before we take the paging lock 
+    /* Init the P2M table.  Must be done before we take the paging lock
      * to avoid possible deadlock. */
     if ( mode & PG_translate )
     {
@@ -2970,7 +2970,7 @@ int shadow_enable(struct domain *d, u32 mode)
      * have paging disabled */
     if ( is_hvm_domain(d) )
     {
-        /* Get a single page from the shadow pool.  Take it via the 
+        /* Get a single page from the shadow pool.  Take it via the
          * P2M interface to make freeing it simpler afterwards. */
         pg = shadow_alloc_p2m_page(d);
         if ( pg == NULL )
@@ -2979,11 +2979,11 @@ int shadow_enable(struct domain *d, u32 mode)
             goto out_unlocked;
         }
         /* Fill it with 32-bit, non-PAE superpage entries, each mapping 4MB
-         * of virtual address space onto the same physical address range */ 
+         * of virtual address space onto the same physical address range */
         e = __map_domain_page(pg);
         for ( i = 0; i < PAGE_SIZE / sizeof(*e); i++ )
             e[i] = ((0x400000U * i)
-                    | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER 
+                    | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER
                     | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
         sh_unmap_domain_page(e);
         pg->u.inuse.type_info = PGT_l2_page_table | 1 | PGT_validated;
@@ -3005,8 +3005,8 @@ int shadow_enable(struct domain *d, u32 mode)
         goto out_locked;
     }
 
-#if (SHADOW_OPTIMIZATIONS & SHOPT_LINUX_L3_TOPLEVEL) 
-    /* We assume we're dealing with an older 64bit linux guest until we 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_LINUX_L3_TOPLEVEL)
+    /* We assume we're dealing with an older 64bit linux guest until we
      * see the guest use more than one l4 per vcpu. */
     d->arch.paging.shadow.opt_flags = SHOPT_LINUX_L3_TOPLEVEL;
 #endif
@@ -3073,7 +3073,7 @@ void shadow_teardown(struct domain *d)
         }
 #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
 
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
         {
             int i;
             mfn_t *oos_snapshot = v->arch.paging.shadow.oos_snapshot;
@@ -3093,24 +3093,24 @@ void shadow_teardown(struct domain *d)
         SHADOW_PRINTK("teardown of domain %u starts."
                        "  Shadow pages total = %u, free = %u, p2m=%u\n",
                        d->domain_id,
-                       d->arch.paging.shadow.total_pages, 
-                       d->arch.paging.shadow.free_pages, 
+                       d->arch.paging.shadow.total_pages,
+                       d->arch.paging.shadow.free_pages,
                        d->arch.paging.shadow.p2m_pages);
         /* Destroy all the shadows and release memory to domheap */
         sh_set_allocation(d, 0, NULL);
         /* Release the hash table back to xenheap */
-        if (d->arch.paging.shadow.hash_table) 
+        if (d->arch.paging.shadow.hash_table)
             shadow_hash_teardown(d);
         /* Should not have any more memory held */
         SHADOW_PRINTK("teardown done."
                        "  Shadow pages total = %u, free = %u, p2m=%u\n",
-                       d->arch.paging.shadow.total_pages, 
-                       d->arch.paging.shadow.free_pages, 
+                       d->arch.paging.shadow.total_pages,
+                       d->arch.paging.shadow.free_pages,
                        d->arch.paging.shadow.p2m_pages);
         ASSERT(d->arch.paging.shadow.total_pages == 0);
     }
 
-    /* Free the non-paged-vcpus pagetable; must happen after we've 
+    /* Free the non-paged-vcpus pagetable; must happen after we've
      * destroyed any shadows of it or sh_destroy_shadow will get confused. */
     if ( !pagetable_is_null(d->arch.paging.shadow.unpaged_pagetable) )
     {
@@ -3120,7 +3120,7 @@ void shadow_teardown(struct domain *d)
             if ( !hvm_paging_enabled(v) )
                 v->arch.guest_table = pagetable_null();
         }
-        unpaged_pagetable = 
+        unpaged_pagetable =
             pagetable_get_page(d->arch.paging.shadow.unpaged_pagetable);
         d->arch.paging.shadow.unpaged_pagetable = pagetable_null();
     }
@@ -3140,7 +3140,7 @@ void shadow_teardown(struct domain *d)
     paging_unlock(d);
 
     /* Must be called outside the lock */
-    if ( unpaged_pagetable ) 
+    if ( unpaged_pagetable )
         shadow_free_p2m_page(d, unpaged_pagetable);
 }
 
@@ -3150,11 +3150,11 @@ void shadow_final_teardown(struct domain *d)
     SHADOW_PRINTK("dom %u final teardown starts."
                    "  Shadow pages total = %u, free = %u, p2m=%u\n",
                    d->domain_id,
-                   d->arch.paging.shadow.total_pages, 
-                   d->arch.paging.shadow.free_pages, 
+                   d->arch.paging.shadow.total_pages,
+                   d->arch.paging.shadow.free_pages,
                    d->arch.paging.shadow.p2m_pages);
 
-    /* Double-check that the domain didn't have any shadow memory.  
+    /* Double-check that the domain didn't have any shadow memory.
      * It is possible for a domain that never got domain_kill()ed
      * to get here with its shadow allocation intact. */
     if ( d->arch.paging.shadow.total_pages != 0 )
@@ -3168,8 +3168,8 @@ void shadow_final_teardown(struct domain *d)
     SHADOW_PRINTK("dom %u final teardown done."
                    "  Shadow pages total = %u, free = %u, p2m=%u\n",
                    d->domain_id,
-                   d->arch.paging.shadow.total_pages, 
-                   d->arch.paging.shadow.free_pages, 
+                   d->arch.paging.shadow.total_pages,
+                   d->arch.paging.shadow.free_pages,
                    d->arch.paging.shadow.p2m_pages);
     paging_unlock(d);
 }
@@ -3214,7 +3214,7 @@ static int shadow_one_bit_enable(struct domain *d, u32 mode)
     return 0;
 }
 
-static int shadow_one_bit_disable(struct domain *d, u32 mode) 
+static int shadow_one_bit_disable(struct domain *d, u32 mode)
 /* Turn off a single shadow mode feature */
 {
     struct vcpu *v;
@@ -3234,8 +3234,8 @@ static int shadow_one_bit_disable(struct domain *d, u32 mode)
         SHADOW_PRINTK("un-shadowing of domain %u starts."
                        "  Shadow pages total = %u, free = %u, p2m=%u\n",
                        d->domain_id,
-                       d->arch.paging.shadow.total_pages, 
-                       d->arch.paging.shadow.free_pages, 
+                       d->arch.paging.shadow.total_pages,
+                       d->arch.paging.shadow.free_pages,
                        d->arch.paging.shadow.p2m_pages);
         for_each_vcpu(d, v)
         {
@@ -3246,7 +3246,7 @@ static int shadow_one_bit_disable(struct domain *d, u32 mode)
             else
                 make_cr3(v, pagetable_get_pfn(v->arch.guest_table));
 
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
             {
                 int i;
                 mfn_t *oos_snapshot = v->arch.paging.shadow.oos_snapshot;
@@ -3267,8 +3267,8 @@ static int shadow_one_bit_disable(struct domain *d, u32 mode)
         SHADOW_PRINTK("un-shadowing of domain %u done."
                        "  Shadow pages total = %u, free = %u, p2m=%u\n",
                        d->domain_id,
-                       d->arch.paging.shadow.total_pages, 
-                       d->arch.paging.shadow.free_pages, 
+                       d->arch.paging.shadow.total_pages,
+                       d->arch.paging.shadow.free_pages,
                        d->arch.paging.shadow.p2m_pages);
     }
 
@@ -3306,7 +3306,7 @@ static int shadow_test_disable(struct domain *d)
 /* P2M map manipulations */
 
 /* shadow specific code which should be called when P2M table entry is updated
- * with new content. It is responsible for update the entry, as well as other 
+ * with new content. It is responsible for update the entry, as well as other
  * shadow processing jobs.
  */
 
@@ -3329,7 +3329,7 @@ static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn,
     {
         mfn_t mfn = _mfn(l1e_get_pfn(*p));
         p2m_type_t p2mt = p2m_flags_to_type(l1e_get_flags(*p));
-        if ( (p2m_is_valid(p2mt) || p2m_is_grant(p2mt)) && mfn_valid(mfn) ) 
+        if ( (p2m_is_valid(p2mt) || p2m_is_grant(p2mt)) && mfn_valid(mfn) )
         {
             sh_remove_all_shadows_and_parents(v, mfn);
             if ( sh_remove_all_mappings(v, mfn) )
@@ -3337,8 +3337,8 @@ static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn,
         }
     }
 
-    /* If we're removing a superpage mapping from the p2m, we need to check 
-     * all the pages covered by it.  If they're still there in the new 
+    /* If we're removing a superpage mapping from the p2m, we need to check
+     * all the pages covered by it.  If they're still there in the new
      * scheme, that's OK, but otherwise they must be unshadowed. */
     if ( level == 2 && (l1e_get_flags(*p) & _PAGE_PRESENT) &&
          (l1e_get_flags(*p) & _PAGE_PSE) )
@@ -3355,13 +3355,13 @@ static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn,
 
             /* If we're replacing a superpage with a normal L1 page, map it */
             if ( (l1e_get_flags(new) & _PAGE_PRESENT)
-                 && !(l1e_get_flags(new) & _PAGE_PSE) 
+                 && !(l1e_get_flags(new) & _PAGE_PSE)
                  && mfn_valid(nmfn) )
                 npte = map_domain_page(mfn_x(nmfn));
-            
+
             for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
             {
-                if ( !npte 
+                if ( !npte
                      || !p2m_is_ram(p2m_flags_to_type(l1e_get_flags(npte[i])))
                      || l1e_get_pfn(npte[i]) != mfn_x(omfn) )
                 {
@@ -3374,7 +3374,7 @@ static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn,
                 omfn = _mfn(mfn_x(omfn) + 1);
             }
             flush_tlb_mask(&flushmask);
-            
+
             if ( npte )
                 unmap_domain_page(npte);
         }
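
The two hunks above sit in the path that handles a superpage being replaced in the p2m: every 4k frame the old mapping covered must either still be RAM at the same slot of the replacement L1 table or have its shadows and mappings removed, after which the TLB is flushed. A rough sketch of the per-frame decision, with still_ram_at() and unshadow_frame() as hypothetical stand-ins for the p2m type check and the sh_remove_* calls:

    #include <stdbool.h>
    #include <stdint.h>

    #define L1_ENTRIES 512          /* frames covered by one superpage mapping */

    /* Hypothetical: present entry in the replacement table, same frame? */
    bool still_ram_at(const uint64_t *new_l1, unsigned int i, unsigned long mfn)
    {
        return new_l1 && (new_l1[i] & 1) &&
               ((new_l1[i] & 0x000ffffffffff000ull) >> 12) == mfn;
    }

    /* Hypothetical stand-in for tearing down shadows/mappings of one frame. */
    void unshadow_frame(unsigned long mfn) { (void)mfn; }

    /* Walk the frames the old superpage covered; any frame not kept at the
     * same slot in the new L1 table loses its shadows.  The caller then
     * flushes stale TLB entries, as in the hunk above. */
    void unshadow_replaced_superpage(unsigned long first_mfn,
                                     const uint64_t *new_l1)
    {
        for (unsigned int i = 0; i < L1_ENTRIES; i++)
            if (!still_ram_at(new_l1, i, first_mfn + i))
                unshadow_frame(first_mfn + i);
    }
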
@@ -3389,7 +3389,7 @@ shadow_write_p2m_entry(struct domain *d, unsigned long gfn,
     paging_lock(d);
 
     /* If there are any shadows, update them.  But if shadow_teardown()
-     * has already been called then it's not safe to try. */ 
+     * has already been called then it's not safe to try. */
     if ( likely(d->arch.paging.shadow.total_pages != 0) )
          sh_unshadow_for_p2m_change(d, gfn, p, new, level);
 
@@ -3426,8 +3426,8 @@ static int sh_enable_log_dirty(struct domain *d, bool_t log_global)
     paging_lock(d);
     if ( shadow_mode_enabled(d) )
     {
-        /* This domain already has some shadows: need to clear them out 
-         * of the way to make sure that all references to guest memory are 
+        /* This domain already has some shadows: need to clear them out
+         * of the way to make sure that all references to guest memory are
          * properly write-protected */
         shadow_blow_tables(d);
     }
@@ -3439,7 +3439,7 @@ static int sh_enable_log_dirty(struct domain *d, bool_t log_global)
     if ( is_pv_32on64_domain(d) )
         d->arch.paging.shadow.opt_flags = SHOPT_LINUX_L3_TOPLEVEL;
 #endif
-    
+
     ret = shadow_one_bit_enable(d, PG_log_dirty);
     paging_unlock(d);
 
@@ -3454,12 +3454,12 @@ static int sh_disable_log_dirty(struct domain *d)
     paging_lock(d);
     ret = shadow_one_bit_disable(d, PG_log_dirty);
     paging_unlock(d);
-    
+
     return ret;
 }
 
-/* This function is called when we CLEAN log dirty bitmap. See 
- * paging_log_dirty_op() for details. 
+/* This function is called when we CLEAN log dirty bitmap. See
+ * paging_log_dirty_op() for details.
  */
 static void sh_clean_dirty_bitmap(struct domain *d)
 {
@@ -3519,7 +3519,7 @@ int shadow_track_dirty_vram(struct domain *d,
      * no need to be careful. */
     if ( !dirty_vram )
     {
-        /* Throw away all the shadows rather than walking through them 
+        /* Throw away all the shadows rather than walking through them
          * up to nr times getting rid of mappings of each pfn */
         shadow_blow_tables(d);
 
@@ -3665,7 +3665,7 @@ out:
 /**************************************************************************/
 /* Shadow-control XEN_DOMCTL dispatcher */
 
-int shadow_domctl(struct domain *d, 
+int shadow_domctl(struct domain *d,
                   xen_domctl_shadow_op_t *sc,
                   XEN_GUEST_HANDLE_PARAM(void) u_domctl)
 {
@@ -3675,7 +3675,7 @@ int shadow_domctl(struct domain *d,
     {
     case XEN_DOMCTL_SHADOW_OP_OFF:
         if ( d->arch.paging.mode == PG_SH_enable )
-            if ( (rc = shadow_test_disable(d)) != 0 ) 
+            if ( (rc = shadow_test_disable(d)) != 0 )
                 return rc;
         return 0;
 
@@ -3695,7 +3695,7 @@ int shadow_domctl(struct domain *d,
     case XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION:
         paging_lock(d);
         if ( sc->mb == 0 && shadow_mode_enabled(d) )
-        {            
+        {
             /* Can't set the allocation to zero unless the domain stops using
              * shadow pagetables first */
             SHADOW_ERROR("Can't set shadow allocation to zero, domain %u"
@@ -3709,7 +3709,7 @@ int shadow_domctl(struct domain *d,
             /* Not finished.  Set up to re-run the call. */
             rc = hypercall_create_continuation(
                 __HYPERVISOR_domctl, "h", u_domctl);
-        else 
+        else
             /* Finished.  Return the new allocation */
             sc->mb = shadow_get_allocation(d);
         return rc;
@@ -3726,7 +3726,7 @@ int shadow_domctl(struct domain *d,
 
 #if SHADOW_AUDIT & SHADOW_AUDIT_ENTRIES_FULL
 
-void shadow_audit_tables(struct vcpu *v) 
+void shadow_audit_tables(struct vcpu *v)
 {
     /* Dispatch table for getting per-type functions */
     static const hash_callback_t callbacks[SH_type_unused] = {
@@ -3746,7 +3746,7 @@ void shadow_audit_tables(struct vcpu *v)
         SHADOW_INTERNAL_NAME(sh_audit_l4_table, 4),  /* l4_64   */
         NULL  /* All the rest */
     };
-    unsigned int mask; 
+    unsigned int mask;
 
     if ( !(SHADOW_AUDIT_ENABLE) )
         return;
@@ -3765,7 +3765,7 @@ void shadow_audit_tables(struct vcpu *v)
         case 2: mask = (SHF_L1_32|SHF_FL1_32|SHF_L2_32); break;
         case 3: mask = (SHF_L1_PAE|SHF_FL1_PAE|SHF_L2_PAE
                         |SHF_L2H_PAE); break;
-        case 4: mask = (SHF_L1_64|SHF_FL1_64|SHF_L2_64  
+        case 4: mask = (SHF_L1_64|SHF_FL1_64|SHF_L2_64
                         |SHF_L3_64|SHF_L4_64); break;
         default: BUG();
         }
@@ -3782,5 +3782,5 @@ void shadow_audit_tables(struct vcpu *v)
  * c-file-style: "BSD"
  * c-basic-offset: 4
  * indent-tabs-mode: nil
- * End: 
+ * End:
  */
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 5fc10c9..434df61 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -1,7 +1,7 @@
 /******************************************************************************
  * arch/x86/mm/shadow/multi.c
  *
- * Simple, mostly-synchronous shadow page tables. 
+ * Simple, mostly-synchronous shadow page tables.
  * Parts of this code are Copyright (c) 2006 by XenSource Inc.
  * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
  * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
@@ -43,25 +43,25 @@
 #include "types.h"
 
 /* THINGS TO DO LATER:
- * 
+ *
  * TEARDOWN HEURISTICS
- * Also: have a heuristic for when to destroy a previous paging-mode's 
+ * Also: have a heuristic for when to destroy a previous paging-mode's
  * shadows.  When a guest is done with its start-of-day 32-bit tables
- * and reuses the memory we want to drop those shadows.  Start with 
- * shadows in a page in two modes as a hint, but beware of clever tricks 
+ * and reuses the memory we want to drop those shadows.  Start with
+ * shadows in a page in two modes as a hint, but beware of clever tricks
  * like reusing a pagetable for both PAE and 64-bit during boot...
  *
  * PAE LINEAR MAPS
  * Rework shadow_get_l*e() to have the option of using map_domain_page()
- * instead of linear maps.  Add appropriate unmap_l*e calls in the users. 
- * Then we can test the speed difference made by linear maps.  If the 
- * map_domain_page() version is OK on PAE, we could maybe allow a lightweight 
- * l3-and-l2h-only shadow mode for PAE PV guests that would allow them 
- * to share l2h pages again. 
+ * instead of linear maps.  Add appropriate unmap_l*e calls in the users.
+ * Then we can test the speed difference made by linear maps.  If the
+ * map_domain_page() version is OK on PAE, we could maybe allow a lightweight
+ * l3-and-l2h-only shadow mode for PAE PV guests that would allow them
+ * to share l2h pages again.
  *
  * PSE disabled / PSE36
  * We don't support any modes other than PSE enabled, PSE36 disabled.
- * Neither of those would be hard to change, but we'd need to be able to 
+ * Neither of those would be hard to change, but we'd need to be able to
  * deal with shadows made in one mode and used in another.
  */
 
@@ -90,7 +90,7 @@ static char *fetch_type_names[] = {
  *              shadow L1 which maps its "splinters".
  */
 
-static inline mfn_t 
+static inline mfn_t
 get_fl1_shadow_status(struct vcpu *v, gfn_t gfn)
 /* Look for FL1 shadows in the hash table */
 {
@@ -99,7 +99,7 @@ get_fl1_shadow_status(struct vcpu *v, gfn_t gfn)
     return smfn;
 }
 
-static inline mfn_t 
+static inline mfn_t
 get_shadow_status(struct vcpu *v, mfn_t gmfn, u32 shadow_type)
 /* Look for shadows in the hash table */
 {
@@ -109,7 +109,7 @@ get_shadow_status(struct vcpu *v, mfn_t gmfn, u32 shadow_type)
     return smfn;
 }
 
-static inline void 
+static inline void
 set_fl1_shadow_status(struct vcpu *v, gfn_t gfn, mfn_t smfn)
 /* Put an FL1 shadow into the hash table */
 {
@@ -120,7 +120,7 @@ set_fl1_shadow_status(struct vcpu *v, gfn_t gfn, mfn_t smfn)
     shadow_hash_insert(v, gfn_x(gfn), SH_type_fl1_shadow, smfn);
 }
 
-static inline void 
+static inline void
 set_shadow_status(struct vcpu *v, mfn_t gmfn, u32 shadow_type, mfn_t smfn)
 /* Put a shadow into the hash table */
 {
@@ -143,7 +143,7 @@ set_shadow_status(struct vcpu *v, mfn_t gmfn, u32 shadow_type, mfn_t smfn)
     shadow_hash_insert(v, mfn_x(gmfn), shadow_type, smfn);
 }
 
-static inline void 
+static inline void
 delete_fl1_shadow_status(struct vcpu *v, gfn_t gfn, mfn_t smfn)
 /* Remove a shadow from the hash table */
 {
@@ -153,7 +153,7 @@ delete_fl1_shadow_status(struct vcpu *v, gfn_t gfn, mfn_t smfn)
     shadow_hash_delete(v, gfn_x(gfn), SH_type_fl1_shadow, smfn);
 }
 
-static inline void 
+static inline void
 delete_shadow_status(struct vcpu *v, mfn_t gmfn, u32 shadow_type, mfn_t smfn)
 /* Remove a shadow from the hash table */
 {
@@ -172,10 +172,10 @@ delete_shadow_status(struct vcpu *v, mfn_t gmfn, u32 shadow_type, mfn_t smfn)
 /* Functions for walking the guest page tables */
 
 static inline uint32_t
-sh_walk_guest_tables(struct vcpu *v, unsigned long va, walk_t *gw, 
+sh_walk_guest_tables(struct vcpu *v, unsigned long va, walk_t *gw,
                      uint32_t pfec)
 {
-    return guest_walk_tables(v, p2m_get_hostp2m(v->domain), va, gw, pfec, 
+    return guest_walk_tables(v, p2m_get_hostp2m(v->domain), va, gw, pfec,
 #if GUEST_PAGING_LEVELS == 3 /* PAE */
                              _mfn(INVALID_MFN),
                              v->arch.paging.shadow.gl3e
@@ -323,7 +323,7 @@ gw_remove_write_accesses(struct vcpu *v, unsigned long va, walk_t *gw)
 #if SHADOW_AUDIT & SHADOW_AUDIT_ENTRIES
 /* Lightweight audit: pass all the shadows associated with this guest walk
  * through the audit mechanisms */
-static void sh_audit_gw(struct vcpu *v, walk_t *gw) 
+static void sh_audit_gw(struct vcpu *v, walk_t *gw)
 {
     mfn_t smfn;
 
@@ -332,32 +332,32 @@ static void sh_audit_gw(struct vcpu *v, walk_t *gw)
 
 #if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
     if ( mfn_valid(gw->l4mfn)
-         && mfn_valid((smfn = get_shadow_status(v, gw->l4mfn, 
+         && mfn_valid((smfn = get_shadow_status(v, gw->l4mfn,
                                                 SH_type_l4_shadow))) )
         (void) sh_audit_l4_table(v, smfn, _mfn(INVALID_MFN));
     if ( mfn_valid(gw->l3mfn)
-         && mfn_valid((smfn = get_shadow_status(v, gw->l3mfn, 
+         && mfn_valid((smfn = get_shadow_status(v, gw->l3mfn,
                                                 SH_type_l3_shadow))) )
         (void) sh_audit_l3_table(v, smfn, _mfn(INVALID_MFN));
 #endif /* PAE or 64... */
     if ( mfn_valid(gw->l2mfn) )
     {
-        if ( mfn_valid((smfn = get_shadow_status(v, gw->l2mfn, 
+        if ( mfn_valid((smfn = get_shadow_status(v, gw->l2mfn,
                                                  SH_type_l2_shadow))) )
             (void) sh_audit_l2_table(v, smfn, _mfn(INVALID_MFN));
 #if GUEST_PAGING_LEVELS == 3
-        if ( mfn_valid((smfn = get_shadow_status(v, gw->l2mfn, 
+        if ( mfn_valid((smfn = get_shadow_status(v, gw->l2mfn,
                                                  SH_type_l2h_shadow))) )
             (void) sh_audit_l2_table(v, smfn, _mfn(INVALID_MFN));
 #endif
     }
     if ( mfn_valid(gw->l1mfn)
-         && mfn_valid((smfn = get_shadow_status(v, gw->l1mfn, 
+         && mfn_valid((smfn = get_shadow_status(v, gw->l1mfn,
                                                 SH_type_l1_shadow))) )
         (void) sh_audit_l1_table(v, smfn, _mfn(INVALID_MFN));
     else if ( (guest_l2e_get_flags(gw->l2e) & _PAGE_PRESENT)
               && (guest_l2e_get_flags(gw->l2e) & _PAGE_PSE)
-              && mfn_valid( 
+              && mfn_valid(
               (smfn = get_fl1_shadow_status(v, guest_l2e_get_gfn(gw->l2e)))) )
         (void) sh_audit_fl1_table(v, smfn, _mfn(INVALID_MFN));
 }
@@ -376,11 +376,11 @@ sh_guest_map_l1e(struct vcpu *v, unsigned long addr,
     walk_t gw;
 
     ASSERT(shadow_mode_translate(v->domain));
-        
+
     // XXX -- this is expensive, but it's easy to cobble together...
     // FIXME!
 
-    if ( sh_walk_guest_tables(v, addr, &gw, PFEC_page_present) == 0 
+    if ( sh_walk_guest_tables(v, addr, &gw, PFEC_page_present) == 0
          && mfn_valid(gw.l1mfn) )
     {
         if ( gl1mfn )
@@ -398,7 +398,7 @@ sh_guest_get_eff_l1e(struct vcpu *v, unsigned long addr, void *eff_l1e)
     walk_t gw;
 
     ASSERT(shadow_mode_translate(v->domain));
-        
+
     // XXX -- this is expensive, but it's easy to cobble together...
     // FIXME!
 
@@ -506,12 +506,12 @@ shadow_l4_index(mfn_t *smfn, u32 guest_index)
  */
 
 static always_inline void
-_sh_propagate(struct vcpu *v, 
+_sh_propagate(struct vcpu *v,
               guest_intpte_t guest_intpte,
-              mfn_t target_mfn, 
+              mfn_t target_mfn,
               void *shadow_entry_ptr,
               int level,
-              fetch_type_t ft, 
+              fetch_type_t ft,
               p2m_type_t p2mt)
 {
     guest_l1e_t guest_entry = { guest_intpte };
@@ -537,11 +537,11 @@ _sh_propagate(struct vcpu *v,
     if ( unlikely(!(gflags & _PAGE_PRESENT)) )
     {
 #if !(SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
-        /* If a guest l1 entry is not present, shadow with the magic 
+        /* If a guest l1 entry is not present, shadow with the magic
          * guest-not-present entry. */
         if ( level == 1 )
             *sp = sh_l1e_gnp();
-        else 
+        else
 #endif /* !OOS */
             *sp = shadow_l1e_empty();
         goto done;
@@ -562,7 +562,7 @@ _sh_propagate(struct vcpu *v,
     // return early.
     //
     if ( !mfn_valid(target_mfn)
-         && !(level == 1 && (!shadow_mode_refcounts(d) 
+         && !(level == 1 && (!shadow_mode_refcounts(d)
                              || p2mt == p2m_mmio_direct)) )
     {
         ASSERT((ft == ft_prefetch));
@@ -595,7 +595,7 @@ _sh_propagate(struct vcpu *v,
         ASSERT(!(sflags & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)));
 
         /* compute the PAT index for shadow page entry when VT-d is enabled
-         * and device assigned. 
+         * and device assigned.
          * 1) direct MMIO: compute the PAT index with gMTRR=UC and gPAT.
          * 2) if enables snoop control, compute the PAT index as WB.
          * 3) if disables snoop control, compute the PAT index with
@@ -613,7 +613,7 @@ _sh_propagate(struct vcpu *v,
                             gflags,
                             gfn_to_paddr(target_gfn),
                             pfn_to_paddr(mfn_x(target_mfn)),
-                            MTRR_TYPE_UNCACHABLE); 
+                            MTRR_TYPE_UNCACHABLE);
                 else if ( iommu_snoop )
                     sflags |= pat_type_2_pte_flags(PAT_TYPE_WRBACK);
                 else
@@ -654,12 +654,12 @@ _sh_propagate(struct vcpu *v,
     // Only allow the guest write access to a page a) on a demand fault,
     // or b) if the page is already marked as dirty.
     //
-    // (We handle log-dirty entirely inside the shadow code, without using the 
+    // (We handle log-dirty entirely inside the shadow code, without using the
     // p2m_ram_logdirty p2m type: only HAP uses that.)
     if ( unlikely((level == 1) && shadow_mode_log_dirty(d)) )
     {
         if ( mfn_valid(target_mfn) ) {
-            if ( ft & FETCH_TYPE_WRITE ) 
+            if ( ft & FETCH_TYPE_WRITE )
                 paging_mark_dirty(d, mfn_x(target_mfn));
             else if ( !paging_mfn_is_dirty(d, target_mfn) )
                 sflags &= ~_PAGE_RW;
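
This hunk is the heart of shadow log-dirty: a demand write is logged via paging_mark_dirty() and keeps _PAGE_RW, while any other fetch of a frame that is not yet marked dirty has RW stripped so that its first write faults and gets noticed. A compact sketch of that decision, assuming a flat bitmap in place of the real paging_mark_dirty()/paging_mfn_is_dirty() machinery:

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_RW 0x2u

    /* Hypothetical per-domain dirty bitmap; only the flag handling matters. */
    bool test_and_set_dirty(uint8_t *bitmap, unsigned long pfn)
    {
        bool was_dirty = bitmap[pfn / 8] & (1u << (pfn % 8));
        bitmap[pfn / 8] |= 1u << (pfn % 8);
        return was_dirty;
    }

    /* Demand writes are logged and stay writeable; any other fetch of a
     * clean frame is write-protected so its first write faults. */
    uint32_t logdirty_adjust(uint32_t sflags, bool demand_write,
                             uint8_t *bitmap, unsigned long pfn)
    {
        if (demand_write)
            test_and_set_dirty(bitmap, pfn);
        else if (!(bitmap[pfn / 8] & (1u << (pfn % 8))))
            sflags &= ~PAGE_RW;
        return sflags;
    }
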
@@ -682,10 +682,10 @@ _sh_propagate(struct vcpu *v,
          (p2mt == p2m_mmio_direct &&
           rangeset_contains_singleton(mmio_ro_ranges, mfn_x(target_mfn))) )
         sflags &= ~_PAGE_RW;
-    
+
     // protect guest page tables
     //
-    if ( unlikely((level == 1) 
+    if ( unlikely((level == 1)
                   && sh_mfn_is_a_page_table(target_mfn)
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC )
                   /* Unless the page is out of sync and the guest is
@@ -699,7 +699,7 @@ _sh_propagate(struct vcpu *v,
     // PV guests in 64-bit mode use two different page tables for user vs
     // supervisor permissions, making the guest's _PAGE_USER bit irrelevant.
     // It is always shadowed as present...
-    if ( (GUEST_PAGING_LEVELS == 4) && !is_pv_32on64_domain(d) 
+    if ( (GUEST_PAGING_LEVELS == 4) && !is_pv_32on64_domain(d)
          && is_pv_domain(d) )
     {
         sflags |= _PAGE_USER;
@@ -720,7 +720,7 @@ _sh_propagate(struct vcpu *v,
 
 #if GUEST_PAGING_LEVELS >= 4
 static void
-l4e_propagate_from_guest(struct vcpu *v, 
+l4e_propagate_from_guest(struct vcpu *v,
                          guest_l4e_t gl4e,
                          mfn_t sl3mfn,
                          shadow_l4e_t *sl4e,
@@ -732,7 +732,7 @@ l4e_propagate_from_guest(struct vcpu *v,
 static void
 l3e_propagate_from_guest(struct vcpu *v,
                          guest_l3e_t gl3e,
-                         mfn_t sl2mfn, 
+                         mfn_t sl2mfn,
                          shadow_l3e_t *sl3e,
                          fetch_type_t ft)
 {
@@ -741,7 +741,7 @@ l3e_propagate_from_guest(struct vcpu *v,
 #endif // GUEST_PAGING_LEVELS >= 4
 
 static void
-l2e_propagate_from_guest(struct vcpu *v, 
+l2e_propagate_from_guest(struct vcpu *v,
                          guest_l2e_t gl2e,
                          mfn_t sl1mfn,
                          shadow_l2e_t *sl2e,
@@ -751,11 +751,11 @@ l2e_propagate_from_guest(struct vcpu *v,
 }
 
 static void
-l1e_propagate_from_guest(struct vcpu *v, 
+l1e_propagate_from_guest(struct vcpu *v,
                          guest_l1e_t gl1e,
-                         mfn_t gmfn, 
+                         mfn_t gmfn,
                          shadow_l1e_t *sl1e,
-                         fetch_type_t ft, 
+                         fetch_type_t ft,
                          p2m_type_t p2mt)
 {
     _sh_propagate(v, gl1e.l1, gmfn, sl1e, 1, ft, p2mt);
@@ -768,10 +768,10 @@ l1e_propagate_from_guest(struct vcpu *v,
  * functions which ever write (non-zero) data onto a shadow page.
  */
 
-static inline void safe_write_entry(void *dst, void *src) 
+static inline void safe_write_entry(void *dst, void *src)
 /* Copy one PTE safely when processors might be running on the
  * destination pagetable.   This does *not* give safety against
- * concurrent writes (that's what the paging lock is for), just 
+ * concurrent writes (that's what the paging lock is for), just
  * stops the hardware picking up partially written entries. */
 {
     volatile unsigned long *d = dst;
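
The helper in this hunk exists so that a processor walking the destination pagetable never observes a torn entry; only the volatile destination pointer is visible in the context lines. A hedged sketch of the general ordering trick for the case where one entry is wider than a machine word (an illustration of the idea, not a copy of the Xen helper):

    #include <stdint.h>

    /* Assumed layout: an 8-byte PTE on a 32-bit build, two machine words,
     * with the present bit in the low word. */
    typedef struct { uint32_t lo, hi; } pte_t;

    void copy_pte_safely(volatile pte_t *dst, const pte_t *src)
    {
        /* 1. Make the entry not-present so a concurrent hardware walk
         *    cannot match a half-written translation. */
        dst->lo = 0;
        /* 2. Write the half without the present bit... */
        dst->hi = src->hi;
        /* 3. ...and only then the half that re-arms the present bit. */
        dst->lo = src->lo;
    }
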
@@ -784,7 +784,7 @@ static inline void safe_write_entry(void *dst, void *src)
 }
 
 
-static inline void 
+static inline void
 shadow_write_entries(void *d, void *s, int entries, mfn_t mfn)
 /* This function does the actual writes to shadow pages.
  * It must not be called directly, since it doesn't do the bookkeeping
@@ -797,10 +797,10 @@ shadow_write_entries(void *d, void *s, int entries, mfn_t mfn)
 
     /* Because we mirror access rights at all levels in the shadow, an
      * l2 (or higher) entry with the RW bit cleared will leave us with
-     * no write access through the linear map.  
-     * We detect that by writing to the shadow with copy_to_user() and 
+     * no write access through the linear map.
+     * We detect that by writing to the shadow with copy_to_user() and
      * using map_domain_page() to get a writeable mapping if we need to. */
-    if ( __copy_to_user(d, d, sizeof (unsigned long)) != 0 ) 
+    if ( __copy_to_user(d, d, sizeof (unsigned long)) != 0 )
     {
         perfc_incr(shadow_linear_map_failed);
         map = sh_map_domain_page(mfn);
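
The comment in this hunk describes the trick the function relies on: shadows are normally written through the linear map, but because access rights are mirrored at every shadow level a read-only higher-level entry can leave that mapping unwriteable, so a probe write decides whether to fall back to a private mapping of the frame. A self-contained sketch of the same control flow, with probe_write() and map_frame_writeable() as hypothetical stand-ins for __copy_to_user() and (sh_)map_domain_page():

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    /* Hypothetical helpers: simulate a read-only linear map so the
     * fallback path is taken. */
    static bool probe_write(void *p, size_t n) { (void)p; (void)n; return false; }
    static unsigned char frame[4096];                 /* pretend domain frame */
    static void *map_frame_writeable(unsigned long mfn) { (void)mfn; return frame; }
    static void unmap_frame(void *p) { (void)p; }

    /* Try the linear map first, fall back to a fresh writeable mapping of
     * the shadow frame when the probe write fails. */
    static void write_entries(void *dst, const void *src, size_t bytes,
                              unsigned long mfn)
    {
        void *map = NULL;

        if (!probe_write(dst, sizeof(unsigned long))) {
            map = map_frame_writeable(mfn);
            dst = (unsigned char *)map + ((uintptr_t)dst & 0xfff);
        }

        memcpy(dst, src, bytes);      /* the real code writes entry by entry */

        if (map)
            unmap_frame(map);
    }

    int main(void)
    {
        unsigned long e = 0x1003;                     /* some PTE-like value */
        write_entries((void *)(uintptr_t)0x120, &e, sizeof(e), 42);
        return 0;
    }
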
@@ -874,7 +874,7 @@ shadow_get_page_from_l1e(shadow_l1e_t sl1e, struct domain *d, p2m_type_t type)
 
 static void inline
 shadow_put_page_from_l1e(shadow_l1e_t sl1e, struct domain *d)
-{ 
+{
     if ( !shadow_mode_refcounts(d) )
         return;
 
@@ -882,9 +882,9 @@ shadow_put_page_from_l1e(shadow_l1e_t sl1e, struct domain *d)
 }
 
 #if GUEST_PAGING_LEVELS >= 4
-static int shadow_set_l4e(struct vcpu *v, 
-                          shadow_l4e_t *sl4e, 
-                          shadow_l4e_t new_sl4e, 
+static int shadow_set_l4e(struct vcpu *v,
+                          shadow_l4e_t *sl4e,
+                          shadow_l4e_t new_sl4e,
                           mfn_t sl4mfn)
 {
     int flags = 0, ok;
@@ -894,13 +894,13 @@ static int shadow_set_l4e(struct vcpu *v,
     old_sl4e = *sl4e;
 
     if ( old_sl4e.l4 == new_sl4e.l4 ) return 0; /* Nothing to do */
-    
-    paddr = ((((paddr_t)mfn_x(sl4mfn)) << PAGE_SHIFT) 
+
+    paddr = ((((paddr_t)mfn_x(sl4mfn)) << PAGE_SHIFT)
              | (((unsigned long)sl4e) & ~PAGE_MASK));
 
-    if ( shadow_l4e_get_flags(new_sl4e) & _PAGE_PRESENT ) 
+    if ( shadow_l4e_get_flags(new_sl4e) & _PAGE_PRESENT )
     {
-        /* About to install a new reference */        
+        /* About to install a new reference */
         mfn_t sl3mfn = shadow_l4e_get_mfn(new_sl4e);
         ok = sh_get_ref(v, sl3mfn, paddr);
         /* Are we pinning l3 shadows to handle wierd linux behaviour? */
@@ -917,12 +917,12 @@ static int shadow_set_l4e(struct vcpu *v,
     shadow_write_entries(sl4e, &new_sl4e, 1, sl4mfn);
     flags |= SHADOW_SET_CHANGED;
 
-    if ( shadow_l4e_get_flags(old_sl4e) & _PAGE_PRESENT ) 
+    if ( shadow_l4e_get_flags(old_sl4e) & _PAGE_PRESENT )
     {
         /* We lost a reference to an old mfn. */
         mfn_t osl3mfn = shadow_l4e_get_mfn(old_sl4e);
         if ( (mfn_x(osl3mfn) != mfn_x(shadow_l4e_get_mfn(new_sl4e)))
-             || !perms_strictly_increased(shadow_l4e_get_flags(old_sl4e), 
+             || !perms_strictly_increased(shadow_l4e_get_flags(old_sl4e),
                                           shadow_l4e_get_flags(new_sl4e)) )
         {
             flags |= SHADOW_SET_FLUSH;
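
The flush rule visible in this hunk is worth spelling out: after replacing a present entry, stale TLB contents are only tolerable when the new entry maps the same frame with strictly more permissions, so SHADOW_SET_FLUSH is skipped exactly in that case. A small sketch under a simplified notion of "strictly increased" (the real perms_strictly_increased() may consider more bits than this):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified permission bits; NX and friends are ignored in this sketch. */
    #define PRESENT 0x1u
    #define RW      0x2u
    #define USER    0x4u
    #define PERM_MASK (PRESENT | RW | USER)

    /* Hypothetical simplification: the new flags keep everything the old
     * ones granted and add at least one more permission. */
    static bool perms_strictly_increased(uint32_t oldf, uint32_t newf)
    {
        oldf &= PERM_MASK;
        newf &= PERM_MASK;
        return (oldf & ~newf) == 0 && oldf != newf;
    }

    /* A stale TLB entry is harmless only if the replacement maps the same
     * frame with strictly more access. */
    static bool needs_tlb_flush(unsigned long old_mfn, uint32_t old_flags,
                                unsigned long new_mfn, uint32_t new_flags)
    {
        return old_mfn != new_mfn ||
               !perms_strictly_increased(old_flags, new_flags);
    }

    int main(void)
    {
        /* Same frame, RO -> RW: no flush.  Different frame: flush. */
        printf("%d %d\n",
               needs_tlb_flush(7, PRESENT, 7, PRESENT | RW),
               needs_tlb_flush(7, PRESENT | RW, 8, PRESENT | RW));
        return 0;
    }
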
@@ -932,9 +932,9 @@ static int shadow_set_l4e(struct vcpu *v,
     return flags;
 }
 
-static int shadow_set_l3e(struct vcpu *v, 
-                          shadow_l3e_t *sl3e, 
-                          shadow_l3e_t new_sl3e, 
+static int shadow_set_l3e(struct vcpu *v,
+                          shadow_l3e_t *sl3e,
+                          shadow_l3e_t new_sl3e,
                           mfn_t sl3mfn)
 {
     int flags = 0;
@@ -945,12 +945,12 @@ static int shadow_set_l3e(struct vcpu *v,
 
     if ( old_sl3e.l3 == new_sl3e.l3 ) return 0; /* Nothing to do */
 
-    paddr = ((((paddr_t)mfn_x(sl3mfn)) << PAGE_SHIFT) 
+    paddr = ((((paddr_t)mfn_x(sl3mfn)) << PAGE_SHIFT)
              | (((unsigned long)sl3e) & ~PAGE_MASK));
-    
+
     if ( shadow_l3e_get_flags(new_sl3e) & _PAGE_PRESENT )
     {
-        /* About to install a new reference */        
+        /* About to install a new reference */
         if ( !sh_get_ref(v, shadow_l3e_get_mfn(new_sl3e), paddr) )
         {
             domain_crash(v->domain);
@@ -962,13 +962,13 @@ static int shadow_set_l3e(struct vcpu *v,
     shadow_write_entries(sl3e, &new_sl3e, 1, sl3mfn);
     flags |= SHADOW_SET_CHANGED;
 
-    if ( shadow_l3e_get_flags(old_sl3e) & _PAGE_PRESENT ) 
+    if ( shadow_l3e_get_flags(old_sl3e) & _PAGE_PRESENT )
     {
         /* We lost a reference to an old mfn. */
         mfn_t osl2mfn = shadow_l3e_get_mfn(old_sl3e);
         if ( (mfn_x(osl2mfn) != mfn_x(shadow_l3e_get_mfn(new_sl3e))) ||
-             !perms_strictly_increased(shadow_l3e_get_flags(old_sl3e), 
-                                       shadow_l3e_get_flags(new_sl3e)) ) 
+             !perms_strictly_increased(shadow_l3e_get_flags(old_sl3e),
+                                       shadow_l3e_get_flags(new_sl3e)) )
         {
             flags |= SHADOW_SET_FLUSH;
         }
@@ -976,11 +976,11 @@ static int shadow_set_l3e(struct vcpu *v,
     }
     return flags;
 }
-#endif /* GUEST_PAGING_LEVELS >= 4 */ 
+#endif /* GUEST_PAGING_LEVELS >= 4 */
 
-static int shadow_set_l2e(struct vcpu *v, 
-                          shadow_l2e_t *sl2e, 
-                          shadow_l2e_t new_sl2e, 
+static int shadow_set_l2e(struct vcpu *v,
+                          shadow_l2e_t *sl2e,
+                          shadow_l2e_t new_sl2e,
                           mfn_t sl2mfn)
 {
     int flags = 0;
@@ -990,7 +990,7 @@ static int shadow_set_l2e(struct vcpu *v,
 #if GUEST_PAGING_LEVELS == 2
     /* In 2-on-3 we work with pairs of l2es pointing at two-page
      * shadows.  Reference counting and up-pointers track from the first
-     * page of the shadow to the first l2e, so make sure that we're 
+     * page of the shadow to the first l2e, so make sure that we're
      * working with those:
      * Start with a pair of identical entries */
     shadow_l2e_t pair[2] = { new_sl2e, new_sl2e };
@@ -1000,13 +1000,13 @@ static int shadow_set_l2e(struct vcpu *v,
 
     ASSERT(sl2e != NULL);
     old_sl2e = *sl2e;
-    
+
     if ( old_sl2e.l2 == new_sl2e.l2 ) return 0; /* Nothing to do */
-    
+
     paddr = ((((paddr_t)mfn_x(sl2mfn)) << PAGE_SHIFT)
              | (((unsigned long)sl2e) & ~PAGE_MASK));
 
-    if ( shadow_l2e_get_flags(new_sl2e) & _PAGE_PRESENT ) 
+    if ( shadow_l2e_get_flags(new_sl2e) & _PAGE_PRESENT )
     {
         mfn_t sl1mfn = shadow_l2e_get_mfn(new_sl2e);
         ASSERT(mfn_to_page(sl1mfn)->u.sh.head);
@@ -1028,7 +1028,7 @@ static int shadow_set_l2e(struct vcpu *v,
                the GFN instead of the GMFN, and it's definitely not
                OOS. */
             if ( (sp->u.sh.type != SH_type_fl1_shadow) && mfn_valid(gl1mfn)
-                 && mfn_is_out_of_sync(gl1mfn) ) 
+                 && mfn_is_out_of_sync(gl1mfn) )
                 sh_resync(v, gl1mfn);
         }
 #endif
@@ -1047,13 +1047,13 @@ static int shadow_set_l2e(struct vcpu *v,
 #endif
     flags |= SHADOW_SET_CHANGED;
 
-    if ( shadow_l2e_get_flags(old_sl2e) & _PAGE_PRESENT ) 
+    if ( shadow_l2e_get_flags(old_sl2e) & _PAGE_PRESENT )
     {
         /* We lost a reference to an old mfn. */
         mfn_t osl1mfn = shadow_l2e_get_mfn(old_sl2e);
         if ( (mfn_x(osl1mfn) != mfn_x(shadow_l2e_get_mfn(new_sl2e))) ||
-             !perms_strictly_increased(shadow_l2e_get_flags(old_sl2e), 
-                                       shadow_l2e_get_flags(new_sl2e)) ) 
+             !perms_strictly_increased(shadow_l2e_get_flags(old_sl2e),
+                                       shadow_l2e_get_flags(new_sl2e)) )
         {
             flags |= SHADOW_SET_FLUSH;
         }
@@ -1066,7 +1066,7 @@ static inline void shadow_vram_get_l1e(shadow_l1e_t new_sl1e,
                                        shadow_l1e_t *sl1e,
                                        mfn_t sl1mfn,
                                        struct domain *d)
-{ 
+{
     mfn_t mfn = shadow_l1e_get_mfn(new_sl1e);
     int flags = shadow_l1e_get_flags(new_sl1e);
     unsigned long gfn;
@@ -1085,7 +1085,7 @@ static inline void shadow_vram_get_l1e(shadow_l1e_t new_sl1e,
     {
         unsigned long i = gfn - dirty_vram->begin_pfn;
         struct page_info *page = mfn_to_page(mfn);
-        
+
         if ( (page->u.inuse.type_info & PGT_count_mask) == 1 )
             /* Initial guest reference, record it */
             dirty_vram->sl1ma[i] = pfn_to_paddr(mfn_x(sl1mfn))
@@ -1159,8 +1159,8 @@ static inline void shadow_vram_put_l1e(shadow_l1e_t old_sl1e,
     }
 }
 
-static int shadow_set_l1e(struct vcpu *v, 
-                          shadow_l1e_t *sl1e, 
+static int shadow_set_l1e(struct vcpu *v,
+                          shadow_l1e_t *sl1e,
                           shadow_l1e_t new_sl1e,
                           p2m_type_t new_type,
                           mfn_t sl1mfn)
@@ -1179,15 +1179,15 @@ static int shadow_set_l1e(struct vcpu *v,
              == (_PAGE_RW|_PAGE_PRESENT)) )
         oos_fixup_add(v, new_gmfn, sl1mfn, pgentry_ptr_to_slot(sl1e));
 #endif
-    
+
     old_sl1e = *sl1e;
 
     if ( old_sl1e.l1 == new_sl1e.l1 ) return 0; /* Nothing to do */
-    
+
     if ( (shadow_l1e_get_flags(new_sl1e) & _PAGE_PRESENT)
-         && !sh_l1e_is_magic(new_sl1e) ) 
+         && !sh_l1e_is_magic(new_sl1e) )
     {
-        /* About to install a new reference */        
+        /* About to install a new reference */
         if ( shadow_mode_refcounts(d) ) {
             TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_SHADOW_L1_GET_REF);
             switch ( shadow_get_page_from_l1e(new_sl1e, d, new_type) )
@@ -1205,45 +1205,45 @@ static int shadow_set_l1e(struct vcpu *v,
                 break;
             }
         }
-    } 
+    }
 
     /* Write the new entry */
     shadow_write_entries(sl1e, &new_sl1e, 1, sl1mfn);
     flags |= SHADOW_SET_CHANGED;
 
-    if ( (shadow_l1e_get_flags(old_sl1e) & _PAGE_PRESENT) 
+    if ( (shadow_l1e_get_flags(old_sl1e) & _PAGE_PRESENT)
          && !sh_l1e_is_magic(old_sl1e) )
     {
         /* We lost a reference to an old mfn. */
-        /* N.B. Unlike higher-level sets, never need an extra flush 
-         * when writing an l1e.  Because it points to the same guest frame 
+        /* N.B. Unlike higher-level sets, never need an extra flush
+         * when writing an l1e.  Because it points to the same guest frame
          * as the guest l1e did, it's the guest's responsibility to
          * trigger a flush later. */
-        if ( shadow_mode_refcounts(d) ) 
+        if ( shadow_mode_refcounts(d) )
         {
             shadow_vram_put_l1e(old_sl1e, sl1e, sl1mfn, d);
             shadow_put_page_from_l1e(old_sl1e, d);
             TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_SHADOW_L1_PUT_REF);
-        } 
+        }
     }
     return flags;
 }
 
 
 /**************************************************************************/
-/* Macros to walk pagetables.  These take the shadow of a pagetable and 
- * walk every "interesting" entry.  That is, they don't touch Xen mappings, 
- * and for 32-bit l2s shadowed onto PAE or 64-bit, they only touch every 
+/* Macros to walk pagetables.  These take the shadow of a pagetable and
+ * walk every "interesting" entry.  That is, they don't touch Xen mappings,
+ * and for 32-bit l2s shadowed onto PAE or 64-bit, they only touch every
  * second entry (since pairs of entries are managed together). For multi-page
  * shadows they walk all pages.
- * 
- * Arguments are an MFN, the variable to point to each entry, a variable 
- * to indicate that we are done (we will shortcut to the end of the scan 
+ *
+ * Arguments are an MFN, the variable to point to each entry, a variable
+ * to indicate that we are done (we will shortcut to the end of the scan
  * when _done != 0), a variable to indicate that we should avoid Xen mappings,
- * and the code. 
+ * and the code.
  *
- * WARNING: These macros have side-effects.  They change the values of both 
- * the pointer and the MFN. */ 
+ * WARNING: These macros have side-effects.  They change the values of both
+ * the pointer and the MFN. */
 
 static inline void increment_ptr_to_guest_entry(void *ptr)
 {
@@ -1288,7 +1288,7 @@ do {                                                                   \
 #define SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code)         \
        _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code)
 #endif
-    
+
 
 #if GUEST_PAGING_LEVELS == 2
 
@@ -1335,7 +1335,7 @@ do {                                                                      \
     sh_unmap_domain_page(_sp);                                             \
 } while (0)
 
-#else 
+#else
 
 /* 64-bit l2: touch all entries except for PAE compat guests. */
 #define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code)       \
@@ -1424,7 +1424,7 @@ void sh_install_xen_entries_in_l4(struct vcpu *v, mfn_t gl4mfn, mfn_t sl4mfn)
 
     sl4e = sh_map_domain_page(sl4mfn);
     BUILD_BUG_ON(sizeof (l4_pgentry_t) != sizeof (shadow_l4e_t));
-    
+
     /* Copy the common Xen mappings from the idle domain */
     slots = (shadow_mode_external(d)
              ? ROOT_PAGETABLE_XEN_SLOTS
@@ -1458,7 +1458,7 @@ void sh_install_xen_entries_in_l4(struct vcpu *v, mfn_t gl4mfn, mfn_t sl4mfn)
             shadow_l4e_from_mfn(gl4mfn, __PAGE_HYPERVISOR);
     }
 
-    sh_unmap_domain_page(sl4e);    
+    sh_unmap_domain_page(sl4e);
 }
 #endif
 
@@ -1504,12 +1504,12 @@ sh_make_shadow(struct vcpu *v, mfn_t gmfn, u32 shadow_type)
         mfn_to_page(smfn)->up = 0;
 
 #if GUEST_PAGING_LEVELS == 4
-#if (SHADOW_OPTIMIZATIONS & SHOPT_LINUX_L3_TOPLEVEL) 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_LINUX_L3_TOPLEVEL)
     if ( shadow_type == SH_type_l4_64_shadow &&
          unlikely(v->domain->arch.paging.shadow.opt_flags & SHOPT_LINUX_L3_TOPLEVEL) )
     {
         /* We're shadowing a new l4, but we've been assuming the guest uses
-         * only one l4 per vcpu and context switches using an l4 entry. 
+         * only one l4 per vcpu and context switches using an l4 entry.
          * Count the number of active l4 shadows.  If there are enough
          * of them, decide that this isn't an old linux guest, and stop
          * pinning l3es.  This is not very quick but it doesn't happen
@@ -1522,9 +1522,9 @@ sh_make_shadow(struct vcpu *v, mfn_t gmfn, u32 shadow_type)
             if ( sp->u.sh.type == SH_type_l4_64_shadow )
                 l4count++;
         }
-        for_each_vcpu ( v->domain, v2 ) 
+        for_each_vcpu ( v->domain, v2 )
             vcpus++;
-        if ( l4count > 2 * vcpus ) 
+        if ( l4count > 2 * vcpus )
         {
             /* Unpin all the pinned l3 tables, and don't pin any more. */
             page_list_for_each_safe(sp, t, &v->domain->arch.paging.shadow.pinned_shadows)
@@ -1542,7 +1542,7 @@ sh_make_shadow(struct vcpu *v, mfn_t gmfn, u32 shadow_type)
     // Create the Xen mappings...
     if ( !shadow_mode_external(v->domain) )
     {
-        switch (shadow_type) 
+        switch (shadow_type)
         {
 #if GUEST_PAGING_LEVELS == 4
         case SH_type_l4_shadow:
@@ -1584,7 +1584,7 @@ sh_make_monitor_table(struct vcpu *v)
     struct domain *d = v->domain;
 
     ASSERT(pagetable_get_pfn(v->arch.monitor_table) == 0);
-    
+
     /* Guarantee we can get the memory we need */
     shadow_prealloc(d, SH_type_monitor_table, CONFIG_PAGING_LEVELS);
 
@@ -1599,8 +1599,8 @@ sh_make_monitor_table(struct vcpu *v)
             mfn_t m3mfn, m2mfn;
             l4_pgentry_t *l4e;
             l3_pgentry_t *l3e;
-            /* Install an l3 table and an l2 table that will hold the shadow 
-             * linear map entries.  This overrides the linear map entry that 
+            /* Install an l3 table and an l2 table that will hold the shadow
+             * linear map entries.  This overrides the linear map entry that
              * was installed by sh_install_xen_entries_in_l4. */
             l4e = sh_map_domain_page(m4mfn);
 
@@ -1622,7 +1622,7 @@ sh_make_monitor_table(struct vcpu *v)
                 m3mfn = shadow_alloc(d, SH_type_monitor_table, 0);
                 mfn_to_page(m3mfn)->shadow_flags = 3;
                 l4e[0] = l4e_from_pfn(mfn_x(m3mfn), __PAGE_HYPERVISOR);
-                
+
                 m2mfn = shadow_alloc(d, SH_type_monitor_table, 0);
                 mfn_to_page(m2mfn)->shadow_flags = 2;
                 l3e = sh_map_domain_page(m3mfn);
@@ -1647,13 +1647,13 @@ sh_make_monitor_table(struct vcpu *v)
  * If the necessary tables are not present in the guest, they return NULL. */
 
 /* N.B. The use of GUEST_PAGING_LEVELS here is correct.  If the shadow has
- * more levels than the guest, the upper levels are always fixed and do not 
- * reflect any information from the guest, so we do not use these functions 
+ * more levels than the guest, the upper levels are always fixed and do not
+ * reflect any information from the guest, so we do not use these functions
  * to access them. */
 
 #if GUEST_PAGING_LEVELS >= 4
-static shadow_l4e_t * shadow_get_and_create_l4e(struct vcpu *v, 
-                                                walk_t *gw, 
+static shadow_l4e_t * shadow_get_and_create_l4e(struct vcpu *v,
+                                                walk_t *gw,
                                                 mfn_t *sl4mfn)
 {
     /* There is always a shadow of the top level table.  Get it. */
@@ -1662,8 +1662,8 @@ static shadow_l4e_t * shadow_get_and_create_l4e(struct vcpu *v,
     return sh_linear_l4_table(v) + shadow_l4_linear_offset(gw->va);
 }
 
-static shadow_l3e_t * shadow_get_and_create_l3e(struct vcpu *v, 
-                                                walk_t *gw, 
+static shadow_l3e_t * shadow_get_and_create_l3e(struct vcpu *v,
+                                                walk_t *gw,
                                                 mfn_t *sl3mfn,
                                                 fetch_type_t ft,
                                                 int *resync)
@@ -1674,18 +1674,18 @@ static shadow_l3e_t * shadow_get_and_create_l3e(struct vcpu *v,
     /* Get the l4e */
     sl4e = shadow_get_and_create_l4e(v, gw, &sl4mfn);
     ASSERT(sl4e != NULL);
-    if ( shadow_l4e_get_flags(*sl4e) & _PAGE_PRESENT ) 
+    if ( shadow_l4e_get_flags(*sl4e) & _PAGE_PRESENT )
     {
         *sl3mfn = shadow_l4e_get_mfn(*sl4e);
         ASSERT(mfn_valid(*sl3mfn));
-    } 
-    else 
+    }
+    else
     {
         int r;
         shadow_l4e_t new_sl4e;
         /* No l3 shadow installed: find and install it. */
         *sl3mfn = get_shadow_status(v, gw->l3mfn, SH_type_l3_shadow);
-        if ( !mfn_valid(*sl3mfn) ) 
+        if ( !mfn_valid(*sl3mfn) )
         {
             /* No l3 shadow of this page exists at all: make one. */
             *sl3mfn = sh_make_shadow(v, gw->l3mfn, SH_type_l3_shadow);
@@ -1708,8 +1708,8 @@ static shadow_l3e_t * shadow_get_and_create_l3e(struct vcpu *v,
 #endif /* GUEST_PAGING_LEVELS >= 4 */
 
 
-static shadow_l2e_t * shadow_get_and_create_l2e(struct vcpu *v, 
-                                                walk_t *gw, 
+static shadow_l2e_t * shadow_get_and_create_l2e(struct vcpu *v,
+                                                walk_t *gw,
                                                 mfn_t *sl2mfn,
                                                 fetch_type_t ft,
                                                 int *resync)
@@ -1720,13 +1720,13 @@ static shadow_l2e_t * shadow_get_and_create_l2e(struct vcpu *v,
     if ( !mfn_valid(gw->l2mfn) ) return NULL; /* No guest page. */
     /* Get the l3e */
     sl3e = shadow_get_and_create_l3e(v, gw, &sl3mfn, ft, resync);
-    if ( sl3e == NULL ) return NULL; 
-    if ( shadow_l3e_get_flags(*sl3e) & _PAGE_PRESENT ) 
+    if ( sl3e == NULL ) return NULL;
+    if ( shadow_l3e_get_flags(*sl3e) & _PAGE_PRESENT )
     {
         *sl2mfn = shadow_l3e_get_mfn(*sl3e);
         ASSERT(mfn_valid(*sl2mfn));
-    } 
-    else 
+    }
+    else
     {
         int r;
         shadow_l3e_t new_sl3e;
@@ -1740,7 +1740,7 @@ static shadow_l2e_t * shadow_get_and_create_l2e(struct vcpu *v,
 
         /* No l2 shadow installed: find and install it. */
         *sl2mfn = get_shadow_status(v, gw->l2mfn, t);
-        if ( !mfn_valid(*sl2mfn) ) 
+        if ( !mfn_valid(*sl2mfn) )
         {
             /* No l2 shadow of this page exists at all: make one. */
             *sl2mfn = sh_make_shadow(v, gw->l2mfn, t);
@@ -1750,7 +1750,7 @@ static shadow_l2e_t * shadow_get_and_create_l2e(struct vcpu *v,
         r = shadow_set_l3e(v, sl3e, new_sl3e, sl3mfn);
         ASSERT((r & SHADOW_SET_FLUSH) == 0);
         if ( r & SHADOW_SET_ERROR )
-            return NULL;        
+            return NULL;
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC )
         *resync |= 1;
@@ -1762,9 +1762,9 @@ static shadow_l2e_t * shadow_get_and_create_l2e(struct vcpu *v,
 #elif GUEST_PAGING_LEVELS == 3 /* PAE... */
     /* We never demand-shadow PAE l3es: they are only created in
      * sh_update_cr3().  Check if the relevant sl3e is present. */
-    shadow_l3e_t *sl3e = ((shadow_l3e_t *)&v->arch.paging.shadow.l3table) 
+    shadow_l3e_t *sl3e = ((shadow_l3e_t *)&v->arch.paging.shadow.l3table)
         + shadow_l3_linear_offset(gw->va);
-    if ( !(shadow_l3e_get_flags(*sl3e) & _PAGE_PRESENT) ) 
+    if ( !(shadow_l3e_get_flags(*sl3e) & _PAGE_PRESENT) )
         return NULL;
     *sl2mfn = shadow_l3e_get_mfn(*sl3e);
     ASSERT(mfn_valid(*sl2mfn));
@@ -1778,12 +1778,12 @@ static shadow_l2e_t * shadow_get_and_create_l2e(struct vcpu *v,
     (void) shadow_l2_index(sl2mfn, guest_l2_table_offset(gw->va));
     /* Reading the top level table is always valid. */
     return sh_linear_l2_table(v) + shadow_l2_linear_offset(gw->va);
-#endif 
+#endif
 }
 
 
-static shadow_l1e_t * shadow_get_and_create_l1e(struct vcpu *v, 
-                                                walk_t *gw, 
+static shadow_l1e_t * shadow_get_and_create_l1e(struct vcpu *v,
+                                                walk_t *gw,
                                                 mfn_t *sl1mfn,
                                                 fetch_type_t ft)
 {
@@ -1797,38 +1797,38 @@ static shadow_l1e_t * shadow_get_and_create_l1e(struct vcpu *v,
 
     /* Install the sl1 in the l2e if it wasn't there or if we need to
      * re-do it to fix a PSE dirty bit. */
-    if ( shadow_l2e_get_flags(*sl2e) & _PAGE_PRESENT 
+    if ( shadow_l2e_get_flags(*sl2e) & _PAGE_PRESENT
          && likely(ft != ft_demand_write
-                   || (shadow_l2e_get_flags(*sl2e) & _PAGE_RW) 
+                   || (shadow_l2e_get_flags(*sl2e) & _PAGE_RW)
                    || !(guest_l2e_get_flags(gw->l2e) & _PAGE_PSE)) )
     {
         *sl1mfn = shadow_l2e_get_mfn(*sl2e);
         ASSERT(mfn_valid(*sl1mfn));
-    } 
-    else 
+    }
+    else
     {
         shadow_l2e_t new_sl2e;
         int r, flags = guest_l2e_get_flags(gw->l2e);
         /* No l1 shadow installed: find and install it. */
         if ( !(flags & _PAGE_PRESENT) )
             return NULL; /* No guest page. */
-        if ( guest_supports_superpages(v) && (flags & _PAGE_PSE) ) 
+        if ( guest_supports_superpages(v) && (flags & _PAGE_PSE) )
         {
             /* Splintering a superpage */
             gfn_t l2gfn = guest_l2e_get_gfn(gw->l2e);
             *sl1mfn = get_fl1_shadow_status(v, l2gfn);
-            if ( !mfn_valid(*sl1mfn) ) 
+            if ( !mfn_valid(*sl1mfn) )
             {
                 /* No fl1 shadow of this superpage exists at all: make one. */
                 *sl1mfn = make_fl1_shadow(v, l2gfn);
             }
-        } 
-        else 
+        }
+        else
         {
             /* Shadowing an actual guest l1 table */
             if ( !mfn_valid(gw->l1mfn) ) return NULL; /* No guest page. */
             *sl1mfn = get_shadow_status(v, gw->l1mfn, SH_type_l1_shadow);
-            if ( !mfn_valid(*sl1mfn) ) 
+            if ( !mfn_valid(*sl1mfn) )
             {
                 /* No l1 shadow of this page exists at all: make one. */
                 *sl1mfn = sh_make_shadow(v, gw->l1mfn, SH_type_l1_shadow);
@@ -1837,7 +1837,7 @@ static shadow_l1e_t * shadow_get_and_create_l1e(struct vcpu *v,
         /* Install the new sl1 table in the sl2e */
         l2e_propagate_from_guest(v, gw->l2e, *sl1mfn, &new_sl2e, ft);
         r = shadow_set_l2e(v, sl2e, new_sl2e, sl2mfn);
-        ASSERT((r & SHADOW_SET_FLUSH) == 0);        
+        ASSERT((r & SHADOW_SET_FLUSH) == 0);
         if ( r & SHADOW_SET_ERROR )
             return NULL;
 
@@ -1863,7 +1863,7 @@ static shadow_l1e_t * shadow_get_and_create_l1e(struct vcpu *v,
 
 
 /**************************************************************************/
-/* Destructors for shadow tables: 
+/* Destructors for shadow tables:
  * Unregister the shadow, decrement refcounts of any entries present in it,
  * and release the memory.
  *
@@ -1890,16 +1890,16 @@ void sh_destroy_l4_shadow(struct vcpu *v, mfn_t smfn)
     delete_shadow_status(v, gmfn, t, smfn);
     shadow_demote(v, gmfn, t);
     /* Decrement refcounts of all the old entries */
-    sl4mfn = smfn; 
+    sl4mfn = smfn;
     SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, 0, v->domain, {
-        if ( shadow_l4e_get_flags(*sl4e) & _PAGE_PRESENT ) 
+        if ( shadow_l4e_get_flags(*sl4e) & _PAGE_PRESENT )
         {
             sh_put_ref(v, shadow_l4e_get_mfn(*sl4e),
-                       (((paddr_t)mfn_x(sl4mfn)) << PAGE_SHIFT) 
+                       (((paddr_t)mfn_x(sl4mfn)) << PAGE_SHIFT)
                        | ((unsigned long)sl4e & ~PAGE_MASK));
         }
     });
-    
+
     /* Put the memory back in the pool */
     shadow_free(v->domain, smfn);
 }
@@ -1922,11 +1922,11 @@ void sh_destroy_l3_shadow(struct vcpu *v, mfn_t smfn)
     shadow_demote(v, gmfn, t);
 
     /* Decrement refcounts of all the old entries */
-    sl3mfn = smfn; 
+    sl3mfn = smfn;
     SHADOW_FOREACH_L3E(sl3mfn, sl3e, 0, 0, {
-        if ( shadow_l3e_get_flags(*sl3e) & _PAGE_PRESENT ) 
+        if ( shadow_l3e_get_flags(*sl3e) & _PAGE_PRESENT )
             sh_put_ref(v, shadow_l3e_get_mfn(*sl3e),
-                        (((paddr_t)mfn_x(sl3mfn)) << PAGE_SHIFT) 
+                        (((paddr_t)mfn_x(sl3mfn)) << PAGE_SHIFT)
                         | ((unsigned long)sl3e & ~PAGE_MASK));
     });
 
@@ -1961,9 +1961,9 @@ void sh_destroy_l2_shadow(struct vcpu *v, mfn_t smfn)
     /* Decrement refcounts of all the old entries */
     sl2mfn = smfn;
     SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, v->domain, {
-        if ( shadow_l2e_get_flags(*sl2e) & _PAGE_PRESENT ) 
+        if ( shadow_l2e_get_flags(*sl2e) & _PAGE_PRESENT )
             sh_put_ref(v, shadow_l2e_get_mfn(*sl2e),
-                        (((paddr_t)mfn_x(sl2mfn)) << PAGE_SHIFT) 
+                        (((paddr_t)mfn_x(sl2mfn)) << PAGE_SHIFT)
                         | ((unsigned long)sl2e & ~PAGE_MASK));
     });
 
@@ -1989,17 +1989,17 @@ void sh_destroy_l1_shadow(struct vcpu *v, mfn_t smfn)
         gfn_t gfn = _gfn(sp->v.sh.back);
         delete_fl1_shadow_status(v, gfn, smfn);
     }
-    else 
+    else
     {
         mfn_t gmfn = backpointer(sp);
         delete_shadow_status(v, gmfn, t, smfn);
         shadow_demote(v, gmfn, t);
     }
-    
+
     if ( shadow_mode_refcounts(d) )
     {
         /* Decrement refcounts of all the old entries */
-        mfn_t sl1mfn = smfn; 
+        mfn_t sl1mfn = smfn;
         SHADOW_FOREACH_L1E(sl1mfn, sl1e, 0, 0, {
             if ( (shadow_l1e_get_flags(*sl1e) & _PAGE_PRESENT)
                  && !sh_l1e_is_magic(*sl1e) ) {
@@ -2008,7 +2008,7 @@ void sh_destroy_l1_shadow(struct vcpu *v, mfn_t smfn)
             }
         });
     }
-    
+
     /* Put the memory back in the pool */
     shadow_free(v->domain, smfn);
 }
@@ -2025,8 +2025,8 @@ void sh_destroy_monitor_table(struct vcpu *v, mfn_t mmfn)
         l4_pgentry_t *l4e = sh_map_domain_page(mmfn);
         l3_pgentry_t *l3e;
         int linear_slot = shadow_l4_table_offset(SH_LINEAR_PT_VIRT_START);
- 
-        /* Need to destroy the l3 and l2 monitor pages used 
+
+        /* Need to destroy the l3 and l2 monitor pages used
          * for the linear map */
         ASSERT(l4e_get_flags(l4e[linear_slot]) & _PAGE_PRESENT);
         m3mfn = _mfn(l4e_get_pfn(l4e[linear_slot]));
@@ -2060,18 +2060,18 @@ void sh_destroy_monitor_table(struct vcpu *v, mfn_t mmfn)
 /**************************************************************************/
 /* Functions to destroy non-Xen mappings in a pagetable hierarchy.
  * These are called from common code when we are running out of shadow
- * memory, and unpinning all the top-level shadows hasn't worked. 
+ * memory, and unpinning all the top-level shadows hasn't worked.
  *
  * With user_only == 1, we leave guest kernel-mode mappings in place too,
  * unhooking only the user-mode mappings
  *
- * This implementation is pretty crude and slow, but we hope that it won't 
+ * This implementation is pretty crude and slow, but we hope that it won't
  * be called very often. */
 
 #if GUEST_PAGING_LEVELS == 2
 
 void sh_unhook_32b_mappings(struct vcpu *v, mfn_t sl2mfn, int user_only)
-{    
+{
     shadow_l2e_t *sl2e;
     SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, v->domain, {
         if ( !user_only || (sl2e->l2 & _PAGE_USER) )
@@ -2109,7 +2109,7 @@ void sh_unhook_64b_mappings(struct vcpu *v, mfn_t sl4mfn, int user_only)
  * These functions require a pointer to the shadow entry that will be updated.
  */
 
-/* These functions take a new guest entry, translate it to shadow and write 
+/* These functions take a new guest entry, translate it to shadow and write
  * the shadow entry.
  *
  * They return the same bitmaps as the shadow_set_lXe() functions.
@@ -2240,7 +2240,7 @@ static int validate_gl2e(struct vcpu *v, void *new_ge, mfn_t sl2mfn, void *se)
             mfn_t gl1mfn = get_gfn_query_unlocked(v->domain, gfn_x(gl1gfn),
                                                   &p2mt);
             if ( p2m_is_ram(p2mt) )
-                sl1mfn = get_shadow_status(v, gl1mfn, SH_type_l1_shadow); 
+                sl1mfn = get_shadow_status(v, gl1mfn, SH_type_l1_shadow);
             else if ( p2mt != p2m_populate_on_demand )
                 result |= SHADOW_SET_ERROR;
         }
@@ -2275,7 +2275,7 @@ static int validate_gl1e(struct vcpu *v, void *new_ge, mfn_t sl1mfn, void *se)
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
     gl1mfn = backpointer(mfn_to_page(sl1mfn));
-    if ( mfn_valid(gl1mfn) 
+    if ( mfn_valid(gl1mfn)
          && mfn_is_out_of_sync(gl1mfn) )
     {
         /* Update the OOS snapshot. */
@@ -2295,7 +2295,7 @@ static int validate_gl1e(struct vcpu *v, void *new_ge, mfn_t sl1mfn, void *se)
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
 /**************************************************************************/
-/* Special validation function for re-syncing out-of-sync shadows. 
+/* Special validation function for re-syncing out-of-sync shadows.
  * Walks the *shadow* page, and for every entry that it finds,
  * revalidates the guest entry that corresponds to it.
  * N.B. This function is called with the vcpu that unsynced the page,
@@ -2342,10 +2342,10 @@ void sh_resync_l1(struct vcpu *v, mfn_t gl1mfn, mfn_t snpmfn)
     ASSERT(!(rc & SHADOW_SET_FLUSH));
 }
 
-/* Figure out whether it's definitely safe not to sync this l1 table. 
- * That is: if we can tell that it's only used once, and that the 
- * toplevel shadow responsible is not one of ours. 
- * N.B. This function is called with the vcpu that required the resync, 
+/* Figure out whether it's definitely safe not to sync this l1 table.
+ * That is: if we can tell that it's only used once, and that the
+ * toplevel shadow responsible is not one of ours.
+ * N.B. This function is called with the vcpu that required the resync,
  *      *not* the one that originally unsynced the page, but it is
  *      called in the *mode* of the vcpu that unsynced it.  Clear?  Good. */
 int sh_safe_not_to_sync(struct vcpu *v, mfn_t gl1mfn)
@@ -2366,7 +2366,7 @@ int sh_safe_not_to_sync(struct vcpu *v, mfn_t gl1mfn)
     smfn = _mfn(sp->up >> PAGE_SHIFT);
     ASSERT(mfn_valid(smfn));
 
-#if (SHADOW_PAGING_LEVELS == 4) 
+#if (SHADOW_PAGING_LEVELS == 4)
     /* up to l3 */
     sp = mfn_to_page(smfn);
     ASSERT(sh_type_has_up_pointer(v, SH_type_l2_shadow));
@@ -2385,15 +2385,15 @@ int sh_safe_not_to_sync(struct vcpu *v, mfn_t gl1mfn)
 #endif
 
     if ( pagetable_get_pfn(v->arch.shadow_table[0]) == mfn_x(smfn)
-#if (SHADOW_PAGING_LEVELS == 3) 
+#if (SHADOW_PAGING_LEVELS == 3)
          || pagetable_get_pfn(v->arch.shadow_table[1]) == mfn_x(smfn)
          || pagetable_get_pfn(v->arch.shadow_table[2]) == mfn_x(smfn)
-         || pagetable_get_pfn(v->arch.shadow_table[3]) == mfn_x(smfn) 
+         || pagetable_get_pfn(v->arch.shadow_table[3]) == mfn_x(smfn)
 #endif
         )
         return 0;
-    
-    /* Only in use in one toplevel shadow, and it's not the one we're 
+
+    /* Only in use in one toplevel shadow, and it's not the one we're
      * running on */
     return 1;
 }
@@ -2401,15 +2401,15 @@ int sh_safe_not_to_sync(struct vcpu *v, mfn_t gl1mfn)
 
 
 /**************************************************************************/
-/* Functions which translate and install the shadows of arbitrary guest 
+/* Functions which translate and install the shadows of arbitrary guest
  * entries that we have just seen the guest write. */
 
 
-static inline int 
+static inline int
 sh_map_and_validate(struct vcpu *v, mfn_t gmfn,
-                     void *new_gp, u32 size, u32 sh_type, 
+                     void *new_gp, u32 size, u32 sh_type,
                      u32 (*shadow_index)(mfn_t *smfn, u32 idx),
-                     int (*validate_ge)(struct vcpu *v, void *ge, 
+                     int (*validate_ge)(struct vcpu *v, void *ge,
                                         mfn_t smfn, void *se))
 /* Generic function for mapping and validating. */
 {
@@ -2462,25 +2462,25 @@ sh_map_and_validate_gl4e(struct vcpu *v, mfn_t gl4mfn,
                           void *new_gl4p, u32 size)
 {
 #if GUEST_PAGING_LEVELS >= 4
-    return sh_map_and_validate(v, gl4mfn, new_gl4p, size, 
-                                SH_type_l4_shadow, 
-                                shadow_l4_index, 
+    return sh_map_and_validate(v, gl4mfn, new_gl4p, size,
+                                SH_type_l4_shadow,
+                                shadow_l4_index,
                                 validate_gl4e);
 #else // ! GUEST_PAGING_LEVELS >= 4
     SHADOW_ERROR("called in wrong paging mode!\n");
     BUG();
     return 0;
-#endif 
+#endif
 }
-    
+
 int
 sh_map_and_validate_gl3e(struct vcpu *v, mfn_t gl3mfn,
                           void *new_gl3p, u32 size)
 {
 #if GUEST_PAGING_LEVELS >= 4
-    return sh_map_and_validate(v, gl3mfn, new_gl3p, size, 
-                                SH_type_l3_shadow, 
-                                shadow_l3_index, 
+    return sh_map_and_validate(v, gl3mfn, new_gl3p, size,
+                                SH_type_l3_shadow,
+                                shadow_l3_index,
                                 validate_gl3e);
 #else // ! GUEST_PAGING_LEVELS >= 4
     SHADOW_ERROR("called in wrong paging mode!\n");
@@ -2493,9 +2493,9 @@ int
 sh_map_and_validate_gl2e(struct vcpu *v, mfn_t gl2mfn,
                           void *new_gl2p, u32 size)
 {
-    return sh_map_and_validate(v, gl2mfn, new_gl2p, size, 
-                                SH_type_l2_shadow, 
-                                shadow_l2_index, 
+    return sh_map_and_validate(v, gl2mfn, new_gl2p, size,
+                                SH_type_l2_shadow,
+                                shadow_l2_index,
                                 validate_gl2e);
 }
 
@@ -2504,9 +2504,9 @@ sh_map_and_validate_gl2he(struct vcpu *v, mfn_t gl2mfn,
                            void *new_gl2p, u32 size)
 {
 #if GUEST_PAGING_LEVELS >= 3
-    return sh_map_and_validate(v, gl2mfn, new_gl2p, size, 
-                                SH_type_l2h_shadow, 
-                                shadow_l2_index, 
+    return sh_map_and_validate(v, gl2mfn, new_gl2p, size,
+                                SH_type_l2h_shadow,
+                                shadow_l2_index,
                                 validate_gl2e);
 #else /* Non-PAE guests don't have different kinds of l2 table */
     SHADOW_ERROR("called in wrong paging mode!\n");
@@ -2519,9 +2519,9 @@ int
 sh_map_and_validate_gl1e(struct vcpu *v, mfn_t gl1mfn,
                           void *new_gl1p, u32 size)
 {
-    return sh_map_and_validate(v, gl1mfn, new_gl1p, size, 
-                                SH_type_l1_shadow, 
-                                shadow_l1_index, 
+    return sh_map_and_validate(v, gl1mfn, new_gl1p, size,
+                                SH_type_l1_shadow,
+                                shadow_l1_index,
                                 validate_gl1e);
 }
 
@@ -2572,7 +2572,7 @@ static inline void reset_early_unshadow(struct vcpu *v)
 
 
 /**************************************************************************/
-/* Optimization: Prefetch multiple L1 entries.  This is called after we have 
+/* Optimization: Prefetch multiple L1 entries.  This is called after we have
  * demand-faulted a shadow l1e in the fault handler, to see if it's
  * worth fetching some more.
  */
@@ -2582,7 +2582,7 @@ static inline void reset_early_unshadow(struct vcpu *v)
 /* XXX magic number */
 #define PREFETCH_DISTANCE 32
 
-static void sh_prefetch(struct vcpu *v, walk_t *gw, 
+static void sh_prefetch(struct vcpu *v, walk_t *gw,
                         shadow_l1e_t *ptr_sl1e, mfn_t sl1mfn)
 {
     int i, dist;
@@ -2621,7 +2621,7 @@ static void sh_prefetch(struct vcpu *v, walk_t *gw,
 #endif /* OOS */
     }
 
-    for ( i = 1; i < dist ; i++ ) 
+    for ( i = 1; i < dist ; i++ )
     {
         /* No point in prefetching if there's already a shadow */
         if ( ptr_sl1e[i].l1 != 0 )
@@ -2634,18 +2634,18 @@ static void sh_prefetch(struct vcpu *v, walk_t *gw,
             /* Not worth continuing if we hit an entry that will need another
              * fault for A/D-bit propagation anyway */
             gflags = guest_l1e_get_flags(gl1e);
-            if ( (gflags & _PAGE_PRESENT) 
+            if ( (gflags & _PAGE_PRESENT)
                  && (!(gflags & _PAGE_ACCESSED)
                      || ((gflags & _PAGE_RW) && !(gflags & _PAGE_DIRTY))) )
                 break;
-        } 
-        else 
+        }
+        else
         {
             /* Fragmented superpage, unless we've been called wrongly */
             ASSERT(guest_l2e_get_flags(gw->l2e) & _PAGE_PSE);
             /* Increment the l1e's GFN by the right number of guest pages */
             gl1e = guest_l1e_from_gfn(
-                _gfn(gfn_x(guest_l1e_get_gfn(gw->l1e)) + i), 
+                _gfn(gfn_x(guest_l1e_get_gfn(gw->l1e)) + i),
                 guest_l1e_get_flags(gw->l1e));
         }
 
@@ -2715,7 +2715,7 @@ static inline void trace_shadow_fixup(guest_l1e_t gl1e,
         __trace_var(event, 0/*!tsc*/, sizeof(d), &d);
     }
 }
-                                          
+
 static inline void trace_not_shadow_fault(guest_l1e_t gl1e,
                                           guest_va_t va)
 {
@@ -2739,7 +2739,7 @@ static inline void trace_not_shadow_fault(guest_l1e_t gl1e,
         __trace_var(event, 0/*!tsc*/, sizeof(d), &d);
     }
 }
-                                          
+
 static inline void trace_shadow_emulate_other(u32 event,
                                                  guest_va_t va,
                                                  gfn_t gfn)
@@ -2807,8 +2807,8 @@ static inline void trace_shadow_emulate(guest_l1e_t gl1e, unsigned long va)
  * shadow code (and the guest should retry) or 0 if it is not (and the
  * fault should be handled elsewhere or passed to the guest). */
 
-static int sh_page_fault(struct vcpu *v, 
-                          unsigned long va, 
+static int sh_page_fault(struct vcpu *v,
+                          unsigned long va,
                           struct cpu_user_regs *regs)
 {
     struct domain *d = v->domain;
@@ -2848,7 +2848,7 @@ static int sh_page_fault(struct vcpu *v,
      * Then try to emulate early to avoid lock aquisition.
      */
     if ( v->arch.paging.last_write_emul_ok
-         && v->arch.paging.shadow.last_emulated_frame == (va >> PAGE_SHIFT) ) 
+         && v->arch.paging.shadow.last_emulated_frame == (va >> PAGE_SHIFT) )
     {
         /* check whether error code is 3, or else fall back to normal path
          * in case of some validation is required
@@ -2858,7 +2858,7 @@ static int sh_page_fault(struct vcpu *v,
             fast_emul = 1;
             gmfn = _mfn(v->arch.paging.shadow.last_emulated_mfn);
 
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
             /* Fall back to the slow path if we're trying to emulate
                writes to an out of sync page. */
             if ( mfn_valid(gmfn) && mfn_is_out_of_sync(gmfn) )
@@ -2886,7 +2886,7 @@ static int sh_page_fault(struct vcpu *v,
 #if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH)
     if ( (regs->error_code & PFEC_reserved_bit) )
     {
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
         /* First, need to check that this isn't an out-of-sync
          * shadow l1e.  If it is, we fall back to the slow path, which
          * will sync it up again. */
@@ -2902,7 +2902,7 @@ static int sh_page_fault(struct vcpu *v,
                                   shadow_l2e_get_mfn(sl2e))))
                  || unlikely(mfn_is_out_of_sync(gl1mfn)) )
             {
-                /* Hit the slow path as if there had been no 
+                /* Hit the slow path as if there had been no
                  * shadow entry at all, and let it tidy up */
                 ASSERT(regs->error_code & PFEC_page_present);
                 regs->error_code ^= (PFEC_reserved_bit|PFEC_page_present);
@@ -2910,10 +2910,10 @@ static int sh_page_fault(struct vcpu *v,
             }
         }
 #endif /* SHOPT_OUT_OF_SYNC */
-        /* The only reasons for reserved bits to be set in shadow entries 
+        /* The only reasons for reserved bits to be set in shadow entries
          * are the two "magic" shadow_l1e entries. */
-        if ( likely((__copy_from_user(&sl1e, 
-                                      (sh_linear_l1_table(v) 
+        if ( likely((__copy_from_user(&sl1e,
+                                      (sh_linear_l1_table(v)
                                        + shadow_l1_linear_offset(va)),
                                       sizeof(sl1e)) == 0)
                     && sh_l1e_is_magic(sl1e)) )
@@ -2935,8 +2935,8 @@ static int sh_page_fault(struct vcpu *v,
             {
                 /* Magic MMIO marker: extract gfn for MMIO address */
                 ASSERT(sh_l1e_is_mmio(sl1e));
-                gpa = (((paddr_t)(gfn_x(sh_l1e_mmio_get_gfn(sl1e)))) 
-                       << PAGE_SHIFT) 
+                gpa = (((paddr_t)(gfn_x(sh_l1e_mmio_get_gfn(sl1e))))
+                       << PAGE_SHIFT)
                     | (va & ~PAGE_MASK);
             }
             perfc_incr(shadow_fault_fast_mmio);
@@ -2949,24 +2949,24 @@ static int sh_page_fault(struct vcpu *v,
