
[Xen-changelog] [xen-unstable] [XEN] More shadow2 cleanups -- primarily moving arch vcpu/domain



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxxx
# Node ID 45a84091144e26ce326b32d8bb661a788ab2685e
# Parent  5fc1fe79083517824d89309cc618f21302724e29
[XEN] More shadow2 cleanups -- primarily moving arch vcpu/domain
fields into separate shadow2-specific structures.
Also rename shadow2_entry_points to shadow2_paging_mode.
Remove VCPUF_shadow2_translate_mode and replace it with a better-named
field in vcpu.arch.shadow2.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/domain.c                 |   12 -
 xen/arch/x86/hvm/hvm.c                |   10 
 xen/arch/x86/shadow2-common.c         |  350 ++++++++++++++++------------------
 xen/arch/x86/shadow2.c                |   30 +-
 xen/arch/x86/traps.c                  |    7 
 xen/arch/x86/x86_32/traps.c           |    9 
 xen/arch/x86/x86_64/traps.c           |    8 
 xen/include/asm-x86/domain.h          |   82 ++++---
 xen/include/asm-x86/shadow2-multi.h   |    4 
 xen/include/asm-x86/shadow2-private.h |   24 +-
 xen/include/asm-x86/shadow2-types.h   |    2 
 xen/include/asm-x86/shadow2.h         |   65 +++---
 xen/include/xen/sched.h               |    3 
 13 files changed, 299 insertions(+), 307 deletions(-)
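
Editorial note for readers skimming the patch: the refactoring gathers the flat
shadow2_*/shadow_* fields of struct arch_domain and struct arch_vcpu into the new
struct shadow_domain and struct shadow_vcpu (each embedded as a member named
"shadow2"), and renames the per-vcpu entry-point table from struct
shadow2_entry_points to struct shadow2_paging_mode.  The stand-alone sketch below
is illustrative only and is not part of the patch; the mock struct definitions
merely mirror the layout added to xen/include/asm-x86/domain.h further down, and
main() is a hypothetical harness showing the before/after spellings.

/* Illustrative sketch, not part of the patch: mock structs mirroring the
 * new layout in xen/include/asm-x86/domain.h, with a hypothetical main()
 * demonstrating the renamed field accesses. */
#include <stdio.h>

struct shadow2_paging_mode {            /* was: struct shadow2_entry_points */
    int guest_levels;
    int shadow_levels;
};

struct shadow_vcpu {                    /* embedded as v->arch.shadow2 */
    struct shadow2_paging_mode *mode;   /* was: v->arch.shadow2 (a pointer) */
};

struct shadow_domain {                  /* embedded as d->arch.shadow2 */
    unsigned int total_pages;           /* was: d->arch.shadow2_total_pages */
    unsigned int free_pages;            /* was: d->arch.shadow2_free_pages  */
};

struct arch_vcpu   { struct shadow_vcpu   shadow2; };
struct arch_domain { struct shadow_domain shadow2; };

int main(void)
{
    struct shadow2_paging_mode pm = { .guest_levels = 4, .shadow_levels = 4 };
    struct arch_vcpu   v = { .shadow2 = { .mode = &pm } };
    struct arch_domain d = { .shadow2 = { .total_pages = 256, .free_pages = 0 } };

    /* New spellings after this patch; the old ones were
     * v.shadow2->guest_levels and d.shadow2_total_pages. */
    printf("guest_levels=%d total_pages=%u\n",
           v.shadow2.mode->guest_levels, d.shadow2.total_pages);
    return 0;
}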

diff -r 5fc1fe790835 -r 45a84091144e xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Sat Aug 19 17:07:54 2006 +0100
+++ b/xen/arch/x86/domain.c     Sun Aug 20 17:55:33 2006 +0100
@@ -195,10 +195,10 @@ int arch_domain_create(struct domain *d)
 
     shadow2_lock_init(d);
     for ( i = 0; i <= SHADOW2_MAX_ORDER; i++ )
-        INIT_LIST_HEAD(&d->arch.shadow2_freelists[i]);
-    INIT_LIST_HEAD(&d->arch.shadow2_p2m_freelist);
-    INIT_LIST_HEAD(&d->arch.shadow2_p2m_inuse);
-    INIT_LIST_HEAD(&d->arch.shadow2_toplevel_shadows);
+        INIT_LIST_HEAD(&d->arch.shadow2.freelists[i]);
+    INIT_LIST_HEAD(&d->arch.shadow2.p2m_freelist);
+    INIT_LIST_HEAD(&d->arch.shadow2.p2m_inuse);
+    INIT_LIST_HEAD(&d->arch.shadow2.toplevel_shadows);
 
     if ( !is_idle_domain(d) )
     {
@@ -338,7 +338,7 @@ int arch_set_info_guest(
     /* Shadow2: make sure the domain has enough shadow memory to
      * boot another vcpu */
     if ( shadow2_mode_enabled(d) 
-         && d->arch.shadow2_total_pages < shadow2_min_acceptable_pages(d) )
+         && d->arch.shadow2.total_pages < shadow2_min_acceptable_pages(d) )
     {
         destroy_gdt(v);
         return -ENOMEM;
@@ -977,7 +977,7 @@ void arch_dump_domain_info(struct domain
     if ( shadow2_mode_enabled(d) )
     {
         printk("    shadow2 mode: ");
-        if ( d->arch.shadow2_mode & SHM2_enable )
+        if ( d->arch.shadow2.mode & SHM2_enable )
             printk("enabled ");
         if ( shadow2_mode_refcounts(d) )
             printk("refcounts ");
diff -r 5fc1fe790835 -r 45a84091144e xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Sat Aug 19 17:07:54 2006 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Sun Aug 20 17:55:33 2006 +0100
@@ -260,14 +260,6 @@ void hvm_setup_platform(struct domain* d
     if ( !hvm_guest(v) || (v->vcpu_id != 0) )
         return;
 
-#if 0 /* SHADOW2 does not have this */
-    if ( shadow_direct_map_init(d) == 0 )
-    {
-        printk("Can not allocate shadow direct map for HVM domain.\n");
-        domain_crash_synchronous();
-    }
-#endif
-
     hvm_zap_iommu_pages(d);
 
     platform = &d->arch.hvm_domain;
@@ -547,7 +539,7 @@ void hvm_do_hypercall(struct cpu_user_re
         return;
     }
 
-    if ( current->arch.shadow2->guest_levels == 4 )
+    if ( current->arch.shadow2.mode->guest_levels == 4 )
     {
         pregs->rax = hvm_hypercall64_table[pregs->rax](pregs->rdi,
                                                        pregs->rsi,
diff -r 5fc1fe790835 -r 45a84091144e xen/arch/x86/shadow2-common.c
--- a/xen/arch/x86/shadow2-common.c     Sat Aug 19 17:07:54 2006 +0100
+++ b/xen/arch/x86/shadow2-common.c     Sun Aug 20 17:55:33 2006 +0100
@@ -156,7 +156,7 @@ sh2_x86_emulate_write_emulated(unsigned 
 #endif
     if ( hvm_guest(v) )
     {
-        return v->arch.shadow2->x86_emulate_write(v, addr, &val, bytes, ctxt);
+        return v->arch.shadow2.mode->x86_emulate_write(v, addr, &val, bytes, ctxt);
     }
     else 
     {
@@ -179,7 +179,7 @@ sh2_x86_emulate_cmpxchg_emulated(unsigne
 #endif
     if ( hvm_guest(v) )
     {
-        return v->arch.shadow2->x86_emulate_cmpxchg(v, addr, old, new, 
+        return v->arch.shadow2.mode->x86_emulate_cmpxchg(v, addr, old, new, 
                                                     bytes, ctxt);
     }
     else 
@@ -205,7 +205,7 @@ sh2_x86_emulate_cmpxchg8b_emulated(unsig
 #endif
     if ( hvm_guest(v) )
     {
-        return v->arch.shadow2->x86_emulate_cmpxchg8b(v, addr, old_lo, old_hi,
+        return v->arch.shadow2.mode->x86_emulate_cmpxchg8b(v, addr, old_lo, old_hi,
                                                       new_lo, new_hi, ctxt);
     }
     else 
@@ -423,7 +423,7 @@ shadow2_validate_guest_pt_write(struct v
  * ----------------------------------------------
  * 
  * A count of all references to this page from other shadow pages and
- * guest CR3s (a.k.a. v->arch.shadow_table).  
+ * guest CR3s (a.k.a. v->arch.shadow2.table).  
  *
  * The top bits hold the shadow type and the pinned bit.  Top-level
  * shadows are pinned so that they don't disappear when not in a CR3
@@ -593,7 +593,7 @@ static inline int chunk_is_available(str
     int i;
     
     for ( i = order; i <= SHADOW2_MAX_ORDER; i++ )
-        if ( !list_empty(&d->arch.shadow2_freelists[i]) )
+        if ( !list_empty(&d->arch.shadow2.freelists[i]) )
             return 1;
     return 0;
 }
@@ -649,7 +649,7 @@ void shadow2_prealloc(struct domain *d, 
     
     /* Stage one: walk the list of top-level pages, unpinning them */
     perfc_incrc(shadow2_prealloc_1);
-    list_for_each_backwards_safe(l, t, &d->arch.shadow2_toplevel_shadows)
+    list_for_each_backwards_safe(l, t, &d->arch.shadow2.toplevel_shadows)
     {
         pg = list_entry(l, struct page_info, list);
         smfn = page_to_mfn(pg);
@@ -680,7 +680,7 @@ void shadow2_prealloc(struct domain *d, 
         v = d->vcpu[0];
     /* Walk the list from the tail: recently used toplevels have been pulled
      * to the head */
-    list_for_each_backwards_safe(l, t, &d->arch.shadow2_toplevel_shadows)
+    list_for_each_backwards_safe(l, t, &d->arch.shadow2.toplevel_shadows)
     {
         pg = list_entry(l, struct page_info, list);
         smfn = page_to_mfn(pg);
@@ -700,9 +700,9 @@ void shadow2_prealloc(struct domain *d, 
     SHADOW2_PRINTK("Can't pre-allocate %i shadow pages!\n"
                    "  shadow pages total = %u, free = %u, p2m=%u\n",
                    1 << order, 
-                   d->arch.shadow2_total_pages, 
-                   d->arch.shadow2_free_pages, 
-                   d->arch.shadow2_p2m_pages);
+                   d->arch.shadow2.total_pages, 
+                   d->arch.shadow2.free_pages, 
+                   d->arch.shadow2.p2m_pages);
     BUG();
 }
 
@@ -727,9 +727,9 @@ mfn_t shadow2_alloc(struct domain *d,
 
     /* Find smallest order which can satisfy the request. */
     for ( i = order; i <= SHADOW2_MAX_ORDER; i++ )
-        if ( !list_empty(&d->arch.shadow2_freelists[i]) )
-        {
-            pg = list_entry(d->arch.shadow2_freelists[i].next, 
+        if ( !list_empty(&d->arch.shadow2.freelists[i]) )
+        {
+            pg = list_entry(d->arch.shadow2.freelists[i].next, 
                             struct page_info, list);
             list_del(&pg->list);
             
@@ -738,10 +738,10 @@ mfn_t shadow2_alloc(struct domain *d,
             {
                 i--;
                 SH2_SET_PFN_ORDER(pg, i);
-                list_add_tail(&pg->list, &d->arch.shadow2_freelists[i]);
+                list_add_tail(&pg->list, &d->arch.shadow2.freelists[i]);
                 pg += 1 << i;
             }
-            d->arch.shadow2_free_pages -= 1 << order;
+            d->arch.shadow2.free_pages -= 1 << order;
 
             /* Init page info fields and clear the pages */
             for ( i = 0; i < 1<<order ; i++ ) 
@@ -795,7 +795,7 @@ void shadow2_free(struct domain *d, mfn_
     ASSERT(shadow_type != PGC_SH2_p2m_table);
     order = shadow_order(shadow_type);
 
-    d->arch.shadow2_free_pages += 1 << order;
+    d->arch.shadow2.free_pages += 1 << order;
 
     for ( i = 0; i < 1<<order; i++ ) 
     {
@@ -831,7 +831,7 @@ void shadow2_free(struct domain *d, mfn_
     }
 
     SH2_SET_PFN_ORDER(pg, order);
-    list_add_tail(&pg->list, &d->arch.shadow2_freelists[order]);
+    list_add_tail(&pg->list, &d->arch.shadow2.freelists[order]);
 }
 
 /* Divert some memory from the pool to be used by the p2m mapping.
@@ -851,18 +851,18 @@ shadow2_alloc_p2m_pages(struct domain *d
     u32 i;
     ASSERT(shadow2_lock_is_acquired(d));
     
-    if ( d->arch.shadow2_total_pages 
+    if ( d->arch.shadow2.total_pages 
          < (shadow2_min_acceptable_pages(d) + (1<<SHADOW2_MAX_ORDER)) )
         return 0; /* Not enough shadow memory: need to increase it first */
     
     pg = mfn_to_page(shadow2_alloc(d, PGC_SH2_p2m_table, 0));
-    d->arch.shadow2_p2m_pages += (1<<SHADOW2_MAX_ORDER);
-    d->arch.shadow2_total_pages -= (1<<SHADOW2_MAX_ORDER);
+    d->arch.shadow2.p2m_pages += (1<<SHADOW2_MAX_ORDER);
+    d->arch.shadow2.total_pages -= (1<<SHADOW2_MAX_ORDER);
     for (i = 0; i < (1<<SHADOW2_MAX_ORDER); i++)
     {
         /* Unlike shadow pages, mark p2m pages as owned by the domain */
         page_set_owner(&pg[i], d);
-        list_add_tail(&pg[i].list, &d->arch.shadow2_p2m_freelist);
+        list_add_tail(&pg[i].list, &d->arch.shadow2.p2m_freelist);
     }
     return 1;
 }
@@ -875,12 +875,12 @@ shadow2_alloc_p2m_page(struct domain *d)
     mfn_t mfn;
     void *p;
 
-    if ( list_empty(&d->arch.shadow2_p2m_freelist) &&
+    if ( list_empty(&d->arch.shadow2.p2m_freelist) &&
          !shadow2_alloc_p2m_pages(d) )
         return _mfn(0);
-    entry = d->arch.shadow2_p2m_freelist.next;
+    entry = d->arch.shadow2.p2m_freelist.next;
     list_del(entry);
-    list_add_tail(entry, &d->arch.shadow2_p2m_inuse);
+    list_add_tail(entry, &d->arch.shadow2.p2m_inuse);
     mfn = page_to_mfn(list_entry(entry, struct page_info, list));
     sh2_get_ref(mfn, 0);
     p = sh2_map_domain_page(mfn);
@@ -1201,7 +1201,7 @@ static void shadow2_p2m_teardown(struct 
 
     d->arch.phys_table = pagetable_null();
 
-    list_for_each_safe(entry, n, &d->arch.shadow2_p2m_inuse)
+    list_for_each_safe(entry, n, &d->arch.shadow2.p2m_inuse)
     {
         pg = list_entry(entry, struct page_info, list);
         list_del(entry);
@@ -1216,10 +1216,10 @@ static void shadow2_p2m_teardown(struct 
          * these pages were allocated without an owner. */
         page_set_owner(pg, NULL); 
         free_domheap_pages(pg, 0);
-        d->arch.shadow2_p2m_pages--;
+        d->arch.shadow2.p2m_pages--;
         perfc_decr(shadow2_alloc_count);
     }
-    list_for_each_safe(entry, n, &d->arch.shadow2_p2m_freelist)
+    list_for_each_safe(entry, n, &d->arch.shadow2.p2m_freelist)
     {
         list_del(entry);
         pg = list_entry(entry, struct page_info, list);
@@ -1227,10 +1227,10 @@ static void shadow2_p2m_teardown(struct 
         /* Free should not decrement domain's total allocation. */
         page_set_owner(pg, NULL); 
         free_domheap_pages(pg, 0);
-        d->arch.shadow2_p2m_pages--;
+        d->arch.shadow2.p2m_pages--;
         perfc_decr(shadow2_alloc_count);
     }
-    ASSERT(d->arch.shadow2_p2m_pages == 0);
+    ASSERT(d->arch.shadow2.p2m_pages == 0);
 }
 
 /* Set the pool of shadow pages to the required number of pages.
@@ -1256,11 +1256,11 @@ static unsigned int set_sh2_allocation(s
     pages = (pages + ((1<<SHADOW2_MAX_ORDER)-1)) & ~((1<<SHADOW2_MAX_ORDER)-1);
 
     SHADOW2_PRINTK("current %i target %i\n", 
-                   d->arch.shadow2_total_pages, pages);
-
-    while ( d->arch.shadow2_total_pages != pages ) 
-    {
-        if ( d->arch.shadow2_total_pages < pages ) 
+                   d->arch.shadow2.total_pages, pages);
+
+    while ( d->arch.shadow2.total_pages != pages ) 
+    {
+        if ( d->arch.shadow2.total_pages < pages ) 
         {
             /* Need to allocate more memory from domheap */
             pg = alloc_domheap_pages(NULL, SHADOW2_MAX_ORDER, 0); 
@@ -1269,8 +1269,8 @@ static unsigned int set_sh2_allocation(s
                 SHADOW2_PRINTK("failed to allocate shadow pages.\n");
                 return -ENOMEM;
             }
-            d->arch.shadow2_free_pages += 1<<SHADOW2_MAX_ORDER;
-            d->arch.shadow2_total_pages += 1<<SHADOW2_MAX_ORDER;
+            d->arch.shadow2.free_pages += 1<<SHADOW2_MAX_ORDER;
+            d->arch.shadow2.total_pages += 1<<SHADOW2_MAX_ORDER;
             for ( j = 0; j < 1<<SHADOW2_MAX_ORDER; j++ ) 
             {
                 pg[j].u.inuse.type_info = 0;  /* Free page */
@@ -1278,18 +1278,18 @@ static unsigned int set_sh2_allocation(s
             }
             SH2_SET_PFN_ORDER(pg, SHADOW2_MAX_ORDER);
             list_add_tail(&pg->list, 
-                          &d->arch.shadow2_freelists[SHADOW2_MAX_ORDER]);
+                          &d->arch.shadow2.freelists[SHADOW2_MAX_ORDER]);
         } 
-        else if ( d->arch.shadow2_total_pages > pages ) 
+        else if ( d->arch.shadow2.total_pages > pages ) 
         {
             /* Need to return memory to domheap */
             shadow2_prealloc(d, SHADOW2_MAX_ORDER);
-            ASSERT(!list_empty(&d->arch.shadow2_freelists[SHADOW2_MAX_ORDER]));
-            pg = list_entry(d->arch.shadow2_freelists[SHADOW2_MAX_ORDER].next, 
+            ASSERT(!list_empty(&d->arch.shadow2.freelists[SHADOW2_MAX_ORDER]));
+            pg = list_entry(d->arch.shadow2.freelists[SHADOW2_MAX_ORDER].next, 
                             struct page_info, list);
             list_del(&pg->list);
-            d->arch.shadow2_free_pages -= 1<<SHADOW2_MAX_ORDER;
-            d->arch.shadow2_total_pages -= 1<<SHADOW2_MAX_ORDER;
+            d->arch.shadow2.free_pages -= 1<<SHADOW2_MAX_ORDER;
+            d->arch.shadow2.total_pages -= 1<<SHADOW2_MAX_ORDER;
             free_domheap_pages(pg, SHADOW2_MAX_ORDER);
         }
 
@@ -1314,7 +1314,7 @@ unsigned int shadow2_set_allocation(stru
     rv = set_sh2_allocation(d, megabytes << (20 - PAGE_SHIFT), preempted); 
     SHADOW2_PRINTK("dom %u allocation now %u pages (%u MB)\n",
                    d->domain_id,
-                   d->arch.shadow2_total_pages,
+                   d->arch.shadow2.total_pages,
                    shadow2_get_allocation(d));
     shadow2_unlock(d);
     return rv;
@@ -1347,7 +1347,7 @@ static void sh2_hash_audit_bucket(struct
     if ( !(SHADOW2_AUDIT_ENABLE) )
         return;
 
-    e = &d->arch.shadow2_hash_table[bucket];
+    e = &d->arch.shadow2.hash_table[bucket];
     if ( e->t == 0 ) return; /* Bucket is empty */ 
     while ( e )
     {
@@ -1418,7 +1418,7 @@ static struct shadow2_hash_entry *sh2_al
 
     /* We need to allocate a new node. Ensure the free list is not empty. 
      * Allocate new entries in units the same size as the original table. */
-    if ( unlikely(d->arch.shadow2_hash_freelist == NULL) )
+    if ( unlikely(d->arch.shadow2.hash_freelist == NULL) )
     {
         size_t sz = sizeof(void *) + (SHADOW2_HASH_BUCKETS * sizeof(*x));
         extra = xmalloc_bytes(sz);
@@ -1433,8 +1433,8 @@ static struct shadow2_hash_entry *sh2_al
 
         /* Record the allocation block so it can be correctly freed later. */
         *((struct shadow2_hash_entry **)&extra[SHADOW2_HASH_BUCKETS]) = 
-            d->arch.shadow2_hash_allocations;
-        d->arch.shadow2_hash_allocations = &extra[0];
+            d->arch.shadow2.hash_allocations;
+        d->arch.shadow2.hash_allocations = &extra[0];
 
         /* Thread a free chain through the newly-allocated nodes. */
         for ( i = 0; i < (SHADOW2_HASH_BUCKETS - 1); i++ )
@@ -1442,12 +1442,12 @@ static struct shadow2_hash_entry *sh2_al
         extra[i].next = NULL;
 
         /* Add the new nodes to the free list. */
-        d->arch.shadow2_hash_freelist = &extra[0];
+        d->arch.shadow2.hash_freelist = &extra[0];
     }
 
     /* Allocate a new node from the free list. */
-    x = d->arch.shadow2_hash_freelist;
-    d->arch.shadow2_hash_freelist = x->next;
+    x = d->arch.shadow2.hash_freelist;
+    d->arch.shadow2.hash_freelist = x->next;
     return x;
 }
 
@@ -1455,8 +1455,8 @@ static void sh2_free_hash_entry(struct d
 {
     /* Mark the bucket as empty and return it to the free list */
     e->t = 0; 
-    e->next = d->arch.shadow2_hash_freelist;
-    d->arch.shadow2_hash_freelist = e;
+    e->next = d->arch.shadow2.hash_freelist;
+    d->arch.shadow2.hash_freelist = e;
 }
 
 
@@ -1467,13 +1467,13 @@ static int shadow2_hash_alloc(struct dom
     struct shadow2_hash_entry *table;
 
     ASSERT(shadow2_lock_is_acquired(d));
-    ASSERT(!d->arch.shadow2_hash_table);
+    ASSERT(!d->arch.shadow2.hash_table);
 
     table = xmalloc_array(struct shadow2_hash_entry, SHADOW2_HASH_BUCKETS);
     if ( !table ) return 1;
     memset(table, 0, 
            SHADOW2_HASH_BUCKETS * sizeof (struct shadow2_hash_entry));
-    d->arch.shadow2_hash_table = table;
+    d->arch.shadow2.hash_table = table;
     return 0;
 }
 
@@ -1484,14 +1484,14 @@ static void shadow2_hash_teardown(struct
     struct shadow2_hash_entry *a, *n;
 
     ASSERT(shadow2_lock_is_acquired(d));
-    ASSERT(d->arch.shadow2_hash_table);
+    ASSERT(d->arch.shadow2.hash_table);
 
     /* Return the table itself */
-    xfree(d->arch.shadow2_hash_table);
-    d->arch.shadow2_hash_table = NULL;
+    xfree(d->arch.shadow2.hash_table);
+    d->arch.shadow2.hash_table = NULL;
 
     /* Return any extra allocations */
-    a = d->arch.shadow2_hash_allocations;
+    a = d->arch.shadow2.hash_allocations;
     while ( a ) 
     {
         /* We stored a linked-list pointer at the end of each allocation */
@@ -1499,8 +1499,8 @@ static void shadow2_hash_teardown(struct
         xfree(a);
         a = n;
     }
-    d->arch.shadow2_hash_allocations = NULL;
-    d->arch.shadow2_hash_freelist = NULL;
+    d->arch.shadow2.hash_allocations = NULL;
+    d->arch.shadow2.hash_freelist = NULL;
 }
 
 
@@ -1513,7 +1513,7 @@ mfn_t shadow2_hash_lookup(struct vcpu *v
     key_t key;
 
     ASSERT(shadow2_lock_is_acquired(d));
-    ASSERT(d->arch.shadow2_hash_table);
+    ASSERT(d->arch.shadow2.hash_table);
     ASSERT(t);
 
     sh2_hash_audit(d);
@@ -1521,7 +1521,7 @@ mfn_t shadow2_hash_lookup(struct vcpu *v
     perfc_incrc(shadow2_hash_lookups);
     key = sh2_hash(n, t);
 
-    x = head = &d->arch.shadow2_hash_table[key % SHADOW2_HASH_BUCKETS];
+    x = head = &d->arch.shadow2.hash_table[key % SHADOW2_HASH_BUCKETS];
     p = NULL;
 
     sh2_hash_audit_bucket(d, key % SHADOW2_HASH_BUCKETS);
@@ -1535,7 +1535,7 @@ mfn_t shadow2_hash_lookup(struct vcpu *v
             /* Pull-to-front if 'x' isn't already the head item */
             if ( unlikely(x != head) )
             {
-                if ( unlikely(d->arch.shadow2_hash_walking != 0) )
+                if ( unlikely(d->arch.shadow2.hash_walking != 0) )
                     /* Can't reorder: someone is walking the hash chains */
                     return x->smfn;
                 else 
@@ -1575,7 +1575,7 @@ void shadow2_hash_insert(struct vcpu *v,
     key_t key;
     
     ASSERT(shadow2_lock_is_acquired(d));
-    ASSERT(d->arch.shadow2_hash_table);
+    ASSERT(d->arch.shadow2.hash_table);
     ASSERT(t);
 
     sh2_hash_audit(d);
@@ -1583,7 +1583,7 @@ void shadow2_hash_insert(struct vcpu *v,
     perfc_incrc(shadow2_hash_inserts);
     key = sh2_hash(n, t);
 
-    head = &d->arch.shadow2_hash_table[key % SHADOW2_HASH_BUCKETS];
+    head = &d->arch.shadow2.hash_table[key % SHADOW2_HASH_BUCKETS];
 
     sh2_hash_audit_bucket(d, key % SHADOW2_HASH_BUCKETS);
 
@@ -1617,7 +1617,7 @@ void shadow2_hash_delete(struct vcpu *v,
     key_t key;
 
     ASSERT(shadow2_lock_is_acquired(d));
-    ASSERT(d->arch.shadow2_hash_table);
+    ASSERT(d->arch.shadow2.hash_table);
     ASSERT(t);
 
     sh2_hash_audit(d);
@@ -1625,7 +1625,7 @@ void shadow2_hash_delete(struct vcpu *v,
     perfc_incrc(shadow2_hash_deletes);
     key = sh2_hash(n, t);
 
-    head = &d->arch.shadow2_hash_table[key % SHADOW2_HASH_BUCKETS];
+    head = &d->arch.shadow2.hash_table[key % SHADOW2_HASH_BUCKETS];
 
     sh2_hash_audit_bucket(d, key % SHADOW2_HASH_BUCKETS);
 
@@ -1695,8 +1695,8 @@ static void hash_foreach(struct vcpu *v,
 
     /* Say we're here, to stop hash-lookups reordering the chains */
     ASSERT(shadow2_lock_is_acquired(d));
-    ASSERT(d->arch.shadow2_hash_walking == 0);
-    d->arch.shadow2_hash_walking = 1;
+    ASSERT(d->arch.shadow2.hash_walking == 0);
+    d->arch.shadow2.hash_walking = 1;
 
     callback_mask &= ~1; /* Never attempt to call back on empty buckets */
     for ( i = 0; i < SHADOW2_HASH_BUCKETS; i++ ) 
@@ -1704,7 +1704,7 @@ static void hash_foreach(struct vcpu *v,
         /* WARNING: This is not safe against changes to the hash table.
          * The callback *must* return non-zero if it has inserted or
          * deleted anything from the hash (lookups are OK, though). */
-        for ( x = &d->arch.shadow2_hash_table[i]; x; x = x->next )
+        for ( x = &d->arch.shadow2.hash_table[i]; x; x = x->next )
         {
             if ( callback_mask & (1 << x->t) ) 
             {
@@ -1716,7 +1716,7 @@ static void hash_foreach(struct vcpu *v,
         }
         if ( done ) break; 
     }
-    d->arch.shadow2_hash_walking = 0; 
+    d->arch.shadow2.hash_walking = 0; 
 }
 
 
@@ -1891,7 +1891,7 @@ int shadow2_remove_write_access(struct v
          * magic slot used to map high memory regions (linux HIGHTPTE) */
 
 #define GUESS(_a, _h) do {                                              \
-            if ( v->arch.shadow2->guess_wrmap(v, (_a), gmfn) )          \
+            if ( v->arch.shadow2.mode->guess_wrmap(v, (_a), gmfn) )          \
                 perfc_incrc(shadow2_writeable_h_ ## _h);                \
             if ( (pg->u.inuse.type_info & PGT_count_mask) == 0 )        \
                 return 1;                                               \
@@ -1903,14 +1903,14 @@ int shadow2_remove_write_access(struct v
              && (gfn = sh2_mfn_to_gfn(v->domain, gmfn)) < 0x40000000 )
             GUESS(0xC0000000 + (gfn << PAGE_SHIFT), 4);
 
-        if ( v->arch.shadow2->guest_levels == 2 )
+        if ( v->arch.shadow2.mode->guest_levels == 2 )
         {
             if ( level == 1 )
                 /* 32bit non-PAE w2k3: linear map at 0xC0000000 */
                 GUESS(0xC0000000UL + (fault_addr >> 10), 1);
         }
 #if CONFIG_PAGING_LEVELS >= 3
-        else if ( v->arch.shadow2->guest_levels == 3 )
+        else if ( v->arch.shadow2.mode->guest_levels == 3 )
         {
             /* 32bit PAE w2k3: linear map at 0xC0000000 */
             switch ( level ) 
@@ -1920,7 +1920,7 @@ int shadow2_remove_write_access(struct v
             }
         }
 #if CONFIG_PAGING_LEVELS >= 4
-        else if ( v->arch.shadow2->guest_levels == 4 )
+        else if ( v->arch.shadow2.mode->guest_levels == 4 )
         {
             /* 64bit w2k3: linear map at 0x0000070000000000 */
             switch ( level ) 
@@ -2273,7 +2273,7 @@ void sh2_update_paging_modes(struct vcpu
 void sh2_update_paging_modes(struct vcpu *v)
 {
     struct domain *d = v->domain;
-    struct shadow2_entry_points *old_entries = v->arch.shadow2;
+    struct shadow2_paging_mode *old_mode = v->arch.shadow2.mode;
     mfn_t old_guest_table;
 
     ASSERT(shadow2_lock_is_acquired(d));
@@ -2297,8 +2297,7 @@ void sh2_update_paging_modes(struct vcpu
 
     // First, tear down any old shadow tables held by this vcpu.
     //
-    if ( v->arch.shadow2 )
-        shadow2_detach_old_tables(v);
+    shadow2_detach_old_tables(v);
 
     if ( !hvm_guest(v) )
     {
@@ -2307,13 +2306,13 @@ void sh2_update_paging_modes(struct vcpu
         ///
 #if CONFIG_PAGING_LEVELS == 4
         if ( pv_32bit_guest(v) )
-            v->arch.shadow2 = &SHADOW2_INTERNAL_NAME(shadow2_entry, 4, 3);
+            v->arch.shadow2.mode = &SHADOW2_INTERNAL_NAME(sh2_paging_mode,4,3);
         else
-            v->arch.shadow2 = &SHADOW2_INTERNAL_NAME(shadow2_entry, 4, 4);
+            v->arch.shadow2.mode = &SHADOW2_INTERNAL_NAME(sh2_paging_mode,4,4);
 #elif CONFIG_PAGING_LEVELS == 3
-        v->arch.shadow2 = &SHADOW2_INTERNAL_NAME(shadow2_entry, 3, 3);
+        v->arch.shadow2.mode = &SHADOW2_INTERNAL_NAME(sh2_paging_mode,3,3);
 #elif CONFIG_PAGING_LEVELS == 2
-        v->arch.shadow2 = &SHADOW2_INTERNAL_NAME(shadow2_entry, 2, 2);
+        v->arch.shadow2.mode = &SHADOW2_INTERNAL_NAME(sh2_paging_mode,2,2);
 #else
 #error unexpected paging mode
 #endif
@@ -2326,10 +2325,9 @@ void sh2_update_paging_modes(struct vcpu
         ASSERT(shadow2_mode_translate(d));
         ASSERT(shadow2_mode_external(d));
 
-        if ( !hvm_paging_enabled(v) )
-        {
-            // paging disabled...
-            clear_bit(_VCPUF_shadow2_translate, &v->vcpu_flags);
+        v->arch.shadow2.hvm_paging_enabled = !!hvm_paging_enabled(v);
+        if ( !v->arch.shadow2.hvm_paging_enabled )
+        {
             
             /* Set v->arch.guest_table to use the p2m map, and choose
              * the appropriate shadow mode */
@@ -2337,11 +2335,11 @@ void sh2_update_paging_modes(struct vcpu
 #if CONFIG_PAGING_LEVELS == 2
             v->arch.guest_table =
                 pagetable_from_pfn(pagetable_get_pfn(d->arch.phys_table));
-            v->arch.shadow2 = &SHADOW2_INTERNAL_NAME(shadow2_entry,2,2);
+            v->arch.shadow2.mode = &SHADOW2_INTERNAL_NAME(sh2_paging_mode,2,2);
 #elif CONFIG_PAGING_LEVELS == 3 
             v->arch.guest_table =
                 pagetable_from_pfn(pagetable_get_pfn(d->arch.phys_table));
-            v->arch.shadow2 = &SHADOW2_INTERNAL_NAME(shadow2_entry,3,3);
+            v->arch.shadow2.mode = &SHADOW2_INTERNAL_NAME(sh2_paging_mode,3,3);
 #else /* CONFIG_PAGING_LEVELS == 4 */
             { 
                 l4_pgentry_t *l4e; 
@@ -2353,7 +2351,7 @@ void sh2_update_paging_modes(struct vcpu
                     pagetable_from_pfn(l4e_get_pfn(l4e[0]));
                 sh2_unmap_domain_page(l4e);
             }
-            v->arch.shadow2 = &SHADOW2_INTERNAL_NAME(shadow2_entry,3,3);
+            v->arch.shadow2.mode = &SHADOW2_INTERNAL_NAME(sh2_paging_mode,3,3);
 #endif
             /* Fix up refcounts on guest_table */
             get_page(mfn_to_page(pagetable_get_mfn(v->arch.guest_table)), d);
@@ -2362,13 +2360,12 @@ void sh2_update_paging_modes(struct vcpu
         }
         else
         {
-            set_bit(_VCPUF_shadow2_translate, &v->vcpu_flags);
-
 #ifdef __x86_64__
             if ( hvm_long_mode_enabled(v) )
             {
                 // long mode guest...
-                v->arch.shadow2 = &SHADOW2_INTERNAL_NAME(shadow2_entry, 4, 4);
+                v->arch.shadow2.mode =
+                    &SHADOW2_INTERNAL_NAME(sh2_paging_mode, 4, 4);
             }
             else
 #endif
@@ -2376,7 +2373,8 @@ void sh2_update_paging_modes(struct vcpu
                 {
 #if CONFIG_PAGING_LEVELS >= 3
                     // 32-bit PAE mode guest...
-                    v->arch.shadow2 = &SHADOW2_INTERNAL_NAME(shadow2_entry, 3, 3);
+                    v->arch.shadow2.mode =
+                        &SHADOW2_INTERNAL_NAME(sh2_paging_mode, 3, 3);
 #else
                     SHADOW2_ERROR("PAE not supported in 32-bit Xen\n");
                     domain_crash(d);
@@ -2387,13 +2385,15 @@ void sh2_update_paging_modes(struct vcpu
                 {
                     // 32-bit 2 level guest...
 #if CONFIG_PAGING_LEVELS >= 3
-                    v->arch.shadow2 = &SHADOW2_INTERNAL_NAME(shadow2_entry, 3, 2);
+                    v->arch.shadow2.mode =
+                        &SHADOW2_INTERNAL_NAME(sh2_paging_mode, 3, 2);
 #else
-                    v->arch.shadow2 = &SHADOW2_INTERNAL_NAME(shadow2_entry, 2, 2);
+                    v->arch.shadow2.mode =
+                        &SHADOW2_INTERNAL_NAME(sh2_paging_mode, 2, 2);
 #endif
                 }
         }
-        
+
         if ( pagetable_get_pfn(v->arch.monitor_table) == 0 )
         {
             mfn_t mmfn = shadow2_make_monitor_table(v);
@@ -2401,18 +2401,18 @@ void sh2_update_paging_modes(struct vcpu
             v->arch.monitor_vtable = sh2_map_domain_page(mmfn);
         } 
 
-        if ( v->arch.shadow2 != old_entries )
+        if ( v->arch.shadow2.mode != old_mode )
         {
             SHADOW2_PRINTK("new paging mode: d=%u v=%u g=%u s=%u "
                            "(was g=%u s=%u)\n",
                            d->domain_id, v->vcpu_id, 
-                           v->arch.shadow2->guest_levels,
-                           v->arch.shadow2->shadow_levels,
-                           old_entries ? old_entries->guest_levels : 0,
-                           old_entries ? old_entries->shadow_levels : 0);
-            if ( old_entries &&
-                 (v->arch.shadow2->shadow_levels !=
-                  old_entries->shadow_levels) )
+                           v->arch.shadow2.mode->guest_levels,
+                           v->arch.shadow2.mode->shadow_levels,
+                           old_mode ? old_mode->guest_levels : 0,
+                           old_mode ? old_mode->shadow_levels : 0);
+            if ( old_mode &&
+                 (v->arch.shadow2.mode->shadow_levels !=
+                  old_mode->shadow_levels) )
             {
                 /* Need to make a new monitor table for the new mode */
                 mfn_t new_mfn, old_mfn;
@@ -2430,7 +2430,7 @@ void sh2_update_paging_modes(struct vcpu
                 sh2_unmap_domain_page(v->arch.monitor_vtable);
                 old_mfn = pagetable_get_mfn(v->arch.monitor_table);
                 v->arch.monitor_table = pagetable_null();
-                new_mfn = v->arch.shadow2->make_monitor_table(v);            
+                new_mfn = v->arch.shadow2.mode->make_monitor_table(v);
                 v->arch.monitor_table = pagetable_from_mfn(new_mfn);
                 v->arch.monitor_vtable = sh2_map_domain_page(new_mfn);
                 SHADOW2_PRINTK("new monitor table %"SH2_PRI_mfn "\n",
@@ -2442,7 +2442,7 @@ void sh2_update_paging_modes(struct vcpu
                 make_cr3(v, mfn_x(new_mfn));
                 write_ptbase(v);
                 hvm_update_host_cr3(v);
-                old_entries->destroy_monitor_table(v, old_mfn);
+                old_mode->destroy_monitor_table(v, old_mfn);
             }
         }
 
@@ -2452,7 +2452,7 @@ void sh2_update_paging_modes(struct vcpu
         //        This *does* happen, at least for CR4.PGE...
     }
 
-    v->arch.shadow2->update_cr3(v);
+    v->arch.shadow2.mode->update_cr3(v);
 }
 
 /**************************************************************************/
@@ -2465,7 +2465,7 @@ static void sh2_new_mode(struct domain *
 
     ASSERT(shadow2_lock_is_acquired(d));
     ASSERT(d != current->domain);
-    d->arch.shadow2_mode = new_mode;
+    d->arch.shadow2.mode = new_mode;
     if ( new_mode & SHM2_translate ) 
         shadow2_audit_p2m(d);
     for_each_vcpu(d, v)
@@ -2509,7 +2509,7 @@ static int shadow2_enable(struct domain 
 #endif
 
     /* Init the shadow memory allocation if the user hasn't done so */
-    old_pages = d->arch.shadow2_total_pages;
+    old_pages = d->arch.shadow2.total_pages;
     if ( old_pages == 0 )
         if ( set_sh2_allocation(d, 256, NULL) != 0 ) /* Use at least 1MB */
         {
@@ -2564,8 +2564,7 @@ void shadow2_teardown(struct domain *d)
         /* Release the shadow and monitor tables held by each vcpu */
         for_each_vcpu(d, v)
         {
-            if ( v->arch.shadow2 )
-                shadow2_detach_old_tables(v);
+            shadow2_detach_old_tables(v);
             if ( shadow2_mode_external(d) )
             {
                 mfn = pagetable_get_mfn(v->arch.monitor_table);
@@ -2576,34 +2575,34 @@ void shadow2_teardown(struct domain *d)
         }
     }
 
-    if ( d->arch.shadow2_total_pages != 0 )
+    if ( d->arch.shadow2.total_pages != 0 )
     {
         SHADOW2_PRINTK("teardown of domain %u starts."
                        "  Shadow pages total = %u, free = %u, p2m=%u\n",
                        d->domain_id,
-                       d->arch.shadow2_total_pages, 
-                       d->arch.shadow2_free_pages, 
-                       d->arch.shadow2_p2m_pages);
+                       d->arch.shadow2.total_pages, 
+                       d->arch.shadow2.free_pages, 
+                       d->arch.shadow2.p2m_pages);
         /* Destroy all the shadows and release memory to domheap */
         set_sh2_allocation(d, 0, NULL);
         /* Release the hash table back to xenheap */
-        if (d->arch.shadow2_hash_table) 
+        if (d->arch.shadow2.hash_table) 
             shadow2_hash_teardown(d);
         /* Release the log-dirty bitmap of dirtied pages */
         sh2_free_log_dirty_bitmap(d);
         /* Should not have any more memory held */
         SHADOW2_PRINTK("teardown done."
                        "  Shadow pages total = %u, free = %u, p2m=%u\n",
-                       d->arch.shadow2_total_pages, 
-                       d->arch.shadow2_free_pages, 
-                       d->arch.shadow2_p2m_pages);
-        ASSERT(d->arch.shadow2_total_pages == 0);
+                       d->arch.shadow2.total_pages, 
+                       d->arch.shadow2.free_pages, 
+                       d->arch.shadow2.p2m_pages);
+        ASSERT(d->arch.shadow2.total_pages == 0);
     }
 
     /* We leave the "permanent" shadow modes enabled, but clear the
      * log-dirty mode bit.  We don't want any more mark_dirty()
      * calls now that we've torn down the bitmap */
-    d->arch.shadow2_mode &= ~SHM2_log_dirty;
+    d->arch.shadow2.mode &= ~SHM2_log_dirty;
 
     shadow2_unlock(d);
 }
@@ -2615,26 +2614,26 @@ void shadow2_final_teardown(struct domai
     SHADOW2_PRINTK("dom %u final teardown starts."
                    "  Shadow pages total = %u, free = %u, p2m=%u\n",
                    d->domain_id,
-                   d->arch.shadow2_total_pages, 
-                   d->arch.shadow2_free_pages, 
-                   d->arch.shadow2_p2m_pages);
+                   d->arch.shadow2.total_pages, 
+                   d->arch.shadow2.free_pages, 
+                   d->arch.shadow2.p2m_pages);
 
     /* Double-check that the domain didn't have any shadow memory.  
      * It is possible for a domain that never got domain_kill()ed
      * to get here with its shadow allocation intact. */
-    if ( d->arch.shadow2_total_pages != 0 )
+    if ( d->arch.shadow2.total_pages != 0 )
         shadow2_teardown(d);
 
     /* It is now safe to pull down the p2m map. */
-    if ( d->arch.shadow2_p2m_pages != 0 )
+    if ( d->arch.shadow2.p2m_pages != 0 )
         shadow2_p2m_teardown(d);
 
     SHADOW2_PRINTK("dom %u final teardown done."
                    "  Shadow pages total = %u, free = %u, p2m=%u\n",
                    d->domain_id,
-                   d->arch.shadow2_total_pages, 
-                   d->arch.shadow2_free_pages, 
-                   d->arch.shadow2_p2m_pages);
+                   d->arch.shadow2.total_pages, 
+                   d->arch.shadow2.free_pages, 
+                   d->arch.shadow2.p2m_pages);
 }
 
 static int shadow2_one_bit_enable(struct domain *d, u32 mode)
@@ -2643,12 +2642,12 @@ static int shadow2_one_bit_enable(struct
     ASSERT(shadow2_lock_is_acquired(d));
 
     /* Sanity check the call */
-    if ( d == current->domain || (d->arch.shadow2_mode & mode) )
+    if ( d == current->domain || (d->arch.shadow2.mode & mode) )
     {
         return -EINVAL;
     }
 
-    if ( d->arch.shadow2_mode == 0 )
+    if ( d->arch.shadow2.mode == 0 )
     {
         /* Init the shadow memory allocation and the hash table */
         if ( set_sh2_allocation(d, 1, NULL) != 0 
@@ -2660,7 +2659,7 @@ static int shadow2_one_bit_enable(struct
     }
 
     /* Update the bits */
-    sh2_new_mode(d, d->arch.shadow2_mode | mode);
+    sh2_new_mode(d, d->arch.shadow2.mode | mode);
 
     return 0;
 }
@@ -2672,26 +2671,25 @@ static int shadow2_one_bit_disable(struc
     ASSERT(shadow2_lock_is_acquired(d));
 
     /* Sanity check the call */
-    if ( d == current->domain || !(d->arch.shadow2_mode & mode) )
+    if ( d == current->domain || !(d->arch.shadow2.mode & mode) )
     {
         return -EINVAL;
     }
 
     /* Update the bits */
-    sh2_new_mode(d, d->arch.shadow2_mode & ~mode);
-    if ( d->arch.shadow2_mode == 0 )
+    sh2_new_mode(d, d->arch.shadow2.mode & ~mode);
+    if ( d->arch.shadow2.mode == 0 )
     {
         /* Get this domain off shadows */
         SHADOW2_PRINTK("un-shadowing of domain %u starts."
                        "  Shadow pages total = %u, free = %u, p2m=%u\n",
                        d->domain_id,
-                       d->arch.shadow2_total_pages, 
-                       d->arch.shadow2_free_pages, 
-                       d->arch.shadow2_p2m_pages);
+                       d->arch.shadow2.total_pages, 
+                       d->arch.shadow2.free_pages, 
+                       d->arch.shadow2.p2m_pages);
         for_each_vcpu(d, v)
         {
-            if ( v->arch.shadow2 )
-                shadow2_detach_old_tables(v);
+            shadow2_detach_old_tables(v);
 #if CONFIG_PAGING_LEVELS == 4
             if ( !(v->arch.flags & TF_kernel_mode) )
                 make_cr3(v, pagetable_get_pfn(v->arch.guest_table_user));
@@ -2714,9 +2712,9 @@ static int shadow2_one_bit_disable(struc
         SHADOW2_PRINTK("un-shadowing of domain %u done."
                        "  Shadow pages total = %u, free = %u, p2m=%u\n",
                        d->domain_id,
-                       d->arch.shadow2_total_pages, 
-                       d->arch.shadow2_free_pages, 
-                       d->arch.shadow2_p2m_pages);
+                       d->arch.shadow2.total_pages, 
+                       d->arch.shadow2.free_pages, 
+                       d->arch.shadow2.p2m_pages);
     }
 
     return 0;
@@ -2762,19 +2760,19 @@ static int
 static int
 sh2_alloc_log_dirty_bitmap(struct domain *d)
 {
-    ASSERT(d->arch.shadow_dirty_bitmap == NULL);
-    d->arch.shadow_dirty_bitmap_size =
+    ASSERT(d->arch.shadow2.dirty_bitmap == NULL);
+    d->arch.shadow2.dirty_bitmap_size =
         (d->shared_info->arch.max_pfn + (BITS_PER_LONG - 1)) &
         ~(BITS_PER_LONG - 1);
-    d->arch.shadow_dirty_bitmap =
+    d->arch.shadow2.dirty_bitmap =
         xmalloc_array(unsigned long,
-                      d->arch.shadow_dirty_bitmap_size / BITS_PER_LONG);
-    if ( d->arch.shadow_dirty_bitmap == NULL )
-    {
-        d->arch.shadow_dirty_bitmap_size = 0;
+                      d->arch.shadow2.dirty_bitmap_size / BITS_PER_LONG);
+    if ( d->arch.shadow2.dirty_bitmap == NULL )
+    {
+        d->arch.shadow2.dirty_bitmap_size = 0;
         return -ENOMEM;
     }
-    memset(d->arch.shadow_dirty_bitmap, 0, d->arch.shadow_dirty_bitmap_size/8);
+    memset(d->arch.shadow2.dirty_bitmap, 0, d->arch.shadow2.dirty_bitmap_size/8);
 
     return 0;
 }
@@ -2782,11 +2780,11 @@ static void
 static void
 sh2_free_log_dirty_bitmap(struct domain *d)
 {
-    d->arch.shadow_dirty_bitmap_size = 0;
-    if ( d->arch.shadow_dirty_bitmap )
-    {
-        xfree(d->arch.shadow_dirty_bitmap);
-        d->arch.shadow_dirty_bitmap = NULL;
+    d->arch.shadow2.dirty_bitmap_size = 0;
+    if ( d->arch.shadow2.dirty_bitmap )
+    {
+        xfree(d->arch.shadow2.dirty_bitmap);
+        d->arch.shadow2.dirty_bitmap = NULL;
     }
 }
 
@@ -2968,11 +2966,11 @@ static int shadow2_log_dirty_op(struct d
     SHADOW2_DEBUG(LOGDIRTY, "log-dirty %s: dom %u faults=%u dirty=%u\n", 
                   (clean) ? "clean" : "peek",
                   d->domain_id,
-                  d->arch.shadow_fault_count, 
-                  d->arch.shadow_dirty_count);
-
-    sc->stats.fault_count = d->arch.shadow_fault_count;
-    sc->stats.dirty_count = d->arch.shadow_dirty_count;    
+                  d->arch.shadow2.fault_count, 
+                  d->arch.shadow2.dirty_count);
+
+    sc->stats.fault_count = d->arch.shadow2.fault_count;
+    sc->stats.dirty_count = d->arch.shadow2.dirty_count;    
         
     if ( clean ) 
     {
@@ -2982,25 +2980,25 @@ static int shadow2_log_dirty_op(struct d
         /* Need to revoke write access to the domain's pages again. 
          * In future, we'll have a less heavy-handed approach to this, 
          * but for now, we just unshadow everything except Xen. */
-        list_for_each_safe(l, t, &d->arch.shadow2_toplevel_shadows)
+        list_for_each_safe(l, t, &d->arch.shadow2.toplevel_shadows)
         {
             pg = list_entry(l, struct page_info, list);
             shadow2_unhook_mappings(d->vcpu[0], page_to_mfn(pg));
         }
 
-        d->arch.shadow_fault_count = 0;
-        d->arch.shadow_dirty_count = 0;
+        d->arch.shadow2.fault_count = 0;
+        d->arch.shadow2.dirty_count = 0;
     }
 
     if ( guest_handle_is_null(sc->dirty_bitmap) ||
-         (d->arch.shadow_dirty_bitmap == NULL) )
+         (d->arch.shadow2.dirty_bitmap == NULL) )
     {
         rv = -EINVAL;
         goto out;
     }
  
-    if ( sc->pages > d->arch.shadow_dirty_bitmap_size )
-        sc->pages = d->arch.shadow_dirty_bitmap_size; 
+    if ( sc->pages > d->arch.shadow2.dirty_bitmap_size )
+        sc->pages = d->arch.shadow2.dirty_bitmap_size; 
 
 #define CHUNK (8*1024) /* Transfer and clear in 1kB chunks for L1 cache. */
     for ( i = 0; i < sc->pages; i += CHUNK )
@@ -3012,7 +3010,7 @@ static int shadow2_log_dirty_op(struct d
         if ( copy_to_guest_offset(
                  sc->dirty_bitmap, 
                  i/(8*sizeof(unsigned long)),
-                 d->arch.shadow_dirty_bitmap + (i/(8*sizeof(unsigned long))),
+                 d->arch.shadow2.dirty_bitmap + (i/(8*sizeof(unsigned long))),
                  (bytes + sizeof(unsigned long) - 1) / sizeof(unsigned long)) )
         {
             rv = -EINVAL;
@@ -3020,7 +3018,7 @@ static int shadow2_log_dirty_op(struct d
         }
 
         if ( clean )
-            memset(d->arch.shadow_dirty_bitmap + (i/(8*sizeof(unsigned long))),
+            memset(d->arch.shadow2.dirty_bitmap + (i/(8*sizeof(unsigned long))),
                    0, bytes);
     }
 #undef CHUNK
@@ -3043,7 +3041,7 @@ void sh2_do_mark_dirty(struct domain *d,
     if ( !valid_mfn(gmfn) )
         return;
 
-    ASSERT(d->arch.shadow_dirty_bitmap != NULL);
+    ASSERT(d->arch.shadow2.dirty_bitmap != NULL);
 
     /* We /really/ mean PFN here, even for non-translated guests. */
     pfn = get_gpfn_from_mfn(mfn_x(gmfn));
@@ -3057,14 +3055,14 @@ void sh2_do_mark_dirty(struct domain *d,
         return;
 
     /* N.B. Can use non-atomic TAS because protected by shadow2_lock. */
-    if ( likely(pfn < d->arch.shadow_dirty_bitmap_size) ) 
+    if ( likely(pfn < d->arch.shadow2.dirty_bitmap_size) ) 
     { 
-        if ( !__test_and_set_bit(pfn, d->arch.shadow_dirty_bitmap) )
+        if ( !__test_and_set_bit(pfn, d->arch.shadow2.dirty_bitmap) )
         {
             SHADOW2_DEBUG(LOGDIRTY, 
                           "marked mfn %" SH2_PRI_mfn " (pfn=%lx), dom %d\n",
                           mfn_x(gmfn), pfn, d->domain_id);
-            d->arch.shadow_dirty_count++;
+            d->arch.shadow2.dirty_count++;
         }
     }
     else
@@ -3074,7 +3072,7 @@ void sh2_do_mark_dirty(struct domain *d,
                        "owner=%d c=%08x t=%" PRtype_info "\n",
                        mfn_x(gmfn), 
                        pfn, 
-                       d->arch.shadow_dirty_bitmap_size,
+                       d->arch.shadow2.dirty_bitmap_size,
                        d->domain_id,
                        (page_get_owner(mfn_to_page(gmfn))
                         ? page_get_owner(mfn_to_page(gmfn))->domain_id
@@ -3106,7 +3104,7 @@ int shadow2_control_op(struct domain *d,
         if ( shadow2_mode_log_dirty(d) )
             if ( (rc = shadow2_log_dirty_disable(d)) != 0 ) 
                 return rc;
-        if ( d->arch.shadow2_mode & SHM2_enable )
+        if ( d->arch.shadow2.mode & SHM2_enable )
             if ( (rc = shadow2_test_disable(d)) != 0 ) 
                 return rc;
         return 0;
@@ -3193,7 +3191,7 @@ void shadow2_audit_tables(struct vcpu *v
     else 
     {
         /* Audit only the current mode's tables */
-        switch (v->arch.shadow2->guest_levels)
+        switch ( v->arch.shadow2.mode->guest_levels )
         {
         case 2: mask = (SH2F_L1_32|SH2F_FL1_32|SH2F_L2_32); break;
         case 3: mask = (SH2F_L1_PAE|SH2F_FL1_PAE|SH2F_L2_PAE
diff -r 5fc1fe790835 -r 45a84091144e xen/arch/x86/shadow2.c
--- a/xen/arch/x86/shadow2.c    Sat Aug 19 17:07:54 2006 +0100
+++ b/xen/arch/x86/shadow2.c    Sun Aug 20 17:55:33 2006 +0100
@@ -82,7 +82,7 @@
  * mappings (ugh! PAE linear mappings) and we copy it to the low-memory
  * buffer so it fits in CR3.  Maybe we can avoid some of this recopying 
  * by using the shadow directly in some places. 
- * Also, for SMP, need to actually respond to seeing shadow2_pae_flip_pending.
+ * Also, for SMP, need to actually respond to seeing shadow2.pae_flip_pending.
  *
  * GUEST_WALK_TABLES TLB FLUSH COALESCE
  * guest_walk_tables can do up to three remote TLB flushes as it walks to
@@ -1245,7 +1245,7 @@ static int shadow_set_l3e(struct vcpu *v
             if (info->vcpus & (1 << vcpu->vcpu_id))
             {
                 // Remember that this flip/update needs to occur.
-                vcpu->arch.shadow2_pae_flip_pending = 1;
+                vcpu->arch.shadow2.pae_flip_pending = 1;
                 flags |= SHADOW2_SET_L3PAE_RECOPY;
             }
         }
@@ -2772,7 +2772,7 @@ static inline void check_for_early_unsha
 static inline void check_for_early_unshadow(struct vcpu *v, mfn_t gmfn)
 {
 #if SHADOW2_OPTIMIZATIONS & SH2OPT_EARLY_UNSHADOW
-    if ( v->arch.last_emulated_mfn == mfn_x(gmfn) &&
+    if ( v->arch.shadow2.last_emulated_mfn == mfn_x(gmfn) &&
          sh2_mfn_is_a_page_table(gmfn) )
     {
         u32 flags = mfn_to_page(gmfn)->shadow2_flags;
@@ -2807,7 +2807,7 @@ static inline void check_for_early_unsha
             }
         }
     }
-    v->arch.last_emulated_mfn = mfn_x(gmfn);
+    v->arch.shadow2.last_emulated_mfn = mfn_x(gmfn);
 #endif
 }
 
@@ -2815,7 +2815,7 @@ static inline void reset_early_unshadow(
 static inline void reset_early_unshadow(struct vcpu *v)
 {
 #if SHADOW2_OPTIMIZATIONS & SH2OPT_EARLY_UNSHADOW
-    v->arch.last_emulated_mfn = INVALID_MFN;
+    v->arch.shadow2.last_emulated_mfn = INVALID_MFN;
 #endif
 }
 
@@ -3000,7 +3000,7 @@ static int sh2_page_fault(struct vcpu *v
 #endif
 
     perfc_incrc(shadow2_fault_fixed);
-    d->arch.shadow_fault_count++;
+    d->arch.shadow2.fault_count++;
     reset_early_unshadow(v);
 
  done:
@@ -3026,7 +3026,7 @@ static int sh2_page_fault(struct vcpu *v
 
     SHADOW2_PRINTK("emulate: eip=%#lx\n", emul_regs.eip);
 
-    v->arch.shadow2_propagate_fault = 0;
+    v->arch.shadow2.propagate_fault = 0;
     if ( x86_emulate_memop(&emul_ctxt, &shadow2_emulator_ops) )
     {
         SHADOW2_PRINTK("emulator failure, unshadowing mfn %#lx\n", 
@@ -3040,7 +3040,7 @@ static int sh2_page_fault(struct vcpu *v
          * guest to loop on the same page fault. */
         goto done;
     }
-    if ( v->arch.shadow2_propagate_fault )
+    if ( v->arch.shadow2.propagate_fault )
     {
         /* Emulation triggered another page fault */
         goto not_a_shadow_fault;
@@ -3493,7 +3493,7 @@ void sh2_pae_recopy(struct domain *d)
     
     for_each_vcpu(d, v)
     {
-        if ( !v->arch.shadow2_pae_flip_pending ) 
+        if ( !v->arch.shadow2.pae_flip_pending ) 
             continue;
 
         cpu_set(v->processor, flush_mask);
@@ -3526,7 +3526,7 @@ void sh2_pae_recopy(struct domain *d)
             }
         }
 #endif
-        v->arch.shadow2_pae_flip_pending = 0;        
+        v->arch.shadow2.pae_flip_pending = 0;        
     }
 
     flush_tlb_mask(flush_mask);
@@ -3612,7 +3612,7 @@ sh2_update_cr3(struct vcpu *v)
 #endif
 
     ASSERT(shadow2_lock_is_acquired(v->domain));
-    ASSERT(v->arch.shadow2);
+    ASSERT(v->arch.shadow2.mode);
 
     ////
     //// vcpu->arch.guest_table is already set
@@ -3713,7 +3713,7 @@ sh2_update_cr3(struct vcpu *v)
     {
         /* Pull this root shadow to the front of the list of roots. */
         list_del(&mfn_to_page(smfn)->list);
-        list_add(&mfn_to_page(smfn)->list, &d->arch.shadow2_toplevel_shadows);
+        list_add(&mfn_to_page(smfn)->list, &d->arch.shadow2.toplevel_shadows);
     }
     else
     {
@@ -3725,7 +3725,7 @@ sh2_update_cr3(struct vcpu *v)
         shadow2_prealloc(d, SHADOW2_MAX_ORDER); 
         /* Shadow the page. */
         smfn = sh2_make_shadow(v, gmfn, PGC_SH2_guest_root_type);
-        list_add(&mfn_to_page(smfn)->list, &d->arch.shadow2_toplevel_shadows);
+        list_add(&mfn_to_page(smfn)->list, &d->arch.shadow2.toplevel_shadows);
     }
     ASSERT(valid_mfn(smfn));
     v->arch.shadow_table = pagetable_from_mfn(smfn);
@@ -4082,7 +4082,7 @@ static inline void * emulate_map_dest(st
          || (!(flags & _PAGE_USER) && ring_3(ctxt->regs)) )
     {
         /* This write would have faulted even on bare metal */
-        v->arch.shadow2_propagate_fault = 1;
+        v->arch.shadow2.propagate_fault = 1;
         return NULL;
     }
     
@@ -4458,7 +4458,7 @@ int sh2_audit_l4_table(struct vcpu *v, m
 /**************************************************************************/
 /* Entry points into this mode of the shadow code.
  * This will all be mangled by the preprocessor to uniquify everything. */
-struct shadow2_entry_points shadow2_entry = {
+struct shadow2_paging_mode sh2_paging_mode = {
     .page_fault             = sh2_page_fault, 
     .invlpg                 = sh2_invlpg,
     .gva_to_gpa             = sh2_gva_to_gpa,
diff -r 5fc1fe790835 -r 45a84091144e xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c      Sat Aug 19 17:07:54 2006 +0100
+++ b/xen/arch/x86/traps.c      Sun Aug 20 17:55:33 2006 +0100
@@ -923,13 +923,6 @@ asmlinkage int do_page_fault(struct cpu_
 
     perfc_incrc(page_faults);
 
-    if ( shadow2_mode_enabled(current->domain) )
-        debugtrace_printk("%s %s %d dom=%d eip=%p cr2=%p code=%d cs=%x\n",
-                          __func__, __FILE__, __LINE__,
-                          current->domain->domain_id,
-                          (void *)regs->eip, (void *)addr, regs->error_code,
-                          regs->cs);
-
     if ( unlikely((rc = fixup_page_fault(addr, regs)) != 0) )
         return rc;
 
diff -r 5fc1fe790835 -r 45a84091144e xen/arch/x86/x86_32/traps.c
--- a/xen/arch/x86/x86_32/traps.c       Sat Aug 19 17:07:54 2006 +0100
+++ b/xen/arch/x86/x86_32/traps.c       Sun Aug 20 17:55:33 2006 +0100
@@ -89,7 +89,8 @@ void show_page_walk(unsigned long addr)
     l3e = l3t[l3_table_offset(addr)];
     mfn = l3e_get_pfn(l3e);
     pfn = get_gpfn_from_mfn(mfn);
-    printk(" L3 = %"PRIpte" %08lx\n", l3e_get_intpte(l3e), pfn);
+    printk(" L3[0x%03lx] = %"PRIpte" %08lx\n",
+           l3_table_offset(addr), l3e_get_intpte(l3e), pfn);
     unmap_domain_page(l3t);
     if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
         return;
@@ -99,7 +100,8 @@ void show_page_walk(unsigned long addr)
     l2e = l2t[l2_table_offset(addr)];
     mfn = l2e_get_pfn(l2e);
     pfn = get_gpfn_from_mfn(mfn);
-    printk("  L2 = %"PRIpte" %08lx %s\n", l2e_get_intpte(l2e), pfn, 
+    printk(" L2[0x%03lx] = %"PRIpte" %08lx %s\n",
+           l2_table_offset(addr), l2e_get_intpte(l2e), pfn,
            (l2e_get_flags(l2e) & _PAGE_PSE) ? "(PSE)" : "");
     unmap_domain_page(l2t);
     if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ||
@@ -110,7 +112,8 @@ void show_page_walk(unsigned long addr)
     l1e = l1t[l1_table_offset(addr)];
     mfn = l1e_get_pfn(l1e);
     pfn = get_gpfn_from_mfn(mfn);
-    printk("   L1 = %"PRIpte" %08lx\n", l1e_get_intpte(l1e), pfn);
+    printk(" L1[0x%03lx] = %"PRIpte" %08lx\n",
+           l1_table_offset(addr), l1e_get_intpte(l1e), pfn);
     unmap_domain_page(l1t);
 }
 
diff -r 5fc1fe790835 -r 45a84091144e xen/arch/x86/x86_64/traps.c
--- a/xen/arch/x86/x86_64/traps.c       Sat Aug 19 17:07:54 2006 +0100
+++ b/xen/arch/x86/x86_64/traps.c       Sun Aug 20 17:55:33 2006 +0100
@@ -84,7 +84,7 @@ void show_page_walk(unsigned long addr)
     l4e = l4t[l4_table_offset(addr)];
     mfn = l4e_get_pfn(l4e);
     pfn = get_gpfn_from_mfn(mfn);
-    printk(" L4[0x%lx] = %"PRIpte" %016lx\n",
+    printk(" L4[0x%03lx] = %"PRIpte" %016lx\n",
            l4_table_offset(addr), l4e_get_intpte(l4e), pfn);
     if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) )
         return;
@@ -93,7 +93,7 @@ void show_page_walk(unsigned long addr)
     l3e = l3t[l3_table_offset(addr)];
     mfn = l3e_get_pfn(l3e);
     pfn = get_gpfn_from_mfn(mfn);
-    printk("  L3[0x%lx] = %"PRIpte" %016lx\n",
+    printk(" L3[0x%03lx] = %"PRIpte" %016lx\n",
            l3_table_offset(addr), l3e_get_intpte(l3e), pfn);
     if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
         return;
@@ -102,7 +102,7 @@ void show_page_walk(unsigned long addr)
     l2e = l2t[l2_table_offset(addr)];
     mfn = l2e_get_pfn(l2e);
     pfn = get_gpfn_from_mfn(mfn);
-    printk("   L2[0x%lx] = %"PRIpte" %016lx %s\n",
+    printk(" L2[0x%03lx] = %"PRIpte" %016lx %s\n",
            l2_table_offset(addr), l2e_get_intpte(l2e), pfn,
            (l2e_get_flags(l2e) & _PAGE_PSE) ? "(PSE)" : "");
     if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ||
@@ -113,7 +113,7 @@ void show_page_walk(unsigned long addr)
     l1e = l1t[l1_table_offset(addr)];
     mfn = l1e_get_pfn(l1e);
     pfn = get_gpfn_from_mfn(mfn);
-    printk("    L1[0x%lx] = %"PRIpte" %016lx\n",
+    printk(" L1[0x%03lx] = %"PRIpte" %016lx\n",
            l1_table_offset(addr), l1e_get_intpte(l1e), pfn);
 }
 
diff -r 5fc1fe790835 -r 45a84091144e xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h      Sat Aug 19 17:07:54 2006 +0100
+++ b/xen/include/asm-x86/domain.h      Sun Aug 20 17:55:33 2006 +0100
@@ -57,6 +57,34 @@ extern void toggle_guest_mode(struct vcp
  */
 extern void hypercall_page_initialise(struct domain *d, void *);
 
+struct shadow_domain {
+    u32               mode;  /* flags to control shadow operation */
+    spinlock_t        lock;  /* shadow2 domain lock */
+    int               locker; /* processor which holds the lock */
+    const char       *locker_function; /* Func that took it */
+    struct list_head  freelists[SHADOW2_MAX_ORDER + 1]; 
+    struct list_head  p2m_freelist;
+    struct list_head  p2m_inuse;
+    struct list_head  toplevel_shadows;
+    unsigned int      total_pages;  /* number of pages allocated */
+    unsigned int      free_pages;   /* number of pages on freelists */
+    unsigned int      p2m_pages;    /* number of pages in p2m map */
+
+    /* Shadow2 hashtable */
+    struct shadow2_hash_entry *hash_table;
+    struct shadow2_hash_entry *hash_freelist;
+    struct shadow2_hash_entry *hash_allocations;
+    int hash_walking;  /* Some function is walking the hash table */
+
+    /* Shadow log-dirty bitmap */
+    unsigned long *dirty_bitmap;
+    unsigned int dirty_bitmap_size;  /* in pages, bit per page */
+
+    /* Shadow log-dirty mode stats */
+    unsigned int fault_count;
+    unsigned int dirty_count;
+};
+
 struct arch_domain
 {
     l1_pgentry_t *mm_perdomain_pt;
@@ -79,32 +107,7 @@ struct arch_domain
     /* Shadow-translated guest: Pseudophys base address of reserved area. */
     unsigned long first_reserved_pfn;
 
-    /* Shadow2 stuff */
-    u32               shadow2_mode;  /* flags to control shadow operation */
-    spinlock_t        shadow2_lock;  /* shadow2 domain lock */
-    int               shadow2_locker; /* processor which holds the lock */
-    const char       *shadow2_locker_function; /* Func that took it */
-    struct list_head  shadow2_freelists[SHADOW2_MAX_ORDER + 1]; 
-    struct list_head  shadow2_p2m_freelist;
-    struct list_head  shadow2_p2m_inuse;
-    struct list_head  shadow2_toplevel_shadows;
-    unsigned int      shadow2_total_pages;  /* number of pages allocated */
-    unsigned int      shadow2_free_pages;   /* number of pages on freelists */
-    unsigned int      shadow2_p2m_pages;    /* number of pages in p2m map */
-
-    /* Shadow2 hashtable */
-    struct shadow2_hash_entry *shadow2_hash_table;
-    struct shadow2_hash_entry *shadow2_hash_freelist;
-    struct shadow2_hash_entry *shadow2_hash_allocations;
-    int shadow2_hash_walking;  /* Some function is walking the hash table */
-
-    /* Shadow log-dirty bitmap */
-    unsigned long *shadow_dirty_bitmap;
-    unsigned int shadow_dirty_bitmap_size;  /* in pages, bit per page */
-
-    /* Shadow log-dirty mode stats */
-    unsigned int shadow_fault_count;
-    unsigned int shadow_dirty_count;
+    struct shadow_domain shadow2;
 
     /* Shadow translated domain: P2M mapping */
     pagetable_t phys_table;
@@ -130,6 +133,21 @@ struct pae_l3_cache { };
 #define pae_l3_cache_init(c) ((void)0)
 #endif
 
+struct shadow_vcpu {
+    /* Pointers to mode-specific entry points. */
+    struct shadow2_paging_mode *mode;
+    /* Last MFN that we emulated a write to. */
+    unsigned long last_emulated_mfn;
+    /* HVM guest: paging enabled (CR0.PG)?  */
+    unsigned int hvm_paging_enabled:1;
+    /* Emulated fault needs to be propagated to guest? */
+    unsigned int propagate_fault:1;
+#if CONFIG_PAGING_LEVELS >= 3
+    /* Shadow update requires this PAE cpu to recopy/install its L3 table. */
+    unsigned int pae_flip_pending:1;
+#endif
+};
+
 struct arch_vcpu
 {
     /* Needs 16-byte aligment for FXSAVE/FXRSTOR. */
@@ -183,17 +201,7 @@ struct arch_vcpu
     /* Current LDT details. */
     unsigned long shadow_ldt_mapcnt;
 
-    /* Shadow2 stuff */
-    /* -- pointers to mode-specific entry points */
-    struct shadow2_entry_points *shadow2; 
-    unsigned long last_emulated_mfn;    /* last mfn we emulated a write to */
-    u8 shadow2_propagate_fault;         /* emulated fault needs to be */
-                                        /* propagated to guest */
-#if CONFIG_PAGING_LEVELS >= 3
-    u8 shadow2_pae_flip_pending;        /* shadow update requires this PAE cpu
-                                         * to recopy/install its L3 table.
-                                         */
-#endif
+    struct shadow_vcpu shadow2;
 } __cacheline_aligned;
 
 /* shorthands to improve code legibility */
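 
The regrouping above replaces the shadow2_*-prefixed members of arch_domain with a single embedded struct shadow_domain (and likewise struct shadow_vcpu in arch_vcpu). A stand-alone mirror of the pattern, using illustrative names and a hypothetical helper, just to show how call sites read after the change:

    /* Illustrative stand-ins, not Xen's real structures. */
    struct my_shadow_domain {
        unsigned int total_pages;   /* pages allocated to the shadow pool */
        unsigned int free_pages;    /* pages currently on the freelists   */
    };

    struct my_arch_domain {
        struct my_shadow_domain shadow2;   /* was: shadow2_total_pages, ... */
    };

    /* Hypothetical helper: field access now goes through the sub-struct. */
    static unsigned int shadow_pages_in_use(const struct my_arch_domain *a)
    {
        return a->shadow2.total_pages - a->shadow2.free_pages;
    }
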
diff -r 5fc1fe790835 -r 45a84091144e xen/include/asm-x86/shadow2-multi.h
--- a/xen/include/asm-x86/shadow2-multi.h       Sat Aug 19 17:07:54 2006 +0100
+++ b/xen/include/asm-x86/shadow2-multi.h       Sun Aug 20 17:55:33 2006 +0100
@@ -112,5 +112,5 @@ SHADOW2_INTERNAL_NAME(sh2_destroy_monito
     (struct vcpu *v, mfn_t mmfn);
 #endif
 
-extern struct shadow2_entry_points 
-SHADOW2_INTERNAL_NAME(shadow2_entry, SHADOW_LEVELS, GUEST_LEVELS);
+extern struct shadow2_paging_mode 
+SHADOW2_INTERNAL_NAME(sh2_paging_mode, SHADOW_LEVELS, GUEST_LEVELS);
diff -r 5fc1fe790835 -r 45a84091144e xen/include/asm-x86/shadow2-private.h
--- a/xen/include/asm-x86/shadow2-private.h     Sat Aug 19 17:07:54 2006 +0100
+++ b/xen/include/asm-x86/shadow2-private.h     Sun Aug 20 17:55:33 2006 +0100
@@ -200,40 +200,40 @@ enum sh2_log_type { log_slow = 0, log_fa
 /* Alloc and zero the logs */
 static inline void sh2_init_log(struct vcpu *v) 
 {
-    if ( unlikely(!v->arch.shadow2_action_log) ) 
-        v->arch.shadow2_action_log = xmalloc_array(sh2_log_t, 2);
-    ASSERT(v->arch.shadow2_action_log);
-    memset(v->arch.shadow2_action_log, 0, 2 * sizeof (sh2_log_t));
+    if ( unlikely(!v->arch.shadow2.action_log) ) 
+        v->arch.shadow2.action_log = xmalloc_array(sh2_log_t, 2);
+    ASSERT(v->arch.shadow2.action_log);
+    memset(v->arch.shadow2.action_log, 0, 2 * sizeof (sh2_log_t));
 }
 
 /* Log an A&D-bit update */
 static inline void sh2_log_ad(struct vcpu *v, paddr_t e, unsigned int level)
 {
-    v->arch.shadow2_action_log[v->arch.shadow2_action_index].ad[level] = e;
+    v->arch.shadow2.action_log[v->arch.shadow2.action_index].ad[level] = e;
 }
 
 /* Log an MMIO address */
 static inline void sh2_log_mmio(struct vcpu *v, paddr_t m)
 {
-    v->arch.shadow2_action_log[v->arch.shadow2_action_index].mmio = m;
+    v->arch.shadow2.action_log[v->arch.shadow2.action_index].mmio = m;
 }
 
 /* Log the result */
 static inline void sh2_log_rv(struct vcpu *v, int rv)
 {
-    v->arch.shadow2_action_log[v->arch.shadow2_action_index].rv = rv;
+    v->arch.shadow2.action_log[v->arch.shadow2.action_index].rv = rv;
 }
 
 /* Set which mode we're in */
 static inline void sh2_set_log_mode(struct vcpu *v, enum sh2_log_type t) 
 {
-    v->arch.shadow2_action_index = t;
+    v->arch.shadow2.action_index = t;
 }
 
 /* Know not to take action, because we're only checking the mechanism */
 static inline int sh2_take_no_action(struct vcpu *v) 
 {
-    return (v->arch.shadow2_action_index == log_fast);
+    return (v->arch.shadow2.action_index == log_fast);
 }
 
 #else /* Non-paranoid mode: these logs do not exist */
@@ -400,13 +400,13 @@ sh2_mfn_is_dirty(struct domain *d, mfn_t
 {
     unsigned long pfn;
     ASSERT(shadow2_mode_log_dirty(d));
-    ASSERT(d->arch.shadow_dirty_bitmap != NULL);
+    ASSERT(d->arch.shadow2.dirty_bitmap != NULL);
 
     /* We /really/ mean PFN here, even for non-translated guests. */
     pfn = get_gpfn_from_mfn(mfn_x(gmfn));
     if ( likely(VALID_M2P(pfn))
-         && likely(pfn < d->arch.shadow_dirty_bitmap_size) 
-         && test_bit(pfn, d->arch.shadow_dirty_bitmap) )
+         && likely(pfn < d->arch.shadow2.dirty_bitmap_size) 
+         && test_bit(pfn, d->arch.shadow2.dirty_bitmap) )
         return 1;
 
     return 0;
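 
sh2_mfn_is_dirty() above treats the log-dirty bitmap as one bit per guest pfn and reports "clean" for any pfn that is invalid or beyond the bitmap. A minimal stand-alone sketch of that test, using plain-C stand-ins for Xen's test_bit() and bounds handling:

    #include <limits.h>

    #define MY_BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    /* Plain-C stand-in for test_bit(). */
    static int my_test_bit(const unsigned long *bm, unsigned long bit)
    {
        return (bm[bit / MY_BITS_PER_LONG] >> (bit % MY_BITS_PER_LONG)) & 1;
    }

    /* Hypothetical helper: out-of-range pfns are reported as clean. */
    static int my_pfn_is_dirty(const unsigned long *dirty_bitmap,
                               unsigned long bitmap_size_in_bits,
                               unsigned long pfn)
    {
        if ( dirty_bitmap == NULL || pfn >= bitmap_size_in_bits )
            return 0;
        return my_test_bit(dirty_bitmap, pfn);
    }
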
diff -r 5fc1fe790835 -r 45a84091144e xen/include/asm-x86/shadow2-types.h
--- a/xen/include/asm-x86/shadow2-types.h       Sat Aug 19 17:07:54 2006 +0100
+++ b/xen/include/asm-x86/shadow2-types.h       Sun Aug 20 17:55:33 2006 +0100
@@ -507,7 +507,7 @@ struct shadow2_walk_t
 #define sh2_unhook_32b_mappings     INTERNAL_NAME(sh2_unhook_32b_mappings)
 #define sh2_unhook_pae_mappings     INTERNAL_NAME(sh2_unhook_pae_mappings)
 #define sh2_unhook_64b_mappings     INTERNAL_NAME(sh2_unhook_64b_mappings)
-#define shadow2_entry               INTERNAL_NAME(shadow2_entry)
+#define sh2_paging_mode             INTERNAL_NAME(sh2_paging_mode)
 #define sh2_detach_old_tables       INTERNAL_NAME(sh2_detach_old_tables)
 #define sh2_x86_emulate_write       INTERNAL_NAME(sh2_x86_emulate_write)
 #define sh2_x86_emulate_cmpxchg     INTERNAL_NAME(sh2_x86_emulate_cmpxchg)
diff -r 5fc1fe790835 -r 45a84091144e xen/include/asm-x86/shadow2.h
--- a/xen/include/asm-x86/shadow2.h     Sat Aug 19 17:07:54 2006 +0100
+++ b/xen/include/asm-x86/shadow2.h     Sun Aug 20 17:55:33 2006 +0100
@@ -43,11 +43,11 @@
  * requires VT or similar mechanisms */
 #define SHM2_external  (DOM0_SHADOW2_CONTROL_FLAG_EXTERNAL << SHM2_shift)
 
-#define shadow2_mode_enabled(_d)   ((_d)->arch.shadow2_mode)
-#define shadow2_mode_refcounts(_d) ((_d)->arch.shadow2_mode & SHM2_refcounts)
-#define shadow2_mode_log_dirty(_d) ((_d)->arch.shadow2_mode & SHM2_log_dirty)
-#define shadow2_mode_translate(_d) ((_d)->arch.shadow2_mode & SHM2_translate)
-#define shadow2_mode_external(_d)  ((_d)->arch.shadow2_mode & SHM2_external)
+#define shadow2_mode_enabled(_d)   ((_d)->arch.shadow2.mode)
+#define shadow2_mode_refcounts(_d) ((_d)->arch.shadow2.mode & SHM2_refcounts)
+#define shadow2_mode_log_dirty(_d) ((_d)->arch.shadow2.mode & SHM2_log_dirty)
+#define shadow2_mode_translate(_d) ((_d)->arch.shadow2.mode & SHM2_translate)
+#define shadow2_mode_external(_d)  ((_d)->arch.shadow2.mode & SHM2_external)
 
 /* Xen traps & emulates all reads of all page table pages:
  *not yet supported
@@ -92,34 +92,34 @@
 
 #define shadow2_lock_init(_d)                                   \
     do {                                                        \
-        spin_lock_init(&(_d)->arch.shadow2_lock);               \
-        (_d)->arch.shadow2_locker = -1;                         \
-        (_d)->arch.shadow2_locker_function = "nobody";          \
+        spin_lock_init(&(_d)->arch.shadow2.lock);               \
+        (_d)->arch.shadow2.locker = -1;                         \
+        (_d)->arch.shadow2.locker_function = "nobody";          \
     } while (0)
 
 #define shadow2_lock_is_acquired(_d)                            \
-    (current->processor == (_d)->arch.shadow2_locker)
+    (current->processor == (_d)->arch.shadow2.locker)
 
 #define shadow2_lock(_d)                                                 \
     do {                                                                 \
-        if ( unlikely((_d)->arch.shadow2_locker == current->processor) ) \
+        if ( unlikely((_d)->arch.shadow2.locker == current->processor) ) \
         {                                                                \
             printk("Error: shadow2 lock held by %s\n",                   \
-                   (_d)->arch.shadow2_locker_function);                  \
+                   (_d)->arch.shadow2.locker_function);                  \
             BUG();                                                       \
         }                                                                \
-        spin_lock(&(_d)->arch.shadow2_lock);                             \
-        ASSERT((_d)->arch.shadow2_locker == -1);                         \
-        (_d)->arch.shadow2_locker = current->processor;                  \
-        (_d)->arch.shadow2_locker_function = __func__;                   \
+        spin_lock(&(_d)->arch.shadow2.lock);                             \
+        ASSERT((_d)->arch.shadow2.locker == -1);                         \
+        (_d)->arch.shadow2.locker = current->processor;                  \
+        (_d)->arch.shadow2.locker_function = __func__;                   \
     } while (0)
 
 #define shadow2_unlock(_d)                                              \
     do {                                                                \
-        ASSERT((_d)->arch.shadow2_locker == current->processor);        \
-        (_d)->arch.shadow2_locker = -1;                                 \
-        (_d)->arch.shadow2_locker_function = "nobody";                  \
-        spin_unlock(&(_d)->arch.shadow2_lock);                          \
+        ASSERT((_d)->arch.shadow2.locker == current->processor);        \
+        (_d)->arch.shadow2.locker = -1;                                 \
+        (_d)->arch.shadow2.locker_function = "nobody";                  \
+        spin_unlock(&(_d)->arch.shadow2.lock);                          \
     } while (0)
 
 /* 
@@ -232,7 +232,7 @@ shadow2_vcpu_mode_translate(struct vcpu 
     // enabled.  (HVM vcpu's with paging disabled are using the p2m table as
     // its paging table, so no translation occurs in this case.)
     //
-    return v->vcpu_flags & VCPUF_shadow2_translate;
+    return v->arch.shadow2.hvm_paging_enabled;
 }
 
 
@@ -240,7 +240,7 @@ shadow2_vcpu_mode_translate(struct vcpu 
 /* Mode-specific entry points into the shadow code */
 
 struct x86_emulate_ctxt;
-struct shadow2_entry_points {
+struct shadow2_paging_mode {
     int           (*page_fault            )(struct vcpu *v, unsigned long va,
                                             struct cpu_user_regs *regs);
     int           (*invlpg                )(struct vcpu *v, unsigned long va);
@@ -285,8 +285,8 @@ struct shadow2_entry_points {
 
 static inline int shadow2_guest_paging_levels(struct vcpu *v)
 {
-    ASSERT(v->arch.shadow2 != NULL);
-    return v->arch.shadow2->guest_levels;
+    ASSERT(v->arch.shadow2.mode != NULL);
+    return v->arch.shadow2.mode->guest_levels;
 }
 
 /**************************************************************************/
@@ -337,7 +337,7 @@ shadow2_fault(unsigned long va, struct c
 {
     struct vcpu *v = current;
     perfc_incrc(shadow2_fault);
-    return v->arch.shadow2->page_fault(v, va, regs);
+    return v->arch.shadow2.mode->page_fault(v, va, regs);
 }
 
 static inline int
@@ -346,7 +346,7 @@ shadow2_invlpg(struct vcpu *v, unsigned 
  * instruction should be issued on the hardware, or 0 if it's safe not
  * to do so. */
 {
-    return v->arch.shadow2->invlpg(v, va);
+    return v->arch.shadow2.mode->invlpg(v, va);
 }
 
 static inline unsigned long
@@ -354,7 +354,7 @@ shadow2_gva_to_gpa(struct vcpu *v, unsig
 /* Called to translate a guest virtual address to what the *guest*
  * pagetables would map it to. */
 {
-    return v->arch.shadow2->gva_to_gpa(v, va);
+    return v->arch.shadow2.mode->gva_to_gpa(v, va);
 }
 
 static inline unsigned long
@@ -362,7 +362,7 @@ shadow2_gva_to_gfn(struct vcpu *v, unsig
 /* Called to translate a guest virtual address to what the *guest*
  * pagetables would map it to. */
 {
-    return v->arch.shadow2->gva_to_gfn(v, va);
+    return v->arch.shadow2.mode->gva_to_gfn(v, va);
 }
 
 static inline void
@@ -371,7 +371,7 @@ shadow2_update_cr3(struct vcpu *v)
  * Called when the guest changes CR3. */
 {
     shadow2_lock(v->domain);
-    v->arch.shadow2->update_cr3(v);
+    v->arch.shadow2.mode->update_cr3(v);
     shadow2_unlock(v->domain);
 }
 
@@ -425,19 +425,20 @@ static inline void
 static inline void
 shadow2_detach_old_tables(struct vcpu *v)
 {
-    v->arch.shadow2->detach_old_tables(v);
+    if ( v->arch.shadow2.mode )
+        v->arch.shadow2.mode->detach_old_tables(v);
 }
 
 static inline mfn_t
 shadow2_make_monitor_table(struct vcpu *v)
 {
-    return v->arch.shadow2->make_monitor_table(v);
+    return v->arch.shadow2.mode->make_monitor_table(v);
 }
 
 static inline void
 shadow2_destroy_monitor_table(struct vcpu *v, mfn_t mmfn)
 {
-    v->arch.shadow2->destroy_monitor_table(v, mmfn);
+    v->arch.shadow2.mode->destroy_monitor_table(v, mmfn);
 }
 
 /* Validate a pagetable change from the guest and update the shadows. */
@@ -526,7 +527,7 @@ unsigned int shadow2_set_allocation(stru
 /* Return the size of the shadow2 pool, rounded up to the nearest MB */
 static inline unsigned int shadow2_get_allocation(struct domain *d)
 {
-    unsigned int pg = d->arch.shadow2_total_pages;
+    unsigned int pg = d->arch.shadow2.total_pages;
     return ((pg >> (20 - PAGE_SHIFT))
             + ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0));
 }
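 
The shadow2_paging_mode rename above is the usual C "ops table" pattern: mode-specific operations sit behind a struct of function pointers stored in vcpu.arch.shadow2.mode, and thin inline wrappers such as shadow2_fault() forward through it, with shadow2_detach_old_tables() now guarding against a mode that has not been installed yet. A stand-alone sketch of the pattern with illustrative names:

    struct my_vcpu;

    /* Table of mode-specific operations (stand-in for shadow2_paging_mode). */
    struct my_paging_mode {
        int  (*page_fault)(struct my_vcpu *v, unsigned long va);
        void (*detach_old_tables)(struct my_vcpu *v);
        unsigned int guest_levels;
    };

    struct my_vcpu {
        const struct my_paging_mode *mode;   /* NULL until a mode is installed */
    };

    static int my_fault(struct my_vcpu *v, unsigned long va)
    {
        return v->mode->page_fault(v, va);   /* only valid once mode is set */
    }

    static void my_detach_old_tables(struct my_vcpu *v)
    {
        if ( v->mode )                       /* mirrors the NULL guard above */
            v->mode->detach_old_tables(v);
    }
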
diff -r 5fc1fe790835 -r 45a84091144e xen/include/xen/sched.h
--- a/xen/include/xen/sched.h   Sat Aug 19 17:07:54 2006 +0100
+++ b/xen/include/xen/sched.h   Sun Aug 20 17:55:33 2006 +0100
@@ -379,9 +379,6 @@ extern struct domain *domain_list;
 /* VCPU is blocked awaiting an event to be consumed by Xen. */
 #define _VCPUF_blocked_in_xen  12
 #define VCPUF_blocked_in_xen   (1UL<<_VCPUF_blocked_in_xen)
- /* HVM vcpu thinks CR0.PG == 0 */
-#define _VCPUF_shadow2_translate 13
-#define VCPUF_shadow2_translate  (1UL<<_VCPUF_shadow2_translate)
 
 /*
  * Per-domain flags (domain_flags).
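 
With the flag removed above, "does this HVM vcpu think CR0.PG is set?" is now answered by the hvm_paging_enabled bit-field inside vcpu.arch.shadow2 rather than by a bit in the shared vcpu_flags word. A stand-alone sketch of that shape, with illustrative names:

    /* Per-vcpu shadow state keeps its own single-bit flags (stand-in names). */
    struct my_shadow_vcpu {
        unsigned int hvm_paging_enabled:1;   /* CR0.PG as seen by the guest */
        unsigned int propagate_fault:1;      /* emulated fault to propagate */
    };

    /* Hypothetical helper: translation applies only while the guest
     * believes paging is enabled. */
    static int my_vcpu_mode_translate(const struct my_shadow_vcpu *s)
    {
        return s->hvm_paging_enabled;
    }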

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog