
[Xen-changelog] [xen-unstable] xen: More 'IS_COMPAT' cleanups.



# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1177682815 -3600
# Node ID 405573aedd24788cf28d4f0cad5648a9e297ca3f
# Parent  53b9883bbcc36d8756945b28cb20cca2ff0af302
xen: More 'IS_COMPAT' cleanups.

Replace the catch-all IS_COMPAT() test with purpose-specific
predicates (is_pv_32bit_domain/vcpu(), is_pv_32on64_domain/vcpu(),
has_32bit_shinfo()), and split the generic domain-wide is_compat
flag into the x86-specific fields arch.is_32bit_pv and
arch.has_32bit_shinfo.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/domain.c             |   57 ++++++++++++++---------
 xen/arch/x86/domain_build.c       |   53 +++++++++++----------
 xen/arch/x86/domctl.c             |    8 +--
 xen/arch/x86/hvm/hvm.c            |    4 -
 xen/arch/x86/machine_kexec.c      |    5 --
 xen/arch/x86/mm.c                 |   62 ++++++++++++-------------
 xen/arch/x86/mm/shadow/common.c   |    6 +-
 xen/arch/x86/mm/shadow/multi.c    |   22 ++++----
 xen/arch/x86/traps.c              |   18 +++----
 xen/arch/x86/x86_64/asm-offsets.c |    2 
 xen/arch/x86/x86_64/entry.S       |    8 +--
 xen/arch/x86/x86_64/mm.c          |    6 +-
 xen/arch/x86/x86_64/traps.c       |    4 -
 xen/include/asm-x86/desc.h        |    4 -
 xen/include/asm-x86/domain.h      |   23 +++++----
 xen/include/asm-x86/ldt.h         |    2 
 xen/include/asm-x86/shared.h      |   94 +++++++++++++++++++-------------------
 xen/include/asm-x86/x86_64/page.h |    4 -
 xen/include/asm-x86/x86_64/regs.h |   10 ++--
 xen/include/xen/sched.h           |    8 ---
 xen/include/xen/shared.h          |   20 ++++----
 21 files changed, 217 insertions(+), 203 deletions(-)
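
For reference, the predicate layering this patch introduces, condensed
from the xen/include/asm-x86/domain.h hunk below:

    #define has_32bit_shinfo(d)    ((d)->arch.has_32bit_shinfo)
    #define is_pv_32bit_domain(d)  ((d)->arch.is_32bit_pv)
    #define is_pv_32bit_vcpu(v)    (is_pv_32bit_domain((v)->domain))
    #ifdef __x86_64__
    #define is_pv_32on64_domain(d) (is_pv_32bit_domain(d)) /* 32-bit PV on 64-bit Xen */
    #else
    #define is_pv_32on64_domain(d) (0)          /* cannot happen on 32-bit Xen */
    #endif
    #define is_pv_32on64_vcpu(v)   (is_pv_32on64_domain((v)->domain))

Call sites that care about the guest's pointer width use the
is_pv_32bit_* forms; call sites that exist only because 64-bit Xen is
hosting a 32-bit guest use the is_pv_32on64_* forms, which fold to a
constant 0 on 32-bit builds; and code that touches the shared-info
layout (including for HVM guests) uses has_32bit_shinfo().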

diff -r 53b9883bbcc3 -r 405573aedd24 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Fri Apr 27 14:45:06 2007 +0100
+++ b/xen/arch/x86/domain.c     Fri Apr 27 15:06:55 2007 +0100
@@ -272,10 +272,10 @@ int switch_native(struct domain *d)
         return -EINVAL;
     if ( !may_switch_mode(d) )
         return -EACCES;
-    if ( !IS_COMPAT(d) )
+    if ( !is_pv_32on64_domain(d) )
         return 0;
 
-    d->is_compat = 0;
+    d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
     release_arg_xlat_area(d);
 
     /* switch gdt */
@@ -304,10 +304,10 @@ int switch_compat(struct domain *d)
         return -ENOSYS;
     if ( !may_switch_mode(d) )
         return -EACCES;
-    if ( IS_COMPAT(d) )
+    if ( is_pv_32on64_domain(d) )
         return 0;
 
-    d->is_compat = 1;
+    d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 1;
 
     /* switch gdt */
     gdt_l1e = l1e_from_page(virt_to_page(compat_gdt_table), PAGE_HYPERVISOR);
@@ -372,12 +372,12 @@ int vcpu_initialise(struct vcpu *v)
     v->arch.perdomain_ptes =
         d->arch.mm_perdomain_pt + (v->vcpu_id << GDT_LDT_VCPU_SHIFT);
 
-    return (pv_32on64_vcpu(v) ? setup_compat_l4(v) : 0);
+    return (is_pv_32on64_vcpu(v) ? setup_compat_l4(v) : 0);
 }
 
 void vcpu_destroy(struct vcpu *v)
 {
-    if ( pv_32on64_vcpu(v) )
+    if ( is_pv_32on64_vcpu(v) )
         release_compat_l4(v);
 }
 
@@ -453,7 +453,20 @@ int arch_domain_create(struct domain *d)
             virt_to_page(d->shared_info), d, XENSHARE_writable);
     }
 
-    return is_hvm_domain(d) ? hvm_domain_initialise(d) : 0;
+    if ( is_hvm_domain(d) )
+    {
+        if ( (rc = hvm_domain_initialise(d)) != 0 )
+            goto fail;
+    }
+    else
+    {
+        /* 32-bit PV guest by default only if Xen is not 64-bit. */
+        d->arch.is_32bit_pv = d->arch.has_32bit_shinfo =
+            (CONFIG_PAGING_LEVELS != 4);
+    }
+
+
+    return 0;
 
  fail:
     free_xenheap_page(d->shared_info);
@@ -489,7 +502,7 @@ void arch_domain_destroy(struct domain *
     free_domheap_page(virt_to_page(d->arch.mm_perdomain_l3));
 #endif
 
-    if ( pv_32on64_domain(d) )
+    if ( is_pv_32on64_domain(d) )
         release_arg_xlat_area(d);
 
     free_xenheap_page(d->shared_info);
@@ -506,7 +519,7 @@ int arch_set_info_guest(
 
     /* The context is a compat-mode one if the target domain is compat-mode;
      * we expect the tools to DTRT even in compat-mode callers. */
-    compat = pv_32on64_domain(d);
+    compat = is_pv_32on64_domain(d);
 
 #ifdef CONFIG_COMPAT
 #define c(fld) (compat ? (c.cmp->fld) : (c.nat->fld))
@@ -831,7 +844,7 @@ static void load_segments(struct vcpu *n
             all_segs_okay &= loadsegment(gs, nctxt->user_regs.gs);
     }
 
-    if ( !IS_COMPAT(n->domain) )
+    if ( !is_pv_32on64_domain(n->domain) )
     {
         /* This can only be non-zero if selector is NULL. */
         if ( nctxt->fs_base )
@@ -865,7 +878,7 @@ static void load_segments(struct vcpu *n
             (unsigned long *)nctxt->kernel_sp;
         unsigned long cs_and_mask, rflags;
 
-        if ( IS_COMPAT(n->domain) )
+        if ( is_pv_32on64_domain(n->domain) )
         {
             unsigned int *esp = ring_1(regs) ?
                                 (unsigned int *)regs->rsp :
@@ -975,7 +988,7 @@ static void save_segments(struct vcpu *v
     if ( regs->es )
         dirty_segment_mask |= DIRTY_ES;
 
-    if ( regs->fs || IS_COMPAT(v->domain) )
+    if ( regs->fs || is_pv_32on64_domain(v->domain) )
     {
         dirty_segment_mask |= DIRTY_FS;
         ctxt->fs_base = 0; /* != 0 selector kills fs_base */
@@ -985,7 +998,7 @@ static void save_segments(struct vcpu *v
         dirty_segment_mask |= DIRTY_FS_BASE;
     }
 
-    if ( regs->gs || IS_COMPAT(v->domain) )
+    if ( regs->gs || is_pv_32on64_domain(v->domain) )
     {
         dirty_segment_mask |= DIRTY_GS;
         ctxt->gs_base_user = 0; /* != 0 selector kills gs_base_user */
@@ -1121,15 +1134,17 @@ void context_switch(struct vcpu *prev, s
         __context_switch();
 
 #ifdef CONFIG_COMPAT
-        if ( is_idle_vcpu(prev)
-             || IS_COMPAT(prev->domain) != IS_COMPAT(next->domain) )
+        if ( is_idle_vcpu(prev) ||
+             (is_pv_32on64_domain(prev->domain) !=
+              is_pv_32on64_domain(next->domain)) )
         {
             uint32_t efer_lo, efer_hi;
 
-            local_flush_tlb_one(GDT_VIRT_START(next) + FIRST_RESERVED_GDT_BYTE);
+            local_flush_tlb_one(GDT_VIRT_START(next) +
+                                FIRST_RESERVED_GDT_BYTE);
 
             rdmsr(MSR_EFER, efer_lo, efer_hi);
-            if ( !IS_COMPAT(next->domain) == !(efer_lo & EFER_SCE) )
+            if ( !is_pv_32on64_domain(next->domain) == !(efer_lo & EFER_SCE) )
             {
                 efer_lo ^= EFER_SCE;
                 wrmsr(MSR_EFER, efer_lo, efer_hi);
@@ -1152,7 +1167,7 @@ void context_switch(struct vcpu *prev, s
     /* Update per-VCPU guest runstate shared memory area (if registered). */
     if ( !guest_handle_is_null(runstate_guest(next)) )
     {
-        if ( !IS_COMPAT(next->domain) )
+        if ( !is_pv_32on64_domain(next->domain) )
             __copy_to_guest(runstate_guest(next), &next->runstate, 1);
 #ifdef CONFIG_COMPAT
         else
@@ -1234,7 +1249,7 @@ unsigned long hypercall_create_continuat
 
         for ( i = 0; *p != '\0'; i++ )
             mcs->call.args[i] = next_arg(p, args);
-        if ( IS_COMPAT(current->domain) )
+        if ( is_pv_32on64_domain(current->domain) )
         {
             for ( ; i < 6; i++ )
                 mcs->call.args[i] = 0;
@@ -1247,7 +1262,7 @@ unsigned long hypercall_create_continuat
         regs->eip -= 2;  /* re-execute 'syscall' / 'int 0x82' */
 
 #ifdef __x86_64__
-        if ( !IS_COMPAT(current->domain) )
+        if ( !is_pv_32on64_domain(current->domain) )
         {
             for ( i = 0; *p != '\0'; i++ )
             {
@@ -1448,7 +1463,7 @@ static void vcpu_destroy_pagetables(stru
     unsigned long pfn;
 
 #ifdef __x86_64__
-    if ( pv_32on64_vcpu(v) )
+    if ( is_pv_32on64_vcpu(v) )
     {
         pfn = l4e_get_pfn(*(l4_pgentry_t *)
                           __va(pagetable_get_paddr(v->arch.guest_table)));
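
The EFER update in context_switch() above keeps SYSCALL (EFER.SCE)
enabled only while a native 64-bit PV guest is running. The '!a == !b'
comparison normalises a multi-bit mask against a 0/1 predicate; a
standalone sketch of the idiom (illustrative values, not Xen code):

    #include <stdint.h>

    #define EFER_SCE (1u << 0)              /* SYSCALL-enable bit of MSR_EFER */

    static uint32_t maybe_toggle_sce(uint32_t efer_lo, int next_is_32on64)
    {
        /* Desired: SCE set iff the next guest is native 64-bit. '!x'
         * collapses each side to 0/1, so the raw bit mask compares
         * correctly against the predicate. The test reads "is SCE
         * currently the opposite of what the next guest needs?". */
        if ( !next_is_32on64 == !(efer_lo & EFER_SCE) )
            efer_lo ^= EFER_SCE;            /* mismatch: flip the bit */
        return efer_lo;
    }
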
diff -r 53b9883bbcc3 -r 405573aedd24 xen/arch/x86/domain_build.c
--- a/xen/arch/x86/domain_build.c       Fri Apr 27 14:45:06 2007 +0100
+++ b/xen/arch/x86/domain_build.c       Fri Apr 27 15:06:55 2007 +0100
@@ -320,11 +320,11 @@ int construct_dom0(struct domain *d,
     }
 
 #ifdef CONFIG_COMPAT
-    if (compat32)
+    if ( compat32 )
     {
         l1_pgentry_t gdt_l1e;
 
-        d->is_compat = 1;
+        d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 1;
         v->vcpu_info = (void *)&d->shared_info->compat.vcpu_info[0];
 
         if ( nr_pages != (unsigned int)nr_pages )
@@ -350,19 +350,19 @@ int construct_dom0(struct domain *d,
 #if CONFIG_PAGING_LEVELS < 4
         unsigned long mask = (1UL << L2_PAGETABLE_SHIFT) - 1;
 #else
-        unsigned long mask = !IS_COMPAT(d)
-                             ? (1UL << L4_PAGETABLE_SHIFT) - 1
-                             : (1UL << L2_PAGETABLE_SHIFT) - 1;
+        unsigned long mask = is_pv_32bit_domain(d)
+                             ? (1UL << L2_PAGETABLE_SHIFT) - 1
+                             : (1UL << L4_PAGETABLE_SHIFT) - 1;
 #endif
 
         value = (parms.virt_hv_start_low + mask) & ~mask;
 #ifdef CONFIG_COMPAT
         HYPERVISOR_COMPAT_VIRT_START(d) =
             max_t(unsigned int, m2p_compat_vstart, value);
-        d->arch.physaddr_bitsize = !IS_COMPAT(d) ? 64 :
+        d->arch.physaddr_bitsize = !is_pv_32on64_domain(d) ? 64 :
             fls((1UL << 32) - HYPERVISOR_COMPAT_VIRT_START(d)) - 1
             + (PAGE_SIZE - 2);
-        if ( value > (!IS_COMPAT(d) ?
+        if ( value > (!is_pv_32on64_domain(d) ?
                       HYPERVISOR_VIRT_START :
                       __HYPERVISOR_COMPAT_VIRT_START) )
 #else
@@ -387,7 +387,7 @@ int construct_dom0(struct domain *d,
     vinitrd_start    = round_pgup(vkern_end);
     vinitrd_end      = vinitrd_start + initrd_len;
     vphysmap_start   = round_pgup(vinitrd_end);
-    vphysmap_end     = vphysmap_start + (nr_pages * (!IS_COMPAT(d) ?
+    vphysmap_end     = vphysmap_start + (nr_pages * (!is_pv_32on64_domain(d) ?
                                                      sizeof(unsigned long) :
                                                      sizeof(unsigned int)));
     vstartinfo_start = round_pgup(vphysmap_end);
@@ -418,7 +418,7 @@ int construct_dom0(struct domain *d,
        ((_l) & ~((1UL<<(_s))-1))) >> (_s))
         if ( (1 + /* # L4 */
               NR(v_start, v_end, L4_PAGETABLE_SHIFT) + /* # L3 */
-              (!IS_COMPAT(d) ?
+              (!is_pv_32on64_domain(d) ?
                NR(v_start, v_end, L3_PAGETABLE_SHIFT) : /* # L2 */
                4) + /* # compat L2 */
               NR(v_start, v_end, L2_PAGETABLE_SHIFT))  /* # L1 */
@@ -613,7 +613,7 @@ int construct_dom0(struct domain *d,
 #elif defined(__x86_64__)
 
     /* Overlap with Xen protected area? */
-    if ( !IS_COMPAT(d) ?
+    if ( !is_pv_32on64_domain(d) ?
          ((v_start < HYPERVISOR_VIRT_END) &&
           (v_end > HYPERVISOR_VIRT_START)) :
          (v_end > HYPERVISOR_COMPAT_VIRT_START(d)) )
@@ -622,14 +622,14 @@ int construct_dom0(struct domain *d,
         return -EINVAL;
     }
 
-    if ( IS_COMPAT(d) )
+    if ( is_pv_32on64_domain(d) )
     {
         v->arch.guest_context.failsafe_callback_cs = FLAT_COMPAT_KERNEL_CS;
         v->arch.guest_context.event_callback_cs    = FLAT_COMPAT_KERNEL_CS;
     }
 
     /* WARNING: The new domain must have its 'processor' field filled in! */
-    if ( !IS_COMPAT(d) )
+    if ( !is_pv_32on64_domain(d) )
     {
         maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l4_page_table;
         l4start = l4tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
@@ -647,7 +647,7 @@ int construct_dom0(struct domain *d,
     l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
         l4e_from_paddr(__pa(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR);
     v->arch.guest_table = pagetable_from_paddr(__pa(l4start));
-    if ( IS_COMPAT(d) )
+    if ( is_pv_32on64_domain(d) )
     {
         v->arch.guest_table_user = v->arch.guest_table;
         if ( setup_arg_xlat_area(v, l4start) < 0 )
@@ -689,7 +689,8 @@ int construct_dom0(struct domain *d,
             *l2tab = l2e_from_paddr(__pa(l1start), L2_PROT);
             l2tab++;
         }
-        *l1tab = l1e_from_pfn(mfn, !IS_COMPAT(d) ? L1_PROT : COMPAT_L1_PROT);
+        *l1tab = l1e_from_pfn(mfn, (!is_pv_32on64_domain(d) ?
+                                    L1_PROT : COMPAT_L1_PROT));
         l1tab++;
 
         page = mfn_to_page(mfn);
@@ -701,7 +702,7 @@ int construct_dom0(struct domain *d,
     }
 
 #ifdef CONFIG_COMPAT
-    if ( IS_COMPAT(d) )
+    if ( is_pv_32on64_domain(d) )
     {
         /* Ensure the first four L3 entries are all populated. */
         for ( i = 0, l3tab = l3start; i < 4; ++i, ++l3tab )
@@ -743,7 +744,8 @@ int construct_dom0(struct domain *d,
 
         /* Top-level p.t. is pinned. */
         if ( (page->u.inuse.type_info & PGT_type_mask) ==
-             (!IS_COMPAT(d) ? PGT_l4_page_table : PGT_l3_page_table) )
+             (!is_pv_32on64_domain(d) ?
+              PGT_l4_page_table : PGT_l3_page_table) )
         {
             page->count_info        += 1;
             page->u.inuse.type_info += 1 | PGT_pinned;
@@ -823,7 +825,7 @@ int construct_dom0(struct domain *d,
     si->shared_info = virt_to_maddr(d->shared_info);
 
     si->flags        = SIF_PRIVILEGED | SIF_INITDOMAIN;
-    si->pt_base      = vpt_start + 2 * PAGE_SIZE * !!IS_COMPAT(d);
+    si->pt_base      = vpt_start + 2 * PAGE_SIZE * !!is_pv_32on64_domain(d);
     si->nr_pt_frames = nr_pt_pages;
     si->mfn_list     = vphysmap_start;
     snprintf(si->magic, sizeof(si->magic), "xen-%i.%i-x86_%d%s",
@@ -840,7 +842,7 @@ int construct_dom0(struct domain *d,
         if ( pfn > REVERSE_START )
             mfn = alloc_epfn - (pfn - REVERSE_START);
 #endif
-        if ( !IS_COMPAT(d) )
+        if ( !is_pv_32on64_domain(d) )
             ((unsigned long *)vphysmap_start)[pfn] = mfn;
         else
             ((unsigned int *)vphysmap_start)[pfn] = mfn;
@@ -856,7 +858,7 @@ int construct_dom0(struct domain *d,
 #ifndef NDEBUG
 #define pfn (nr_pages - 1 - (pfn - (alloc_epfn - alloc_spfn)))
 #endif
-            if ( !IS_COMPAT(d) )
+            if ( !is_pv_32on64_domain(d) )
                 ((unsigned long *)vphysmap_start)[pfn] = mfn;
             else
                 ((unsigned int *)vphysmap_start)[pfn] = mfn;
@@ -885,7 +887,7 @@ int construct_dom0(struct domain *d,
     }
 
 #ifdef CONFIG_COMPAT
-    if ( IS_COMPAT(d) )
+    if ( is_pv_32on64_domain(d) )
         xlat_start_info(si, XLAT_start_info_console_dom0);
 #endif
 
@@ -913,11 +915,12 @@ int construct_dom0(struct domain *d,
      *  [EAX,EBX,ECX,EDX,EDI,EBP are zero]
      */
     regs = &v->arch.guest_context.user_regs;
-    regs->ds = regs->es = regs->fs = regs->gs = !IS_COMPAT(d)
-                                                ? FLAT_KERNEL_DS
-                                                : FLAT_COMPAT_KERNEL_DS;
-    regs->ss = !IS_COMPAT(d) ? FLAT_KERNEL_SS : FLAT_COMPAT_KERNEL_SS;
-    regs->cs = !IS_COMPAT(d) ? FLAT_KERNEL_CS : FLAT_COMPAT_KERNEL_CS;
+    regs->ds = regs->es = regs->fs = regs->gs =
+        !is_pv_32on64_domain(d) ? FLAT_KERNEL_DS : FLAT_COMPAT_KERNEL_DS;
+    regs->ss = (!is_pv_32on64_domain(d) ?
+                FLAT_KERNEL_SS : FLAT_COMPAT_KERNEL_SS);
+    regs->cs = (!is_pv_32on64_domain(d) ?
+                FLAT_KERNEL_CS : FLAT_COMPAT_KERNEL_CS);
     regs->eip = parms.virt_entry;
     regs->esp = vstack_end;
     regs->esi = vstartinfo_start;
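
The virt_hv_start_low placement above rounds the requested address up
to an L2 or L4 boundary with the usual '(x + mask) & ~mask' idiom,
where mask is the alignment minus one. With the 2 MiB L2 granularity
used for compat guests (illustrative address):

    unsigned long mask  = (1UL << 21) - 1;               /* L2_PAGETABLE_SHIFT == 21 */
    unsigned long value = (0x00345678UL + mask) & ~mask; /* rounds up to 0x00400000 */
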
diff -r 53b9883bbcc3 -r 405573aedd24 xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c     Fri Apr 27 14:45:06 2007 +0100
+++ b/xen/arch/x86/domctl.c     Fri Apr 27 15:06:55 2007 +0100
@@ -435,12 +435,12 @@ void arch_get_info_guest(struct vcpu *v,
 void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
 {
 #ifdef CONFIG_COMPAT
-#define c(fld) (!IS_COMPAT(v->domain) ? (c.nat->fld) : (c.cmp->fld))
+#define c(fld) (!is_pv_32on64_domain(v->domain) ? (c.nat->fld) : (c.cmp->fld))
 #else
 #define c(fld) (c.nat->fld)
 #endif
 
-    if ( !IS_COMPAT(v->domain) )
+    if ( !is_pv_32on64_domain(v->domain) )
         memcpy(c.nat, &v->arch.guest_context, sizeof(*c.nat));
 #ifdef CONFIG_COMPAT
     else
@@ -455,7 +455,7 @@ void arch_get_info_guest(struct vcpu *v,
 
     if ( is_hvm_vcpu(v) )
     {
-        if ( !IS_COMPAT(v->domain) )
+        if ( !is_pv_32on64_domain(v->domain) )
             hvm_store_cpu_guest_regs(v, &c.nat->user_regs, c.nat->ctrlreg);
 #ifdef CONFIG_COMPAT
         else
@@ -477,7 +477,7 @@ void arch_get_info_guest(struct vcpu *v,
         BUG_ON((c(user_regs.eflags) & EF_IOPL) != 0);
         c(user_regs.eflags |= v->arch.iopl << 12);
 
-        if ( !IS_COMPAT(v->domain) )
+        if ( !is_pv_32on64_domain(v->domain) )
         {
             c.nat->ctrlreg[3] = xen_pfn_to_cr3(
                 pagetable_get_pfn(v->arch.guest_table));
diff -r 53b9883bbcc3 -r 405573aedd24 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Fri Apr 27 14:45:06 2007 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Fri Apr 27 15:06:55 2007 +0100
@@ -1049,15 +1049,13 @@ long do_hvm_op(unsigned long op, XEN_GUE
                 break;
             case HVM_PARAM_CALLBACK_IRQ:
                 hvm_set_callback_via(d, a.value);
-#if defined(__x86_64__)
                 /*
                  * Since this operation is one of the very first executed
                  * by PV drivers on initialisation or after save/restore, it
                  * is a sensible point at which to sample the execution mode of
                  * the guest and latch 32- or 64-bit format for shared state.
                  */
-                d->is_compat = (hvm_guest_x86_mode(current) == 4);
-#endif
+                d->arch.has_32bit_shinfo = (hvm_guest_x86_mode(current) != 8);
                 break;
             }
             d->arch.hvm_domain.params[a.index] = a.value;
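
This hunk is why the old single is_compat flag had to split in two: an
HVM guest may need the 32-bit shared-info layout without ever being a
32-bit PV guest. The combinations the two new fields can take,
summarised from the code in this patch:

    /* d->arch.is_32bit_pv / d->arch.has_32bit_shinfo:
     *   64-bit PV guest:                 0 / 0
     *   32-bit PV guest on 64-bit Xen:   1 / 1  (set together in switch_compat())
     *   HVM guest last seen in 64-bit:   0 / 0
     *   HVM guest in any narrower mode:  0 / 1  (latched here, x86 mode != 8)
     */
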
diff -r 53b9883bbcc3 -r 405573aedd24 xen/arch/x86/machine_kexec.c
--- a/xen/arch/x86/machine_kexec.c      Fri Apr 27 14:45:06 2007 +0100
+++ b/xen/arch/x86/machine_kexec.c      Fri Apr 27 15:06:55 2007 +0100
@@ -44,9 +44,8 @@ int machine_kexec_load(int type, int slo
         else
         {
             /* Odd pages: va for previous ma. */
-            if ( IS_COMPAT(dom0) )
+            if ( is_pv_32on64_domain(dom0) )
             {
-
                 /*
                  * The compatibility bounce code sets up a page table
                  * with a 1-1 mapping of the first 1G of memory so
@@ -119,7 +118,7 @@ void machine_kexec(xen_kexec_image_t *im
 void machine_kexec(xen_kexec_image_t *image)
 {
 #ifdef CONFIG_COMPAT
-    if ( IS_COMPAT(dom0) )
+    if ( is_pv_32on64_domain(dom0) )
     {
         extern void compat_machine_kexec(unsigned long rnk,
                                          unsigned long indirection_page,
diff -r 53b9883bbcc3 -r 405573aedd24 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Fri Apr 27 14:45:06 2007 +0100
+++ b/xen/arch/x86/mm.c Fri Apr 27 15:06:55 2007 +0100
@@ -149,8 +149,8 @@ unsigned long total_pages;
 
 #ifdef CONFIG_COMPAT
 l2_pgentry_t *compat_idle_pg_table_l2 = NULL;
-#define l3_disallow_mask(d) (!IS_COMPAT(d) ? \
-                             L3_DISALLOW_MASK : \
+#define l3_disallow_mask(d) (!is_pv_32on64_domain(d) ?  \
+                             L3_DISALLOW_MASK :         \
                              COMPAT_L3_DISALLOW_MASK)
 #else
 #define l3_disallow_mask(d) L3_DISALLOW_MASK
@@ -721,7 +721,7 @@ get_page_from_l4e(
 #define adjust_guest_l1e(pl1e, d)                                            \
     do {                                                                     \
         if ( likely(l1e_get_flags((pl1e)) & _PAGE_PRESENT) &&                \
-             likely(!IS_COMPAT(d)) )                                         \
+             likely(!is_pv_32on64_domain(d)) )                               \
         {                                                                    \
             /* _PAGE_GUEST_KERNEL page cannot have the Global bit set. */    \
             if ( (l1e_get_flags((pl1e)) & (_PAGE_GUEST_KERNEL|_PAGE_GLOBAL)) \
@@ -738,7 +738,7 @@ get_page_from_l4e(
 #define adjust_guest_l1e(pl1e, d)                               \
     do {                                                        \
         if ( likely(l1e_get_flags((pl1e)) & _PAGE_PRESENT) &&   \
-             likely(!IS_COMPAT(d)) )                            \
+             likely(!is_pv_32on64_domain(d)) )                  \
             l1e_add_flags((pl1e), _PAGE_USER);                  \
     } while ( 0 )
 #endif
@@ -746,22 +746,22 @@ get_page_from_l4e(
 #define adjust_guest_l2e(pl2e, d)                               \
     do {                                                        \
         if ( likely(l2e_get_flags((pl2e)) & _PAGE_PRESENT) &&   \
-             likely(!IS_COMPAT(d)) )                            \
+             likely(!is_pv_32on64_domain(d)) )                  \
             l2e_add_flags((pl2e), _PAGE_USER);                  \
     } while ( 0 )
 
-#define adjust_guest_l3e(pl3e, d)                               \
-    do {                                                        \
-        if ( likely(l3e_get_flags((pl3e)) & _PAGE_PRESENT) )    \
-            l3e_add_flags((pl3e), likely(!IS_COMPAT(d)) ?       \
-                                         _PAGE_USER :           \
-                                         _PAGE_USER|_PAGE_RW);  \
+#define adjust_guest_l3e(pl3e, d)                                   \
+    do {                                                            \
+        if ( likely(l3e_get_flags((pl3e)) & _PAGE_PRESENT) )        \
+            l3e_add_flags((pl3e), likely(!is_pv_32on64_domain(d)) ? \
+                                         _PAGE_USER :               \
+                                         _PAGE_USER|_PAGE_RW);      \
     } while ( 0 )
 
 #define adjust_guest_l4e(pl4e, d)                               \
     do {                                                        \
         if ( likely(l4e_get_flags((pl4e)) & _PAGE_PRESENT) &&   \
-             likely(!IS_COMPAT(d)) )                            \
+             likely(!is_pv_32on64_domain(d)) )                  \
             l4e_add_flags((pl4e), _PAGE_USER);                  \
     } while ( 0 )
 
@@ -774,11 +774,11 @@ get_page_from_l4e(
 #endif
 
 #ifdef CONFIG_COMPAT
-#define unadjust_guest_l3e(pl3e, d)                             \
-    do {                                                        \
-        if ( unlikely(IS_COMPAT(d)) &&                          \
-             likely(l3e_get_flags((pl3e)) & _PAGE_PRESENT) )    \
-            l3e_remove_flags((pl3e), _PAGE_USER|_PAGE_RW|_PAGE_ACCESSED); \
+#define unadjust_guest_l3e(pl3e, d)                                         \
+    do {                                                                    \
+        if ( unlikely(is_pv_32on64_domain(d)) &&                            \
+             likely(l3e_get_flags((pl3e)) & _PAGE_PRESENT) )                \
+            l3e_remove_flags((pl3e), _PAGE_USER|_PAGE_RW|_PAGE_ACCESSED);   \
     } while ( 0 )
 #else
 #define unadjust_guest_l3e(_p, _d) ((void)(_d))
@@ -910,11 +910,10 @@ static int create_pae_xen_mappings(struc
 #ifndef CONFIG_COMPAT
     l2_pgentry_t     l2e;
     int              i;
-#else
-
-    if ( !IS_COMPAT(d) )
+#endif
+
+    if ( !is_pv_32bit_domain(d) )
         return 1;
-#endif
 
     pl3e = (l3_pgentry_t *)((unsigned long)pl3e & PAGE_MASK);
 
@@ -1109,13 +1108,13 @@ static int alloc_l3_table(struct page_in
      * 512 entries must be valid/verified, which is most easily achieved
      * by clearing them out.
      */
-    if ( IS_COMPAT(d) )
+    if ( is_pv_32on64_domain(d) )
         memset(pl3e + 4, 0, (L3_PAGETABLE_ENTRIES - 4) * sizeof(*pl3e));
 
     for ( i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
     {
 #if defined(CONFIG_X86_PAE) || defined(CONFIG_COMPAT)
-        if ( (CONFIG_PAGING_LEVELS < 4 || IS_COMPAT(d)) && i == 3 )
+        if ( is_pv_32bit_domain(d) && (i == 3) )
         {
             if ( !(l3e_get_flags(pl3e[i]) & _PAGE_PRESENT) ||
                  (l3e_get_flags(pl3e[i]) & l3_disallow_mask(d)) ||
@@ -1179,7 +1178,7 @@ static int alloc_l4_table(struct page_in
     pl4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
         l4e_from_page(virt_to_page(d->arch.mm_perdomain_l3),
                       __PAGE_HYPERVISOR);
-    if ( IS_COMPAT(d) )
+    if ( is_pv_32on64_domain(d) )
         pl4e[l4_table_offset(COMPAT_ARG_XLAT_VIRT_BASE)] =
             l4e_from_page(virt_to_page(d->arch.mm_arg_xlat_l3),
                           __PAGE_HYPERVISOR);
@@ -1446,8 +1445,7 @@ static int mod_l3_entry(l3_pgentry_t *pl
      * Disallow updates to final L3 slot. It contains Xen mappings, and it
      * would be a pain to ensure they remain continuously valid throughout.
      */
-    if ( (CONFIG_PAGING_LEVELS < 4 || IS_COMPAT(d)) &&
-         pgentry_ptr_to_slot(pl3e) >= 3 )
+    if ( is_pv_32bit_domain(d) && (pgentry_ptr_to_slot(pl3e) >= 3) )
         return 0;
 #endif 
 
@@ -1794,7 +1792,7 @@ int new_guest_cr3(unsigned long mfn)
     unsigned long old_base_mfn;
 
 #ifdef CONFIG_COMPAT
-    if ( IS_COMPAT(d) )
+    if ( is_pv_32on64_domain(d) )
     {
         okay = paging_mode_refcounts(d)
             ? 0 /* Old code was broken, but what should it be? */
@@ -2026,7 +2024,7 @@ int do_mmuext_op(
             goto pin_page;
 
         case MMUEXT_PIN_L4_TABLE:
-            if ( IS_COMPAT(FOREIGNDOM) )
+            if ( is_pv_32bit_domain(FOREIGNDOM) )
                 break;
             type = PGT_l4_page_table;
 
@@ -2771,7 +2769,7 @@ int do_update_va_mapping(unsigned long v
             flush_tlb_mask(d->domain_dirty_cpumask);
             break;
         default:
-            if ( unlikely(!IS_COMPAT(d) ?
+            if ( unlikely(!is_pv_32on64_domain(d) ?
                           get_user(vmask, (unsigned long *)bmap_ptr) :
                           get_user(vmask, (unsigned int *)bmap_ptr)) )
                 rc = -EFAULT;
@@ -2793,7 +2791,7 @@ int do_update_va_mapping(unsigned long v
             flush_tlb_one_mask(d->domain_dirty_cpumask, va);
             break;
         default:
-            if ( unlikely(!IS_COMPAT(d) ?
+            if ( unlikely(!is_pv_32on64_domain(d) ?
                           get_user(vmask, (unsigned long *)bmap_ptr) :
                           get_user(vmask, (unsigned int *)bmap_ptr)) )
                 rc = -EFAULT;
@@ -3250,7 +3248,7 @@ static int ptwr_emulated_update(
     nl1e = l1e_from_intpte(val);
     if ( unlikely(!get_page_from_l1e(gl1e_to_ml1e(d, nl1e), d)) )
     {
-        if ( (CONFIG_PAGING_LEVELS == 3 || IS_COMPAT(d)) &&
+        if ( (CONFIG_PAGING_LEVELS >= 3) && is_pv_32bit_domain(d) &&
              (bytes == 4) && (addr & 4) && !do_cmpxchg &&
              (l1e_get_flags(nl1e) & _PAGE_PRESENT) )
         {
@@ -3387,7 +3385,7 @@ int ptwr_do_page_fault(struct vcpu *v, u
 
     ptwr_ctxt.ctxt.regs = regs;
     ptwr_ctxt.ctxt.addr_size = ptwr_ctxt.ctxt.sp_size =
-        IS_COMPAT(d) ? 32 : BITS_PER_LONG;
+        is_pv_32on64_domain(d) ? 32 : BITS_PER_LONG;
     ptwr_ctxt.cr2 = addr;
     ptwr_ctxt.pte = pte;
 
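Several of the mm.c rewrites above replace open-coded
CONFIG_PAGING_LEVELS tests with the new predicates. How these fold
against the build configuration, following arch_domain_create() and
the asm-x86/domain.h hunk:

    /* CONFIG_PAGING_LEVELS == 4 (64-bit Xen):
     *   is_pv_32bit_domain(d)  -> true only for compat (32-on-64) PV guests
     *   is_pv_32on64_domain(d) -> the same value
     * CONFIG_PAGING_LEVELS < 4 (32-bit Xen):
     *   is_pv_32bit_domain(d)  -> true for every PV guest (the default set
     *                             in arch_domain_create())
     *   is_pv_32on64_domain(d) -> constant 0, so such branches compile away
     */
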
diff -r 53b9883bbcc3 -r 405573aedd24 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Fri Apr 27 14:45:06 2007 +0100
+++ b/xen/arch/x86/mm/shadow/common.c   Fri Apr 27 15:06:55 2007 +0100
@@ -1577,7 +1577,7 @@ void sh_destroy_shadow(struct vcpu *v, m
            t == SH_type_fl1_pae_shadow ||  
            t == SH_type_fl1_64_shadow  || 
            t == SH_type_monitor_table  || 
-           (pv_32on64_vcpu(v) && t == SH_type_l4_64_shadow) ||
+           (is_pv_32on64_vcpu(v) && t == SH_type_l4_64_shadow) ||
            (page_get_owner(mfn_to_page(_mfn(sp->backpointer))) 
             == v->domain)); 
 
@@ -1620,7 +1620,7 @@ void sh_destroy_shadow(struct vcpu *v, m
         SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 4, 4)(v, smfn);
         break;
     case SH_type_l2h_64_shadow:
-        ASSERT(pv_32on64_vcpu(v));
+        ASSERT(is_pv_32on64_vcpu(v));
         /* Fall through... */
     case SH_type_l2_64_shadow:
         SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 4, 4)(v, smfn);
@@ -2717,7 +2717,7 @@ static int shadow_log_dirty_enable(struc
     /* 32bit PV guests on 64bit xen behave like older 64bit linux: they
      * change an l4e instead of cr3 to switch tables.  Give them the
      * same optimization */
-    if ( pv_32on64_domain(d) )
+    if ( is_pv_32on64_domain(d) )
         d->arch.paging.shadow.opt_flags = SHOPT_LINUX_L3_TOPLEVEL;
 #endif
 
diff -r 53b9883bbcc3 -r 405573aedd24 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Fri Apr 27 14:45:06 2007 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c    Fri Apr 27 15:06:55 2007 +0100
@@ -135,7 +135,7 @@ set_shadow_status(struct vcpu *v, mfn_t 
                    shadow_type, mfn_x(smfn));
 
     /* 32-on-64 PV guests don't own their l4 pages so can't get_page them */
-    if ( !pv_32on64_vcpu(v) || shadow_type != SH_type_l4_64_shadow )
+    if ( !is_pv_32on64_vcpu(v) || shadow_type != SH_type_l4_64_shadow )
     {
         res = get_page(mfn_to_page(gmfn), d);
         ASSERT(res == 1);
@@ -162,7 +162,7 @@ delete_shadow_status(struct vcpu *v, mfn
                    mfn_x(gmfn), shadow_type, mfn_x(smfn));
     shadow_hash_delete(v, mfn_x(gmfn), shadow_type, smfn);
     /* 32-on-64 PV guests don't own their l4 pages; see set_shadow_status */
-    if ( !pv_32on64_vcpu(v) || shadow_type != SH_type_l4_64_shadow )
+    if ( !is_pv_32on64_vcpu(v) || shadow_type != SH_type_l4_64_shadow )
         put_page(mfn_to_page(gmfn));
 }
 
@@ -744,7 +744,7 @@ _sh_propagate(struct vcpu *v,
     // PV guests in 64-bit mode use two different page tables for user vs
     // supervisor permissions, making the guest's _PAGE_USER bit irrelevant.
     // It is always shadowed as present...
-    if ( (GUEST_PAGING_LEVELS == 4) && !pv_32on64_domain(d) 
+    if ( (GUEST_PAGING_LEVELS == 4) && !is_pv_32on64_domain(d) 
          && !is_hvm_domain(d) )
     {
         sflags |= _PAGE_USER;
@@ -1299,7 +1299,7 @@ do {                                    
     for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )                  \
     {                                                                       \
         if ( (!(_xen))                                                      \
-             || !pv_32on64_domain(_dom)                                     \
+             || !is_pv_32on64_domain(_dom)                                  \
              || mfn_to_shadow_page(_sl2mfn)->type != SH_type_l2h_64_shadow  \
              || (_i < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(_dom)) )           \
         {                                                                   \
@@ -1410,7 +1410,7 @@ void sh_install_xen_entries_in_l4(struct
                                 __PAGE_HYPERVISOR);
     }
 
-    if ( pv_32on64_domain(v->domain) )
+    if ( is_pv_32on64_domain(v->domain) )
     {
         /* install compat arg xlat entry */
         sl4e[shadow_l4_table_offset(COMPAT_ARG_XLAT_VIRT_BASE)] =
@@ -1436,7 +1436,7 @@ static void sh_install_xen_entries_in_l2
     int i;
 #else
 
-    if ( !pv_32on64_vcpu(v) )
+    if ( !is_pv_32on64_vcpu(v) )
         return;
 #endif
 
@@ -1681,7 +1681,7 @@ sh_make_monitor_table(struct vcpu *v)
             l4e = sh_map_domain_page(m4mfn);
             l4e[0] = l4e_from_pfn(mfn_x(m3mfn), __PAGE_HYPERVISOR);
             sh_unmap_domain_page(l4e);
-            if ( pv_32on64_vcpu(v) )
+            if ( is_pv_32on64_vcpu(v) )
             {
                 // Install a monitor l2 table in slot 3 of the l3 table.
                 // This is used for all Xen entries.
@@ -1837,7 +1837,7 @@ static shadow_l2e_t * shadow_get_and_cre
         unsigned int t = SH_type_l2_shadow;
 
         /* Tag compat L2 containing hypervisor (m2p) mappings */
-        if ( pv_32on64_domain(v->domain) &&
+        if ( is_pv_32on64_domain(v->domain) &&
              guest_l4_table_offset(gw->va) == 0 &&
              guest_l3_table_offset(gw->va) == 3 )
             t = SH_type_l2h_shadow;
@@ -2106,7 +2106,7 @@ void sh_destroy_monitor_table(struct vcp
         l4_pgentry_t *l4e = sh_map_domain_page(mmfn);
         ASSERT(l4e_get_flags(l4e[0]) & _PAGE_PRESENT);
         m3mfn = _mfn(l4e_get_pfn(l4e[0]));
-        if ( pv_32on64_vcpu(v) )
+        if ( is_pv_32on64_vcpu(v) )
         {
             /* Need to destroy the l2 monitor page in slot 3 too */
             l3_pgentry_t *l3e = sh_map_domain_page(m3mfn);
@@ -3469,7 +3469,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
                    (unsigned long)pagetable_get_pfn(v->arch.guest_table));
 
 #if GUEST_PAGING_LEVELS == 4
-    if ( !(v->arch.flags & TF_kernel_mode) && !pv_32on64_vcpu(v) )
+    if ( !(v->arch.flags & TF_kernel_mode) && !is_pv_32on64_vcpu(v) )
         gmfn = pagetable_get_mfn(v->arch.guest_table_user);
     else
 #endif
@@ -4280,7 +4280,7 @@ int sh_audit_l3_table(struct vcpu *v, mf
             mfn = shadow_l3e_get_mfn(*sl3e);
             gmfn = get_shadow_status(v, audit_gfn_to_mfn(v, gfn, gl3mfn), 
                                      ((GUEST_PAGING_LEVELS == 3 ||
-                                       pv_32on64_vcpu(v))
+                                       is_pv_32on64_vcpu(v))
                                       && !shadow_mode_external(v->domain)
                                       && (guest_index(gl3e) % 4) == 3)
                                      ? SH_type_l2h_shadow
diff -r 53b9883bbcc3 -r 405573aedd24 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c      Fri Apr 27 14:45:06 2007 +0100
+++ b/xen/arch/x86/traps.c      Fri Apr 27 15:06:55 2007 +0100
@@ -124,7 +124,7 @@ static void show_guest_stack(struct cpu_
     if ( is_hvm_vcpu(current) )
         return;
 
-    if ( IS_COMPAT(container_of(regs, struct cpu_info, guest_cpu_user_regs)->current_vcpu->domain) )
+    if ( is_pv_32on64_vcpu(current) )
     {
         compat_show_guest_stack(regs, debug_stack_lines);
         return;
@@ -1568,7 +1568,7 @@ static int emulate_privileged_op(struct 
             break;
             
         case 3: /* Read CR3 */
-            if ( !IS_COMPAT(v->domain) )
+            if ( !is_pv_32on64_vcpu(v) )
                 *reg = xen_pfn_to_cr3(mfn_to_gmfn(
                     v->domain, pagetable_get_pfn(v->arch.guest_table)));
 #ifdef CONFIG_COMPAT
@@ -1625,7 +1625,7 @@ static int emulate_privileged_op(struct 
 
         case 3: /* Write CR3 */
             LOCK_BIGLOCK(v->domain);
-            if ( !IS_COMPAT(v->domain) )
+            if ( !is_pv_32on64_vcpu(v) )
-                rc = new_guest_cr3(gmfn_to_mfn(v->domain, xen_cr3_to_pfn(*reg)));
 #ifdef CONFIG_COMPAT
             else
@@ -1663,7 +1663,7 @@ static int emulate_privileged_op(struct 
         {
 #ifdef CONFIG_X86_64
         case MSR_FS_BASE:
-            if ( IS_COMPAT(v->domain) )
+            if ( is_pv_32on64_vcpu(v) )
                 goto fail;
             if ( wrmsr_safe(MSR_FS_BASE, regs->eax, regs->edx) )
                 goto fail;
@@ -1671,7 +1671,7 @@ static int emulate_privileged_op(struct 
                 ((u64)regs->edx << 32) | regs->eax;
             break;
         case MSR_GS_BASE:
-            if ( IS_COMPAT(v->domain) )
+            if ( is_pv_32on64_vcpu(v) )
                 goto fail;
             if ( wrmsr_safe(MSR_GS_BASE, regs->eax, regs->edx) )
                 goto fail;
@@ -1679,7 +1679,7 @@ static int emulate_privileged_op(struct 
                 ((u64)regs->edx << 32) | regs->eax;
             break;
         case MSR_SHADOW_GS_BASE:
-            if ( IS_COMPAT(v->domain) )
+            if ( is_pv_32on64_vcpu(v) )
                 goto fail;
             if ( wrmsr_safe(MSR_SHADOW_GS_BASE, regs->eax, regs->edx) )
                 goto fail;
@@ -1705,19 +1705,19 @@ static int emulate_privileged_op(struct 
         {
 #ifdef CONFIG_X86_64
         case MSR_FS_BASE:
-            if ( IS_COMPAT(v->domain) )
+            if ( is_pv_32on64_vcpu(v) )
                 goto fail;
             regs->eax = v->arch.guest_context.fs_base & 0xFFFFFFFFUL;
             regs->edx = v->arch.guest_context.fs_base >> 32;
             break;
         case MSR_GS_BASE:
-            if ( IS_COMPAT(v->domain) )
+            if ( is_pv_32on64_vcpu(v) )
                 goto fail;
             regs->eax = v->arch.guest_context.gs_base_kernel & 0xFFFFFFFFUL;
             regs->edx = v->arch.guest_context.gs_base_kernel >> 32;
             break;
         case MSR_SHADOW_GS_BASE:
-            if ( IS_COMPAT(v->domain) )
+            if ( is_pv_32on64_vcpu(v) )
                 goto fail;
             regs->eax = v->arch.guest_context.gs_base_user & 0xFFFFFFFFUL;
             regs->edx = v->arch.guest_context.gs_base_user >> 32;
diff -r 53b9883bbcc3 -r 405573aedd24 xen/arch/x86/x86_64/asm-offsets.c
--- a/xen/arch/x86/x86_64/asm-offsets.c Fri Apr 27 14:45:06 2007 +0100
+++ b/xen/arch/x86/x86_64/asm-offsets.c Fri Apr 27 15:06:55 2007 +0100
@@ -91,7 +91,7 @@ void __dummy__(void)
     OFFSET(VCPU_vmx_cr2, struct vcpu, arch.hvm_vmx.cpu_cr2);
     BLANK();
 
-    OFFSET(DOMAIN_is_compat, struct domain, is_compat);
+    OFFSET(DOMAIN_is_32bit_pv, struct domain, arch.is_32bit_pv);
     BLANK();
 
     OFFSET(VMCB_rax, struct vmcb_struct, rax);
diff -r 53b9883bbcc3 -r 405573aedd24 xen/arch/x86/x86_64/entry.S
--- a/xen/arch/x86/x86_64/entry.S       Fri Apr 27 14:45:06 2007 +0100
+++ b/xen/arch/x86/x86_64/entry.S       Fri Apr 27 15:06:55 2007 +0100
@@ -235,7 +235,7 @@ ENTRY(int80_direct_trap)
         jz    int80_slow_path
 
         movq  VCPU_domain(%rbx),%rax
-        testb $1,DOMAIN_is_compat(%rax)
+        testb $1,DOMAIN_is_32bit_pv(%rax)
         jnz   compat_int80_direct_trap
 
         call  create_bounce_frame
@@ -356,7 +356,7 @@ ENTRY(domain_crash_synchronous)
         # create_bounce_frame() temporarily clobbers CS.RPL. Fix up.
         movq  CPUINFO_current_vcpu(%rax),%rax
         movq  VCPU_domain(%rax),%rax
-        testb $1,DOMAIN_is_compat(%rax)
+        testb $1,DOMAIN_is_32bit_pv(%rax)
         setz  %al
         leal  (%rax,%rax,2),%eax
         orb   %al,UREGS_cs(%rsp)
@@ -373,7 +373,7 @@ ENTRY(ret_from_intr)
         testb $3,UREGS_cs(%rsp)
         jz    restore_all_xen
         movq  VCPU_domain(%rbx),%rax
-        testb $1,DOMAIN_is_compat(%rax)
+        testb $1,DOMAIN_is_32bit_pv(%rax)
         jz    test_all_events
         jmp   compat_test_all_events
 
@@ -395,7 +395,7 @@ 1:      movq  %rsp,%rdi
         jz    restore_all_xen
         leaq  VCPU_trap_bounce(%rbx),%rdx
         movq  VCPU_domain(%rbx),%rax
-        testb $1,DOMAIN_is_compat(%rax)
+        testb $1,DOMAIN_is_32bit_pv(%rax)
         jnz   compat_post_handle_exception
         testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
         jz    test_all_events
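
The assembly above can test the flag directly because asm-offsets.c
(previous hunk) emits the structure offset as the DOMAIN_is_32bit_pv
constant. A sketch of the usual asm-offsets pattern, assuming Xen's
conventional DEFINE/OFFSET helpers:

    #include <stddef.h>                     /* offsetof */

    /* Build-time trick: this inline asm is never executed; the build
     * scrapes the compiler-generated assembly for "->SYMBOL value"
     * lines and turns them into #defines usable from entry.S. */
    #define DEFINE(_sym, _val) \
        __asm__ __volatile__ ( "\n->" #_sym " %0 " #_val : : "i" (_val) )
    #define OFFSET(_sym, _str, _mem) DEFINE(_sym, offsetof(_str, _mem))

    /* As in the asm-offsets.c hunk: */
    /* OFFSET(DOMAIN_is_32bit_pv, struct domain, arch.is_32bit_pv); */
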
diff -r 53b9883bbcc3 -r 405573aedd24 xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c  Fri Apr 27 14:45:06 2007 +0100
+++ b/xen/arch/x86/x86_64/mm.c  Fri Apr 27 15:06:55 2007 +0100
@@ -384,9 +384,9 @@ int check_descriptor(const struct domain
     /* All code and data segments are okay. No base/limit checking. */
     if ( (b & _SEGMENT_S) )
     {
-        if ( !IS_COMPAT(dom) || !(b & _SEGMENT_L) )
-            goto good;
-        goto bad;
+        if ( is_pv_32bit_domain(dom) && (b & _SEGMENT_L) )
+            goto bad;
+        goto good;
     }
 
     /* Invalid type 0 is harmless. It is used for 2nd half of a call gate. */
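
The rewritten segment check is the contrapositive of the old one, with
the polarity flipped so the common case falls straight through to
'good':

    old: good iff (!compat || !L)  ==  !(compat && L)
    new: bad  iff ( compat &&  L)

i.e. a long-mode (L=1) segment is the only S-segment a 32-bit PV
domain may not load; everything else passes unchanged.
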
diff -r 53b9883bbcc3 -r 405573aedd24 xen/arch/x86/x86_64/traps.c
--- a/xen/arch/x86/x86_64/traps.c       Fri Apr 27 14:45:06 2007 +0100
+++ b/xen/arch/x86/x86_64/traps.c       Fri Apr 27 15:06:55 2007 +0100
@@ -179,7 +179,7 @@ asmlinkage void do_double_fault(struct c
 
 void toggle_guest_mode(struct vcpu *v)
 {
-    if ( IS_COMPAT(v->domain) )
+    if ( is_pv_32bit_vcpu(v) )
         return;
     v->arch.flags ^= TF_kernel_mode;
     __asm__ __volatile__ ( "swapgs" );
@@ -534,7 +534,7 @@ void hypercall_page_initialise(struct do
 {
     if ( is_hvm_domain(d) )
         hvm_hypercall_page_initialise(d, hypercall_page);
-    else if ( !IS_COMPAT(d) )
+    else if ( !is_pv_32bit_domain(d) )
         hypercall_page_initialise_ring3_kernel(hypercall_page);
     else
         hypercall_page_initialise_ring1_kernel(hypercall_page);
diff -r 53b9883bbcc3 -r 405573aedd24 xen/include/asm-x86/desc.h
--- a/xen/include/asm-x86/desc.h        Fri Apr 27 14:45:06 2007 +0100
+++ b/xen/include/asm-x86/desc.h        Fri Apr 27 15:06:55 2007 +0100
@@ -64,7 +64,7 @@
 #define load_TR(n)  __asm__ __volatile__ ("ltr  %%ax" : : "a" (__TSS(n)<<3) )
 
 #if defined(__x86_64__)
-#define GUEST_KERNEL_RPL(d) (!IS_COMPAT(d) ? 3 : 1)
+#define GUEST_KERNEL_RPL(d) (is_pv_32bit_domain(d) ? 1 : 3)
 #elif defined(__i386__)
 #define GUEST_KERNEL_RPL(d) ((void)(d), 1)
 #endif
@@ -104,7 +104,7 @@
  */
 #define guest_gate_selector_okay(d, sel)                                \
     ((((sel)>>3) < FIRST_RESERVED_GDT_ENTRY) || /* Guest seg? */        \
-     ((sel) == (!IS_COMPAT(d) ?                                         \
+     ((sel) == (!is_pv_32on64_domain(d) ?                               \
                 FLAT_KERNEL_CS :                /* Xen default seg? */  \
                 FLAT_COMPAT_KERNEL_CS)) ||                              \
      ((sel) & 4))                               /* LDT seg? */
diff -r 53b9883bbcc3 -r 405573aedd24 xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h      Fri Apr 27 14:45:06 2007 +0100
+++ b/xen/include/asm-x86/domain.h      Fri Apr 27 15:06:55 2007 +0100
@@ -7,18 +7,16 @@
 #include <asm/hvm/domain.h>
 #include <asm/e820.h>
 
+#define has_32bit_shinfo(d)    ((d)->arch.has_32bit_shinfo)
+#define is_pv_32bit_domain(d)  ((d)->arch.is_32bit_pv)
+#define is_pv_32bit_vcpu(v)    (is_pv_32bit_domain((v)->domain))
 #ifdef __x86_64__
-#define pv_32bit_vcpu(v)    (!is_hvm_vcpu(v) && IS_COMPAT((v)->domain))
-#define pv_32bit_domain(d)  (!is_hvm_domain(d) && IS_COMPAT(d))
-#define pv_32on64_vcpu(v)   (pv_32bit_vcpu(v))
-#define pv_32on64_domain(d) (pv_32bit_domain(d))
+#define is_pv_32on64_domain(d) (is_pv_32bit_domain(d))
 #else
-#define pv_32bit_vcpu(v)    (!is_hvm_vcpu(v))
-#define pv_32bit_domain(d)  (!is_hvm_domain(d))
-#define pv_32on64_vcpu(v)   (0)
-#define pv_32on64_domain(d) (0)
-#endif
-
+#define is_pv_32on64_domain(d) (0)
+#endif
+#define is_pv_32on64_vcpu(v)   (is_pv_32on64_domain((v)->domain))
+#define IS_COMPAT(d)           (is_pv_32on64_domain(d))
 
 struct trap_bounce {
     uint32_t      error_code;
@@ -213,6 +211,11 @@ struct arch_domain
 
     /* Maximum physical-address bitwidth supported by this guest. */
     unsigned int physaddr_bitsize;
+
+    /* Is a 32-bit PV (non-HVM) guest? */
+    bool_t is_32bit_pv;
+    /* Is shared-info page in 32-bit format? */
+    bool_t has_32bit_shinfo;
 } __cacheline_aligned;
 
 #ifdef CONFIG_X86_PAE
diff -r 53b9883bbcc3 -r 405573aedd24 xen/include/asm-x86/ldt.h
--- a/xen/include/asm-x86/ldt.h Fri Apr 27 14:45:06 2007 +0100
+++ b/xen/include/asm-x86/ldt.h Fri Apr 27 15:06:55 2007 +0100
@@ -17,7 +17,7 @@ static inline void load_LDT(struct vcpu 
     else
     {
         cpu = smp_processor_id();
-        desc = (!IS_COMPAT(v->domain) ? gdt_table : compat_gdt_table)
+        desc = (!is_pv_32on64_vcpu(v) ? gdt_table : compat_gdt_table)
                + __LDT(cpu) - FIRST_RESERVED_GDT_ENTRY;
         _set_tssldt_desc(desc, LDT_VIRT_START(v), ents*8-1, 2);
         __asm__ __volatile__ ( "lldt %%ax" : : "a" (__LDT(cpu)<<3) );
diff -r 53b9883bbcc3 -r 405573aedd24 xen/include/asm-x86/shared.h
--- a/xen/include/asm-x86/shared.h      Fri Apr 27 14:45:06 2007 +0100
+++ b/xen/include/asm-x86/shared.h      Fri Apr 27 15:06:55 2007 +0100
@@ -3,66 +3,66 @@
 
 #ifdef CONFIG_COMPAT
 
-#define nmi_reason(d) (!IS_COMPAT(d) ? \
+#define nmi_reason(d) (!has_32bit_shinfo(d) ?                              \
                        (void *)&(d)->shared_info->native.arch.nmi_reason : \
                        (void *)&(d)->shared_info->compat.arch.nmi_reason)
 
-#define GET_SET_SHARED(type, field) \
-static inline type arch_get_##field(const struct domain *d) \
-{ \
-    return !IS_COMPAT(d) ? \
-           d->shared_info->native.arch.field : \
-           d->shared_info->compat.arch.field; \
-} \
-static inline void arch_set_##field(struct domain *d, \
-                                    type val) \
-{ \
-    if ( !IS_COMPAT(d) ) \
-        d->shared_info->native.arch.field = val; \
-    else \
-        d->shared_info->compat.arch.field = val; \
+#define GET_SET_SHARED(type, field)                             \
+static inline type arch_get_##field(const struct domain *d)     \
+{                                                               \
+    return !has_32bit_shinfo(d) ?                               \
+           d->shared_info->native.arch.field :                  \
+           d->shared_info->compat.arch.field;                   \
+}                                                               \
+static inline void arch_set_##field(struct domain *d,           \
+                                    type val)                   \
+{                                                               \
+    if ( !has_32bit_shinfo(d) )                                 \
+        d->shared_info->native.arch.field = val;                \
+    else                                                        \
+        d->shared_info->compat.arch.field = val;                \
 }
 
-#define GET_SET_VCPU(type, field) \
-static inline type arch_get_##field(const struct vcpu *v) \
-{ \
-    return !IS_COMPAT(v->domain) ? \
-           v->vcpu_info->native.arch.field : \
-           v->vcpu_info->compat.arch.field; \
-} \
-static inline void arch_set_##field(struct vcpu *v, \
-                                    type val) \
-{ \
-    if ( !IS_COMPAT(v->domain) ) \
-        v->vcpu_info->native.arch.field = val; \
-    else \
-        v->vcpu_info->compat.arch.field = val; \
+#define GET_SET_VCPU(type, field)                               \
+static inline type arch_get_##field(const struct vcpu *v)       \
+{                                                               \
+    return !has_32bit_shinfo(v->domain) ?                       \
+           v->vcpu_info->native.arch.field :                    \
+           v->vcpu_info->compat.arch.field;                     \
+}                                                               \
+static inline void arch_set_##field(struct vcpu *v,             \
+                                    type val)                   \
+{                                                               \
+    if ( !has_32bit_shinfo(v->domain) )                         \
+        v->vcpu_info->native.arch.field = val;                  \
+    else                                                        \
+        v->vcpu_info->compat.arch.field = val;                  \
 }
 
 #else
 
 #define nmi_reason(d) ((void *)&(d)->shared_info->arch.nmi_reason)
 
-#define GET_SET_SHARED(type, field) \
-static inline type arch_get_##field(const struct domain *d) \
-{ \
-    return d->shared_info->arch.field; \
-} \
-static inline void arch_set_##field(struct domain *d, \
-                                    type val) \
-{ \
-    d->shared_info->arch.field = val; \
+#define GET_SET_SHARED(type, field)                             \
+static inline type arch_get_##field(const struct domain *d)     \
+{                                                               \
+    return d->shared_info->arch.field;                          \
+}                                                               \
+static inline void arch_set_##field(struct domain *d,           \
+                                    type val)                   \
+{                                                               \
+    d->shared_info->arch.field = val;                           \
 }
 
-#define GET_SET_VCPU(type, field) \
-static inline type arch_get_##field(const struct vcpu *v) \
-{ \
-    return v->vcpu_info->arch.field; \
-} \
-static inline void arch_set_##field(struct vcpu *v, \
-                                    type val) \
-{ \
-    v->vcpu_info->arch.field = val; \
+#define GET_SET_VCPU(type, field)                               \
+static inline type arch_get_##field(const struct vcpu *v)       \
+{                                                               \
+    return v->vcpu_info->arch.field;                            \
+}                                                               \
+static inline void arch_set_##field(struct vcpu *v,             \
+                                    type val)                   \
+{                                                               \
+    v->vcpu_info->arch.field = val;                             \
 }
 #endif
 
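As a usage sketch, instantiating one of these accessors for a
hypothetical unsigned long field 'foo' (the name is illustrative; real
users pass actual arch fields) generates, in the CONFIG_COMPAT case:

    GET_SET_SHARED(unsigned long, foo)

    /* ...which expands to:
     *
     * static inline unsigned long arch_get_foo(const struct domain *d)
     * {
     *     return !has_32bit_shinfo(d) ?
     *            d->shared_info->native.arch.foo :
     *            d->shared_info->compat.arch.foo;
     * }
     * static inline void arch_set_foo(struct domain *d, unsigned long val)
     * {
     *     if ( !has_32bit_shinfo(d) )
     *         d->shared_info->native.arch.foo = val;
     *     else
     *         d->shared_info->compat.arch.foo = val;
     * }
     */
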
diff -r 53b9883bbcc3 -r 405573aedd24 xen/include/asm-x86/x86_64/page.h
--- a/xen/include/asm-x86/x86_64/page.h Fri Apr 27 14:45:06 2007 +0100
+++ b/xen/include/asm-x86/x86_64/page.h Fri Apr 27 15:06:55 2007 +0100
@@ -55,12 +55,12 @@ typedef l4_pgentry_t root_pgentry_t;
 
 #define is_guest_l1_slot(_s) (1)
 #define is_guest_l2_slot(_d, _t, _s)                   \
-    ( !IS_COMPAT(_d) ||                                \
+    ( !is_pv_32bit_domain(_d) ||                       \
       !((_t) & PGT_pae_xen_l2) ||                      \
       ((_s) < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(_d)) )
 #define is_guest_l3_slot(_s) (1)
 #define is_guest_l4_slot(_d, _s)                    \
-    ( IS_COMPAT(_d)                                 \
+    ( is_pv_32bit_domain(_d)                        \
       ? ((_s) == 0)                                 \
       : (((_s) < ROOT_PAGETABLE_FIRST_XEN_SLOT) ||  \
          ((_s) > ROOT_PAGETABLE_LAST_XEN_SLOT)))
diff -r 53b9883bbcc3 -r 405573aedd24 xen/include/asm-x86/x86_64/regs.h
--- a/xen/include/asm-x86/x86_64/regs.h Fri Apr 27 14:45:06 2007 +0100
+++ b/xen/include/asm-x86/x86_64/regs.h Fri Apr 27 15:06:55 2007 +0100
@@ -10,17 +10,17 @@
 #define ring_2(r)    (((r)->cs & 3) == 2)
 #define ring_3(r)    (((r)->cs & 3) == 3)
 
-#define guest_kernel_mode(v, r)   \
-    (!IS_COMPAT((v)->domain) ? \
-     ring_3(r) && ((v)->arch.flags & TF_kernel_mode) : \
-     ring_1(r))
+#define guest_kernel_mode(v, r)                                 \
+    (!is_pv_32bit_vcpu(v) ?                                     \
+     (ring_3(r) && ((v)->arch.flags & TF_kernel_mode)) :        \
+     (ring_1(r)))
 
 #define permit_softint(dpl, v, r) \
     ((dpl) >= (guest_kernel_mode(v, r) ? 1 : 3))
 
 /* Check for null trap callback handler: Is the EIP null? */
 #define null_trap_bounce(v, tb) \
-    (!IS_COMPAT((v)->domain) ? (tb)->eip == 0 : ((tb)->cs & ~3) == 0)
+    (!is_pv_32bit_vcpu(v) ? ((tb)->eip == 0) : (((tb)->cs & ~3) == 0))
 
 /* Number of bytes of on-stack execution state to be context-switched. */
 /* NB. Segment registers and bases are not saved/restored on x86/64 stack. */
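
guest_kernel_mode() needs the two-way split because long mode leaves a
64-bit PV kernel no separate ring of its own: it runs in ring 3 and is
distinguished from its user processes only by the per-vcpu
TF_kernel_mode flag, whereas a 32-bit PV kernel still sits in ring 1.
Condensed:

    /* 64-bit PV guest: kernel and user both in ring 3; TF_kernel_mode
     *                  records which context is currently executing.
     * 32-bit PV guest: kernel in ring 1, user in ring 3. */
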
diff -r 53b9883bbcc3 -r 405573aedd24 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h   Fri Apr 27 14:45:06 2007 +0100
+++ b/xen/include/xen/sched.h   Fri Apr 27 15:06:55 2007 +0100
@@ -188,8 +188,6 @@ struct domain
     bool_t           is_privileged;
     /* Is this guest being debugged by dom0? */
     bool_t           debugger_attached;
-    /* Is a 'compatibility mode' guest (semantics are arch specific)? */
-    bool_t           is_compat;
     /* Are any VCPUs polling event channels (SCHEDOP_poll)? */
     bool_t           is_polling;
     /* Is this guest dying (i.e., a zombie)? */
@@ -489,10 +487,8 @@ static inline void vcpu_unblock(struct v
 
 #define IS_PRIV(_d) ((_d)->is_privileged)
 
-#ifdef CONFIG_COMPAT
-#define IS_COMPAT(_d) ((_d)->is_compat)
-#else
-#define IS_COMPAT(_d) 0
+#ifndef IS_COMPAT
+#define IS_COMPAT(d) 0
 #endif
 
 #define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist))
diff -r 53b9883bbcc3 -r 405573aedd24 xen/include/xen/shared.h
--- a/xen/include/xen/shared.h  Fri Apr 27 14:45:06 2007 +0100
+++ b/xen/include/xen/shared.h  Fri Apr 27 15:06:55 2007 +0100
@@ -12,25 +12,27 @@ typedef union {
     struct compat_shared_info compat;
 } shared_info_t;
 
-#define __shared_info(d, s, field)      (*(!IS_COMPAT(d) ? \
-                                           &(s)->native.field : \
+#define __shared_info(d, s, field)      (*(!has_32bit_shinfo(d) ?       \
+                                           &(s)->native.field :         \
                                            &(s)->compat.field))
-#define __shared_info_addr(d, s, field) (!IS_COMPAT(d) ? \
-                                         (void *)&(s)->native.field : \
+#define __shared_info_addr(d, s, field) (!has_32bit_shinfo(d) ?         \
+                                         (void *)&(s)->native.field :   \
                                          (void *)&(s)->compat.field)
 
-#define shared_info(d, field)      __shared_info(d, (d)->shared_info, field)
-#define shared_info_addr(d, field) __shared_info_addr(d, (d)->shared_info, field)
+#define shared_info(d, field)                   \
+    __shared_info(d, (d)->shared_info, field)
+#define shared_info_addr(d, field)                      \
+    __shared_info_addr(d, (d)->shared_info, field)
 
 typedef union {
     struct vcpu_info native;
     struct compat_vcpu_info compat;
 } vcpu_info_t;
 
-#define vcpu_info(v, field)      (*(!IS_COMPAT((v)->domain) ? \
-                                    &(v)->vcpu_info->native.field : \
+#define vcpu_info(v, field)      (*(!has_32bit_shinfo((v)->domain) ?    \
+                                    &(v)->vcpu_info->native.field :     \
                                     &(v)->vcpu_info->compat.field))
-#define vcpu_info_addr(v, field) (!IS_COMPAT((v)->domain) ? \
+#define vcpu_info_addr(v, field) (!has_32bit_shinfo((v)->domain) ?        \
                                   (void *)&(v)->vcpu_info->native.field : \
                                   (void *)&(v)->vcpu_info->compat.field)
 
