
[Xen-changelog] [xen-unstable] xen: Remove x86_32 build target.


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-unstable <patchbot@xxxxxxx>
  • Date: Fri, 14 Sep 2012 10:55:31 +0000
  • Delivery-date: Fri, 14 Sep 2012 10:55:59 +0000
  • List-id: "Change log for Mercurial \(receive only\)" <xen-changelog.lists.xen.org>

# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1347452970 -3600
# Node ID bc8cb47787025aaa987a5a01719d014d8ede8665
# Parent  05d82fb18335ecf4abf5ab9483d2f9929e478a54
xen: Remove x86_32 build target.

Signed-off-by: Keir Fraser <keir@xxxxxxx>
---


diff -r 05d82fb18335 -r bc8cb4778702 xen/Makefile
--- a/xen/Makefile      Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/Makefile      Wed Sep 12 13:29:30 2012 +0100
@@ -22,7 +22,11 @@ dist: install
 
 .PHONY: build install uninstall clean distclean cscope TAGS tags MAP gtags
 build install uninstall debug clean distclean cscope TAGS tags MAP gtags::
+ifneq ($(XEN_TARGET_ARCH),x86_32)
        $(MAKE) -f Rules.mk _$@
+else
+       echo "*** Xen x86/32 target no longer supported!"
+endif
 
 .PHONY: _build
 _build: $(TARGET).gz
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/Makefile
--- a/xen/arch/x86/Makefile     Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/Makefile     Wed Sep 12 13:29:30 2012 +0100
@@ -5,7 +5,6 @@ subdir-y += hvm
 subdir-y += mm
 subdir-y += oprofile
 
-subdir-$(x86_32) += x86_32
 subdir-$(x86_64) += x86_64
 
 obj-y += apic.o
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/Rules.mk
--- a/xen/arch/x86/Rules.mk     Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/Rules.mk     Wed Sep 12 13:29:30 2012 +0100
@@ -41,22 +41,15 @@ CFLAGS += -DCONFIG_X86_SUPERVISOR_MODE_K
 endif
 
 x86 := y
+x86_32 := n
+x86_64 := y
 
-ifeq ($(TARGET_SUBARCH),x86_32)
-x86_32 := y
-x86_64 := n
-endif
-
-ifeq ($(TARGET_SUBARCH),x86_64)
 CFLAGS += -mno-red-zone -mno-sse -fpic
 CFLAGS += -fno-asynchronous-unwind-tables
 # -fvisibility=hidden reduces -fpic cost, if it's available
 ifneq ($(call cc-option,$(CC),-fvisibility=hidden,n),n)
 CFLAGS += -DGCC_HAS_VISIBILITY_ATTRIBUTE
 endif
-x86_32 := n
-x86_64 := y
-endif
 
 # Require GCC v3.4+ (to avoid issues with alignment constraints in Xen headers)
 check-$(gcc) = $(call cc-ver-check,CC,0x030400,"Xen requires at least gcc-3.4")
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/acpi/boot.c
--- a/xen/arch/x86/acpi/boot.c  Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/acpi/boot.c  Wed Sep 12 13:29:30 2012 +0100
@@ -583,183 +583,6 @@ static void __init acpi_process_madt(voi
        return;
 }
 
-#ifdef __i386__
-
-static int __init disable_acpi_irq(struct dmi_system_id *d)
-{
-       if (!acpi_force) {
-               printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n",
-                      d->ident);
-               acpi_noirq_set();
-       }
-       return 0;
-}
-
-static int __init dmi_disable_acpi(struct dmi_system_id *d)
-{
-       if (!acpi_force) {
-               printk(KERN_NOTICE "%s detected: acpi off\n", d->ident);
-               disable_acpi();
-       } else {
-               printk(KERN_NOTICE
-                      "Warning: DMI blacklist says broken, but acpi forced\n");
-       }
-       return 0;
-}
-
-/*
- * Limit ACPI to CPU enumeration for HT
- */
-static int __init force_acpi_ht(struct dmi_system_id *d)
-{
-       if (!acpi_force) {
-               printk(KERN_NOTICE "%s detected: force use of acpi=ht\n",
-                      d->ident);
-               disable_acpi();
-               acpi_ht = 1;
-       } else {
-               printk(KERN_NOTICE
-                      "Warning: acpi=force overrules DMI blacklist: 
acpi=ht\n");
-       }
-       return 0;
-}
-
-/*
- * If your system is blacklisted here, but you find that acpi=force
- * works for you, please contact acpi-devel@xxxxxxxxxxxxxxx
- */
-static struct dmi_system_id __initdata acpi_dmi_table[] = {
-       /*
-        * Boxes that need ACPI disabled
-        */
-       {
-        .callback = dmi_disable_acpi,
-        .ident = "IBM Thinkpad",
-        .matches = {
-                    DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
-                    DMI_MATCH(DMI_BOARD_NAME, "2629H1G"),
-                    },
-        },
-
-       /*
-        * Boxes that need acpi=ht
-        */
-       {
-        .callback = force_acpi_ht,
-        .ident = "FSC Primergy T850",
-        .matches = {
-                    DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
-                    DMI_MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"),
-                    },
-        },
-       {
-        .callback = force_acpi_ht,
-        .ident = "DELL GX240",
-        .matches = {
-                    DMI_MATCH(DMI_BOARD_VENDOR, "Dell Computer Corporation"),
-                    DMI_MATCH(DMI_BOARD_NAME, "OptiPlex GX240"),
-                    },
-        },
-       {
-        .callback = force_acpi_ht,
-        .ident = "HP VISUALIZE NT Workstation",
-        .matches = {
-                    DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
-                    DMI_MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"),
-                    },
-        },
-       {
-        .callback = force_acpi_ht,
-        .ident = "Compaq Workstation W8000",
-        .matches = {
-                    DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
-                    DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"),
-                    },
-        },
-       {
-        .callback = force_acpi_ht,
-        .ident = "ASUS P4B266",
-        .matches = {
-                    DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
-                    DMI_MATCH(DMI_BOARD_NAME, "P4B266"),
-                    },
-        },
-       {
-        .callback = force_acpi_ht,
-        .ident = "ASUS P2B-DS",
-        .matches = {
-                    DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
-                    DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
-                    },
-        },
-       {
-        .callback = force_acpi_ht,
-        .ident = "ASUS CUR-DLS",
-        .matches = {
-                    DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
-                    DMI_MATCH(DMI_BOARD_NAME, "CUR-DLS"),
-                    },
-        },
-       {
-        .callback = force_acpi_ht,
-        .ident = "ABIT i440BX-W83977",
-        .matches = {
-                    DMI_MATCH(DMI_BOARD_VENDOR, "ABIT <http://www.abit.com>"),
-                    DMI_MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"),
-                    },
-        },
-       {
-        .callback = force_acpi_ht,
-        .ident = "IBM Bladecenter",
-        .matches = {
-                    DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
-                    DMI_MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"),
-                    },
-        },
-       {
-        .callback = force_acpi_ht,
-        .ident = "IBM eServer xSeries 360",
-        .matches = {
-                    DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
-                    DMI_MATCH(DMI_BOARD_NAME, "eServer xSeries 360"),
-                    },
-        },
-       {
-        .callback = force_acpi_ht,
-        .ident = "IBM eserver xSeries 330",
-        .matches = {
-                    DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
-                    DMI_MATCH(DMI_BOARD_NAME, "eserver xSeries 330"),
-                    },
-        },
-       {
-        .callback = force_acpi_ht,
-        .ident = "IBM eserver xSeries 440",
-        .matches = {
-                    DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
-                    DMI_MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"),
-                    },
-        },
-
-       /*
-        * Boxes that need ACPI PCI IRQ routing disabled
-        */
-       {
-        .callback = disable_acpi_irq,
-        .ident = "ASUS A7V",
-        .matches = {
-                    DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"),
-                    DMI_MATCH(DMI_BOARD_NAME, "<A7V>"),
-                    /* newer BIOS, Revision 1011, does work */
-                    DMI_MATCH(DMI_BIOS_VERSION,
-                              "ASUS A7V ACPI BIOS Revision 1007"),
-                    },
-        },
-       {}
-};
-
-#endif                         /* __i386__ */
-
 /*
  * acpi_boot_table_init() and acpi_boot_init()
  *  called from setup_arch(), always.
@@ -785,10 +608,6 @@ int __init acpi_boot_table_init(void)
 {
        int error;
 
-#ifdef __i386__
-       dmi_check_system(acpi_dmi_table);
-#endif
-
        /*
         * If acpi_disabled, bail out
         * One exception: acpi=ht continues far enough to enumerate LAPICs
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/acpi/cpufreq/cpufreq.c
--- a/xen/arch/x86/acpi/cpufreq/cpufreq.c       Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/acpi/cpufreq/cpufreq.c       Wed Sep 12 13:29:30 2012 +0100
@@ -311,35 +311,6 @@ unsigned int get_measured_perf(unsigned 
     saved->aperf.whole = readin.aperf.whole;
     saved->mperf.whole = readin.mperf.whole;
 
-#ifdef __i386__
-    /*
-     * We dont want to do 64 bit divide with 32 bit kernel
-     * Get an approximate value. Return failure in case we cannot get
-     * an approximate value.
-     */
-    if (unlikely(cur.aperf.split.hi || cur.mperf.split.hi)) {
-        int shift_count;
-        uint32_t h;
-
-        h = max_t(uint32_t, cur.aperf.split.hi, cur.mperf.split.hi);
-        shift_count = fls(h);
-
-        cur.aperf.whole >>= shift_count;
-        cur.mperf.whole >>= shift_count;
-    }
-
-    if (((unsigned long)(-1) / 100) < cur.aperf.split.lo) {
-        int shift_count = 7;
-        cur.aperf.split.lo >>= shift_count;
-        cur.mperf.split.lo >>= shift_count;
-    }
-
-    if (cur.aperf.split.lo && cur.mperf.split.lo)
-        perf_percent = (cur.aperf.split.lo * 100) / cur.mperf.split.lo;
-    else
-        perf_percent = 0;
-
-#else
     if (unlikely(((unsigned long)(-1) / 100) < cur.aperf.whole)) {
         int shift_count = 7;
         cur.aperf.whole >>= shift_count;
@@ -351,8 +322,6 @@ unsigned int get_measured_perf(unsigned 
     else
         perf_percent = 0;
 
-#endif
-
     retval = policy->cpuinfo.max_freq * perf_percent / 100;
 
     return retval;
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/acpi/suspend.c
--- a/xen/arch/x86/acpi/suspend.c       Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/acpi/suspend.c       Wed Sep 12 13:29:30 2012 +0100
@@ -15,18 +15,15 @@
 #include <asm/i387.h>
 #include <xen/hypercall.h>
 
-#if defined(CONFIG_X86_64)
 static unsigned long saved_lstar, saved_cstar;
 static unsigned long saved_sysenter_esp, saved_sysenter_eip;
 static unsigned long saved_fs_base, saved_gs_base, saved_kernel_gs_base;
 static uint16_t saved_segs[4];
-#endif
 
 void save_rest_processor_state(void)
 {
     vcpu_save_fpu(current);
 
-#if defined(CONFIG_X86_64)
     asm volatile (
         "movw %%ds,(%0); movw %%es,2(%0); movw %%fs,4(%0); movw %%gs,6(%0)"
         : : "r" (saved_segs) : "memory" );
@@ -40,7 +37,6 @@ void save_rest_processor_state(void)
         rdmsrl(MSR_IA32_SYSENTER_ESP, saved_sysenter_esp);
         rdmsrl(MSR_IA32_SYSENTER_EIP, saved_sysenter_eip);
     }
-#endif
 }
 
 
@@ -50,7 +46,6 @@ void restore_rest_processor_state(void)
 
     load_TR();
 
-#if defined(CONFIG_X86_64)
     /* Recover syscall MSRs */
     wrmsrl(MSR_LSTAR, saved_lstar);
     wrmsrl(MSR_CSTAR, saved_cstar);
@@ -80,11 +75,6 @@ void restore_rest_processor_state(void)
         do_set_segment_base(SEGBASE_GS_USER_SEL, saved_segs[3]);
     }
 
-#else /* !defined(CONFIG_X86_64) */
-    if ( supervisor_mode_kernel && cpu_has_sep )
-        wrmsr(MSR_IA32_SYSENTER_ESP, &this_cpu(init_tss).esp1, 0);
-#endif
-
     /* Maybe load the debug registers. */
     BUG_ON(is_hvm_vcpu(curr));
     if ( !is_idle_vcpu(curr) && curr->arch.debugreg[7] )
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/acpi/wakeup_prot.S
--- a/xen/arch/x86/acpi/wakeup_prot.S   Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/acpi/wakeup_prot.S   Wed Sep 12 13:29:30 2012 +0100
@@ -8,8 +8,6 @@
 #include <asm/page.h>
 #include <asm/msr.h>
 
-#if defined(__x86_64__)
-
         .code64
 
 #define GREG(x)         %r##x
@@ -20,20 +18,6 @@
 
 #define REF(x)          x(%rip)
 
-#else /* !defined(__x86_64__) */
-
-        .code32
-
-#define GREG(x)         %e##x
-#define SAVED_GREG(x)   saved_e##x
-#define DECLARE_GREG(x) saved_e##x:     .long   0
-#define SAVE_GREG(x)    movl GREG(x), SAVED_GREG(x)
-#define LOAD_GREG(x)    movl SAVED_GREG(x), GREG(x)
-
-#define REF(x)          x
-
-#endif
-
 ENTRY(do_suspend_lowlevel)
 
         SAVE_GREG(sp)
@@ -45,8 +29,6 @@ ENTRY(do_suspend_lowlevel)
         SAVE_GREG(si)
         SAVE_GREG(di)
 
-#if defined(__x86_64__)
-
         SAVE_GREG(8)     # save r8...r15
         SAVE_GREG(9)
         SAVE_GREG(10)
@@ -61,16 +43,6 @@ ENTRY(do_suspend_lowlevel)
         mov     %cr8, GREG(ax)
         mov     GREG(ax), REF(saved_cr8)
 
-#else /* !defined(__x86_64__) */
-
-        pushfl;
-        popl    SAVED_GREG(flags)
-
-        mov     %ds, REF(saved_ds)
-        mov     %es, REF(saved_es)
-
-#endif
-
         mov     %ss, REF(saved_ss)
 
         sgdt    REF(saved_gdt)
@@ -85,17 +57,9 @@ ENTRY(do_suspend_lowlevel)
 
         call    save_rest_processor_state
 
-#if defined(__x86_64__)
-
         mov     $3, %rdi
         xor     %eax, %eax
 
-#else /* !defined(__x86_64__) */
-
-        push    $3
-
-#endif
-
         /* enter sleep state physically */
         call    acpi_enter_sleep_state
         jmp     __ret_point
@@ -121,7 +85,6 @@ __ret_point:
         mov     REF(saved_ss), %ss
         LOAD_GREG(sp)
 
-#if defined(__x86_64__)
         /* Reload code selector */
         pushq   $(__HYPERVISOR_CS64)
         leaq    1f(%rip),%rax
@@ -134,17 +97,6 @@ 1:
         pushq   SAVED_GREG(flags)
         popfq
 
-#else /* !defined(__x86_64__) */
-
-        pushl   SAVED_GREG(flags)
-        popfl
-
-        /* No reload to fs/gs, which is saved in bottom stack already */
-        mov     REF(saved_ds), %ds
-        mov     REF(saved_es), %es
-
-#endif
-
         call restore_rest_processor_state
 
         LOAD_GREG(bp)
@@ -154,7 +106,6 @@ 1:
         LOAD_GREG(dx)
         LOAD_GREG(si)
         LOAD_GREG(di)
-#if defined(__x86_64__)
         LOAD_GREG(8)     # save r8...r15
         LOAD_GREG(9)
         LOAD_GREG(10)
@@ -163,7 +114,6 @@ 1:
         LOAD_GREG(13)
         LOAD_GREG(14)
         LOAD_GREG(15)
-#endif
         ret 
 
 .data
@@ -184,8 +134,6 @@ DECLARE_GREG(si)
 DECLARE_GREG(di)
 DECLARE_GREG(flags)
 
-#if defined(__x86_64__)
-
 DECLARE_GREG(8)
 DECLARE_GREG(9)
 DECLARE_GREG(10)
@@ -202,17 +150,3 @@ saved_ldt:      .quad   0,0
 saved_cr0:      .quad   0
 saved_cr3:      .quad   0
 saved_cr8:      .quad   0
-
-#else /* !defined(__x86_64__) */
-
-saved_gdt:      .long   0,0
-saved_idt:      .long   0,0
-saved_ldt:      .long   0
-
-saved_cr0:      .long   0
-saved_cr3:      .long   0
-
-saved_ds:       .word   0
-saved_es:       .word   0
-
-#endif 
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/boot/head.S
--- a/xen/arch/x86/boot/head.S  Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/boot/head.S  Wed Sep 12 13:29:30 2012 +0100
@@ -119,7 +119,6 @@ 1:
 1:      mov     %edx,sym_phys(cpuid_ext_features)
         mov     %edx,sym_phys(boot_cpu_data)+CPUINFO86_ext_features
 
-#if defined(__x86_64__)
         /* Check for availability of long mode. */
         bt      $29,%edx
         jnc     bad_cpu
@@ -138,7 +137,6 @@ 1:      mov     %eax,(%edx)
         mov     $sym_phys(l1_identmap)+__PAGE_HYPERVISOR,%edi
         mov     %edi,sym_phys(l2_xenmap)
         mov     %edi,sym_phys(l2_bootmap)
-#endif
 
         /* Apply relocations to bootstrap trampoline. */
         mov     sym_phys(trampoline_phys),%edx
@@ -191,11 +189,7 @@ trampoline_end:
 
         .text
 __high_start:
-#ifdef __x86_64__
 #include "x86_64.S"
-#else
-#include "x86_32.S"
-#endif
 
         .section .data.page_aligned, "aw", @progbits
         .p2align PAGE_SHIFT
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/boot/trampoline.S
--- a/xen/arch/x86/boot/trampoline.S    Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/boot/trampoline.S    Wed Sep 12 13:29:30 2012 +0100
@@ -109,8 +109,6 @@ 1:      wrmsr
         jmp     1f
 1:
 
-#if defined(__x86_64__)
-
         /* Now in compatibility mode. Long-jump into 64-bit mode. */
         ljmp    $BOOT_CS64,$bootsym_rel(start64,6)
 
@@ -123,20 +121,6 @@ start64:
 high_start:
         .quad   __high_start
 
-#else /* !defined(__x86_64__) */
-
-        /* Install relocated selectors. */
-        lgdt    gdt_descr
-        mov     $(__HYPERVISOR_DS),%eax
-        mov     %eax,%ds
-        mov     %eax,%es
-        mov     %eax,%fs
-        mov     %eax,%gs
-        mov     %eax,%ss
-        ljmp    $(__HYPERVISOR_CS),$__high_start
-
-#endif
-
         .code32
 trampoline_boot_cpu_entry:
         cmpb    $0,bootsym_rel(skip_realmode,5)
@@ -169,7 +153,6 @@ 1:      mov     %cs,%ax
         lidt    bootsym(rm_idt)
         sti
 
-#if defined(__x86_64__)
         /*
          * Declare that our target operating mode is long mode.
          * Initialise 32-bit registers since some buggy BIOSes depend on it.
@@ -177,7 +160,6 @@ 1:      mov     %cs,%ax
         movl    $0xec00,%eax      # declare target operating mode
         movl    $0x0002,%ebx      # long mode
         int     $0x15
-#endif
 
         /*
          * Do real-mode work:
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/boot/wakeup.S
--- a/xen/arch/x86/boot/wakeup.S        Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/boot/wakeup.S        Wed Sep 12 13:29:30 2012 +0100
@@ -161,8 +161,6 @@ 1:      wrmsr
         jmp     1f
 1:
 
-#if defined(__x86_64__)
-
         /* Now in compatibility mode. Long-jump to 64-bit mode */
         ljmp    $BOOT_CS64, $bootsym_rel(wakeup_64,6)
 
@@ -175,16 +173,6 @@ wakeup_64:
 ret_point:
         .quad   __ret_point
 
-#else /* !defined(__x86_64__) */
-
-        lgdt    gdt_descr
-        mov     $(__HYPERVISOR_DS), %eax
-        mov     %eax, %ds
-
-        ljmp    $(__HYPERVISOR_CS), $__ret_point
-
-#endif
-
 bogus_saved_magic:
         movw    $0x0e00 + 'S', 0xb8014
         jmp     bogus_saved_magic
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/boot/x86_32.S
--- a/xen/arch/x86/boot/x86_32.S        Wed Sep 12 13:24:28 2012 +0200
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,131 +0,0 @@
-        .code32
-        
-        /* Enable full CR4 features. */
-        mov     mmu_cr4_features,%eax
-        mov     %eax,%cr4
-        
-        /* Initialise stack. */
-        mov     stack_start,%esp
-        or      $(STACK_SIZE-CPUINFO_sizeof),%esp
-        
-        /* Reset EFLAGS (subsumes CLI and CLD). */
-        pushl   $0
-        popf
-
-        lidt    idt_descr
-
-        test    %ebx,%ebx
-        jnz     start_secondary
-
-        /* Initialise IDT with simple error defaults. */
-        lea     ignore_int,%edx
-        mov     $(__HYPERVISOR_CS << 16),%eax
-        mov     %dx,%ax            /* selector = 0x0010 = cs */
-        mov     $0x8E00,%dx        /* interrupt gate - dpl=0, present */
-        lea     idt_table,%edi
-        mov     $256,%ecx
-1:      mov     %eax,(%edi)
-        mov     %edx,4(%edi)
-        add     $8,%edi
-        loop    1b
-                
-        /* Pass off the Multiboot info structure to C land. */
-        pushl   multiboot_ptr
-        call    __start_xen
-        ud2     /* Force a panic (invalid opcode). */
-
-/* This is the default interrupt handler. */
-int_msg:
-        .asciz "Unknown interrupt (cr2=%08x)\n"
-hex_msg:
-        .asciz "  %08x"
-        ALIGN
-ignore_int:
-        pusha
-        cld
-        mov     $(__HYPERVISOR_DS),%eax
-        mov     %eax,%ds
-        mov     %eax,%es
-        mov     %cr2,%eax
-        push    %eax
-        pushl   $int_msg
-        call    printk
-        add     $8,%esp
-        mov     %esp,%ebp
-0:      pushl   (%ebp)
-        add     $4,%ebp
-        pushl   $hex_msg
-        call    printk
-        add     $8,%esp
-        test    $0xffc,%ebp
-        jnz     0b
-1:      jmp     1b
-
-        .data
-        ALIGN
-ENTRY(stack_start)
-        .long cpu0_stack
-        
-/*** DESCRIPTOR TABLES ***/
-
-        ALIGN
-multiboot_ptr:
-        .long   0
-        
-        .word   0    
-idt_descr:
-        .word   256*8-1
-        .long   idt_table
-
-        .word   0
-gdt_descr:
-        .word   LAST_RESERVED_GDT_BYTE
-        .long   boot_cpu_gdt_table - FIRST_RESERVED_GDT_BYTE
-
-
-        .align 32
-ENTRY(idle_pg_table)
-        .long sym_phys(idle_pg_table_l2) + 0*PAGE_SIZE + 0x01, 0
-        .long sym_phys(idle_pg_table_l2) + 1*PAGE_SIZE + 0x01, 0
-        .long sym_phys(idle_pg_table_l2) + 2*PAGE_SIZE + 0x01, 0
-        .long sym_phys(idle_pg_table_l2) + 3*PAGE_SIZE + 0x01, 0
-
-        .section .data.page_aligned, "aw", @progbits
-        .align PAGE_SIZE, 0
-/* NB. Rings != 0 get access up to MACH2PHYS_VIRT_END. This allows access to */
-/*     the machine->physical mapping table. Ring 0 can access all memory.    */
-#define GUEST_DESC(d)                                                   \
-        .long ((MACH2PHYS_VIRT_END - 1) >> 12) & 0xffff,                \
-              ((MACH2PHYS_VIRT_END - 1) >> 12) & (0xf << 16) | (d)
-ENTRY(boot_cpu_gdt_table)
-        .quad 0x0000000000000000     /* double fault TSS */
-        .quad 0x00cf9a000000ffff     /* 0xe008 ring 0 4.00GB code at 0x0 */
-        .quad 0x00cf92000000ffff     /* 0xe010 ring 0 4.00GB data at 0x0 */
-        GUEST_DESC(0x00c0ba00)       /* 0xe019 ring 1 3.xxGB code at 0x0 */
-        GUEST_DESC(0x00c0b200)       /* 0xe021 ring 1 3.xxGB data at 0x0 */
-        GUEST_DESC(0x00c0fa00)       /* 0xe02b ring 3 3.xxGB code at 0x0 */
-        GUEST_DESC(0x00c0f200)       /* 0xe033 ring 3 3.xxGB data at 0x0 */
-        .fill (PER_CPU_GDT_ENTRY - FLAT_RING3_DS / 8 - 1), 8, 0
-        .quad 0x0000910000000000     /* per-CPU entry (limit == cpu) */
-        .align PAGE_SIZE,0
-
-#define PAGE_HYPERVISOR         __PAGE_HYPERVISOR
-#define PAGE_HYPERVISOR_NOCACHE __PAGE_HYPERVISOR_NOCACHE
-
-/* Mapping of first 16 megabytes of memory. */
-        .globl idle_pg_table_l2
-idle_pg_table_l2:
-        range = 8
-        .irp count, l2_linear_offset(__PAGE_OFFSET), \
-                    (4 * L2_PAGETABLE_ENTRIES - l2_linear_offset(__PAGE_OFFSET) - 1)
-        .long sym_phys(l1_identmap) + PAGE_HYPERVISOR, 0
-        pfn = 1 << PAGETABLE_ORDER
-        .rept range - 1
-        .long (pfn << PAGE_SHIFT) | PAGE_HYPERVISOR | _PAGE_PSE, 0
-        pfn = pfn + (1 << PAGETABLE_ORDER)
-        .endr
-        .fill \count - range, 8, 0
-        range = DIRECTMAP_MBYTES / 2
-        .endr
-        .long sym_phys(l1_fixmap) + PAGE_HYPERVISOR, 0
-        .size idle_pg_table_l2, . - idle_pg_table_l2
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/clear_page.S
--- a/xen/arch/x86/clear_page.S Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/clear_page.S Wed Sep 12 13:29:30 2012 +0100
@@ -1,16 +1,9 @@
 #include <xen/config.h>
 #include <asm/page.h>
 
-#ifdef __i386__
-#define ptr_reg %edx
-#else
 #define ptr_reg %rdi
-#endif
 
 ENTRY(clear_page_sse2)
-#ifdef __i386__
-        mov     4(%esp), ptr_reg
-#endif
         mov     $PAGE_SIZE/16, %ecx
         xor     %eax,%eax
 
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/copy_page.S
--- a/xen/arch/x86/copy_page.S  Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/copy_page.S  Wed Sep 12 13:29:30 2012 +0100
@@ -1,15 +1,6 @@
 #include <xen/config.h>
 #include <asm/page.h>
 
-#ifdef __i386__
-#define src_reg %esi
-#define dst_reg %edi
-#define WORD_SIZE 4
-#define tmp1_reg %eax
-#define tmp2_reg %edx
-#define tmp3_reg %ebx
-#define tmp4_reg %ebp
-#else
 #define src_reg %rsi
 #define dst_reg %rdi
 #define WORD_SIZE 8
@@ -17,17 +8,8 @@
 #define tmp2_reg %r9
 #define tmp3_reg %r10
 #define tmp4_reg %r11
-#endif
 
 ENTRY(copy_page_sse2)
-#ifdef __i386__
-        push    %ebx
-        push    %ebp
-        push    %esi
-        push    %edi
-        mov     6*4(%esp), src_reg
-        mov     5*4(%esp), dst_reg
-#endif
         mov     $PAGE_SIZE/(4*WORD_SIZE)-3, %ecx
 
         prefetchnta 2*4*WORD_SIZE(src_reg)
@@ -56,11 +38,5 @@ 1:      add     $4*WORD_SIZE, src_reg
         movnti  tmp3_reg, 2*WORD_SIZE(dst_reg)
         movnti  tmp4_reg, 3*WORD_SIZE(dst_reg)
 
-#ifdef __i386__
-        pop     %edi
-        pop     %esi
-        pop     %ebp
-        pop     %ebx
-#endif
         sfence
         ret
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/cpu/Makefile
--- a/xen/arch/x86/cpu/Makefile Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/cpu/Makefile Wed Sep 12 13:29:30 2012 +0100
@@ -6,6 +6,5 @@ obj-y += common.o
 obj-y += intel.o
 obj-y += intel_cacheinfo.o
 
-obj-$(x86_32) += centaur.o
-obj-$(x86_32) += cyrix.o
-obj-$(x86_32) += transmeta.o
+# Keeping around for VIA support (JBeulich)
+# obj-$(x86_32) += centaur.o
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/cpu/amd.c
--- a/xen/arch/x86/cpu/amd.c    Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/cpu/amd.c    Wed Sep 12 13:29:30 2012 +0100
@@ -32,11 +32,9 @@
 static char opt_famrev[14];
 string_param("cpuid_mask_cpu", opt_famrev);
 
-#ifdef __x86_64__
 /* 1 = allow, 0 = don't allow guest creation, -1 = don't allow boot */
 s8 __read_mostly opt_allow_unsafe;
 boolean_param("allow_unsafe", opt_allow_unsafe);
-#endif
 
 static inline void wrmsr_amd(unsigned int index, unsigned int lo, 
                unsigned int hi)
@@ -400,7 +398,6 @@ static void __devinit init_amd(struct cp
           3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
        clear_bit(0*32+31, c->x86_capability);
        
-#ifdef CONFIG_X86_64
        if (c->x86 == 0xf && c->x86_model < 0x14
            && cpu_has(c, X86_FEATURE_LAHF_LM)) {
                /*
@@ -416,7 +413,6 @@ static void __devinit init_amd(struct cp
                        wrmsr_amd_safe(0xc001100d, lo, hi);
                }
        }
-#endif
 
        switch(c->x86)
        {
@@ -498,7 +494,6 @@ static void __devinit init_amd(struct cp
        if (c->x86 >= 0x10 && !force_mwait)
                clear_bit(X86_FEATURE_MWAIT, c->x86_capability);
 
-#ifdef __x86_64__
        if (!cpu_has_amd_erratum(c, AMD_ERRATUM_121))
                opt_allow_unsafe = 1;
        else if (opt_allow_unsafe < 0)
@@ -523,7 +518,6 @@ static void __devinit init_amd(struct cp
 
                fam10h_check_enable_mmcfg();
        }
-#endif
 
        /*
         * Family 0x12 and above processors have APIC timer
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/cpu/common.c
--- a/xen/arch/x86/cpu/common.c Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/cpu/common.c Wed Sep 12 13:29:30 2012 +0100
@@ -281,31 +281,6 @@ static void __cpuinit generic_identify(s
 #endif
 }
 
-#ifdef __i386__
-
-static bool_t __cpuinitdata disable_x86_fxsr;
-boolean_param("nofxsr", disable_x86_fxsr);
-
-static bool_t __cpuinitdata disable_x86_serial_nr;
-boolean_param("noserialnumber", disable_x86_serial_nr);
-
-static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
-{
-       if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
-               /* Disable processor serial number */
-               uint64_t msr_content;
-               rdmsrl(MSR_IA32_BBL_CR_CTL,msr_content);
-               wrmsrl(MSR_IA32_BBL_CR_CTL, msr_content | 0x200000);
-               printk(KERN_NOTICE "CPU serial number disabled.\n");
-               clear_bit(X86_FEATURE_PN, c->x86_capability);
-
-               /* Disabling the serial number may affect the cpuid level */
-               c->cpuid_level = cpuid_eax(0);
-       }
-}
-
-#endif
-
 /*
  * This does the hard work of actually picking apart the CPU stuff...
  */
@@ -372,20 +347,6 @@ void __cpuinit identify_cpu(struct cpuin
         * we do "generic changes."
         */
 
-#ifdef __i386__
-       /* Disable the PN if appropriate */
-       squash_the_stupid_serial_number(c);
-
-       /* FXSR disabled? */
-       if (disable_x86_fxsr) {
-               clear_bit(X86_FEATURE_FXSR, c->x86_capability);
-               if (!cpu_has_xsave) {
-                       clear_bit(X86_FEATURE_XMM, c->x86_capability);
-                       clear_bit(X86_FEATURE_AES, c->x86_capability);
-               }
-       }
-#endif
-
        for (i = 0 ; i < NCAPINTS ; ++i)
                c->x86_capability[i] &= ~cleared_caps[i];
 
@@ -602,12 +563,6 @@ void __init early_cpu_init(void)
 {
        intel_cpu_init();
        amd_init_cpu();
-#ifdef CONFIG_X86_32
-       cyrix_init_cpu();
-       nsc_init_cpu();
-       centaur_init_cpu();
-       transmeta_init_cpu();
-#endif
        early_cpu_detect();
 }
 /*
@@ -648,16 +603,9 @@ void __cpuinit cpu_init(void)
 
        /* Set up and load the per-CPU TSS and LDT. */
        t->bitmap = IOBMP_INVALID_OFFSET;
-#if defined(CONFIG_X86_32)
-       t->ss0  = __HYPERVISOR_DS;
-       t->esp0 = get_stack_bottom();
-       if ( supervisor_mode_kernel && cpu_has_sep )
-               wrmsr(MSR_IA32_SYSENTER_ESP, &t->esp1, 0);
-#elif defined(CONFIG_X86_64)
        /* Bottom-of-stack must be 16-byte aligned! */
        BUG_ON((get_stack_bottom() & 15) != 0);
        t->rsp0 = get_stack_bottom();
-#endif
        load_TR();
        asm volatile ( "lldt %%ax" : : "a" (0) );
 
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/cpu/cyrix.c
--- a/xen/arch/x86/cpu/cyrix.c  Wed Sep 12 13:24:28 2012 +0200
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,313 +0,0 @@
-#include <xen/config.h>
-#include <xen/init.h>
-#include <xen/irq.h>
-#include <xen/bitops.h>
-#include <xen/delay.h>
-#include <asm/io.h>
-#include <asm/processor.h>
-
-#include "cpu.h"
-
-/*
- * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU
- */
-void __init do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
-{
-       unsigned char ccr3;
-       unsigned long flags;
-       
-       /* we test for DEVID by checking whether CCR3 is writable */
-       local_irq_save(flags);
-       ccr3 = getCx86(CX86_CCR3);
-       setCx86(CX86_CCR3, ccr3 ^ 0x80);
-       getCx86(0xc0);   /* dummy to change bus */
-
-       if (getCx86(CX86_CCR3) == ccr3)       /* no DEVID regs. */
-               BUG();
-       else {
-               setCx86(CX86_CCR3, ccr3);  /* restore CCR3 */
-
-               /* read DIR0 and DIR1 CPU registers */
-               *dir0 = getCx86(CX86_DIR0);
-               *dir1 = getCx86(CX86_DIR1);
-       }
-       local_irq_restore(flags);
-}
-
-/*
- * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in
- * order to identify the Cyrix CPU model after we're out of setup.c
- *
- * Actually since bugs.h doesn't even reference this perhaps someone should
- * fix the documentation ???
- */
-static unsigned char Cx86_dir0_msb __initdata = 0;
-
-static char Cx86_model[][9] __initdata = {
-       "Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
-       "M II ", "Unknown"
-};
-static char Cx86_cb[] __initdata = "?.5x Core/Bus Clock";
-static char cyrix_model_mult1[] __initdata = "12??43";
-static char cyrix_model_mult2[] __initdata = "12233445";
-
-/*
- * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old
- * BIOSes for compatibility with DOS games.  This makes the udelay loop
- * work correctly, and improves performance.
- *
- * FIXME: our newer udelay uses the tsc. We don't need to frob with SLOP
- */
-
-static void __init check_cx686_slop(struct cpuinfo_x86 *c)
-{
-       unsigned long flags;
-       
-       if (Cx86_dir0_msb == 3) {
-               unsigned char ccr3, ccr5;
-
-               local_irq_save(flags);
-               ccr3 = getCx86(CX86_CCR3);
-               setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN  */
-               ccr5 = getCx86(CX86_CCR5);
-               if (ccr5 & 2)
-                       setCx86(CX86_CCR5, ccr5 & 0xfd);  /* reset SLOP */
-               setCx86(CX86_CCR3, ccr3);                 /* disable MAPEN */
-               local_irq_restore(flags);
-       }
-}
-
-
-static void __init set_cx86_reorder(void)
-{
-       u8 ccr3;
-
-       printk(KERN_INFO "Enable Memory access reorder on Cyrix/NSC 
processor.\n");
-       ccr3 = getCx86(CX86_CCR3);
-       setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN  */
-
-       /* Load/Store Serialize to mem access disable (=reorder it)  */
-       setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80);
-       /* set load/store serialize from 1GB to 4GB */
-       ccr3 |= 0xe0;
-       setCx86(CX86_CCR3, ccr3);
-}
-
-static void __init set_cx86_memwb(void)
-{
-       u32 cr0;
-
-       printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC 
processor.\n");
-
-       /* CCR2 bit 2: unlock NW bit */
-       setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04);
-       /* set 'Not Write-through' */
-       cr0 = 0x20000000;
-       __asm__("movl %%cr0,%%eax\n\t"
-               "orl %0,%%eax\n\t"
-               "movl %%eax,%%cr0\n"
-               : : "r" (cr0)
-               :"ax");
-       /* CCR2 bit 2: lock NW bit and set WT1 */
-       setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14 );
-}
-
-static void __init set_cx86_inc(void)
-{
-       unsigned char ccr3;
-
-       printk(KERN_INFO "Enable Incrementor on Cyrix/NSC processor.\n");
-
-       ccr3 = getCx86(CX86_CCR3);
-       setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN  */
-       /* PCR1 -- Performance Control */
-       /* Incrementor on, whatever that is */
-       setCx86(CX86_PCR1, getCx86(CX86_PCR1) | 0x02);
-       /* PCR0 -- Performance Control */
-       /* Incrementor Margin 10 */
-       setCx86(CX86_PCR0, getCx86(CX86_PCR0) | 0x04);
-       setCx86(CX86_CCR3, ccr3);       /* disable MAPEN */
-}
-
-/*
- *     Configure later MediaGX and/or Geode processor.
- */
-
-static void __init geode_configure(void)
-{
-       unsigned long flags;
-       u8 ccr3, ccr4;
-       local_irq_save(flags);
-
-       /* Suspend on halt power saving and enable #SUSP pin */
-       setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
-
-       ccr3 = getCx86(CX86_CCR3);
-       setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);       /* Enable */
-       
-       ccr4 = getCx86(CX86_CCR4);
-       ccr4 |= 0x38;           /* FPU fast, DTE cache, Mem bypass */
-       
-       setCx86(CX86_CCR3, ccr3);
-       
-       set_cx86_memwb();
-       set_cx86_reorder();     
-       set_cx86_inc();
-       
-       local_irq_restore(flags);
-}
-
-
-static void __init init_cyrix(struct cpuinfo_x86 *c)
-{
-       unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0;
-       const char *p = NULL;
-
-       /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
-          3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
-       clear_bit(0*32+31, c->x86_capability);
-
-       /* Cyrix used bit 24 in extended (AMD) CPUID for Cyrix MMX extensions */
-       if ( test_bit(1*32+24, c->x86_capability) ) {
-               clear_bit(1*32+24, c->x86_capability);
-               set_bit(X86_FEATURE_CXMMX, c->x86_capability);
-       }
-
-       do_cyrix_devid(&dir0, &dir1);
-
-       check_cx686_slop(c);
-
-       Cx86_dir0_msb = dir0_msn = dir0 >> 4; /* identifies CPU "family"   */
-       dir0_lsn = dir0 & 0xf;                /* model or clock multiplier */
-
-       /* common case step number/rev -- exceptions handled below */
-       c->x86_model = (dir1 >> 4) + 1;
-       c->x86_mask = dir1 & 0xf;
-
-       /* Now cook; the original recipe is by Channing Corn, from Cyrix.
-        * We do the same thing for each generation: we work out
-        * the model, multiplier and stepping.  Black magic included,
-        * to make the silicon step/rev numbers match the printed ones.
-        */
-        
-       switch (dir0_msn) {
-               unsigned char tmp;
-
-       case 3: /* 6x86/6x86L */
-               Cx86_cb[1] = ' ';
-               Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
-               if (dir1 > 0x21) { /* 686L */
-                       Cx86_cb[0] = 'L';
-                       p = Cx86_cb;
-                       (c->x86_model)++;
-               } else             /* 686 */
-                       p = Cx86_cb+1;
-               /* Emulate MTRRs using Cyrix's ARRs. */
-               set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability);
-               /* 6x86's contain this bug */
-               /*c->coma_bug = 1;*/
-               break;
-
-       case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */
-               c->x86_cache_size=16;   /* Yep 16K integrated cache thats it */
- 
-               /* GXm supports extended cpuid levels 'ala' AMD */
-               if (c->cpuid_level == 2) {
-                       /* Enable cxMMX extensions (GX1 Datasheet 54) */
-                       setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1);
-                       
-                       /* GXlv/GXm/GX1 */
-                       if((dir1 >= 0x50 && dir1 <= 0x54) || dir1 >= 0x63)
-                               geode_configure();
-                       get_model_name(c);  /* get CPU marketing name */
-                       return;
-               }
-               else {  /* MediaGX */
-                       Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4';
-                       p = Cx86_cb+2;
-                       c->x86_model = (dir1 & 0x20) ? 1 : 2;
-               }
-               break;
-
-        case 5: /* 6x86MX/M II */
-               if (dir1 > 7)
-               {
-                       dir0_msn++;  /* M II */
-                       /* Enable MMX extensions (App note 108) */
-                       setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1);
-               }
-               else
-               {
-                       /*c->coma_bug = 1;*/      /* 6x86MX, it has the bug. */
-               }
-               tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0;
-               Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7];
-               p = Cx86_cb+tmp;
-               if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20))
-                       (c->x86_model)++;
-               /* Emulate MTRRs using Cyrix's ARRs. */
-               set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability);
-               break;
-
-       default:  /* unknown (shouldn't happen, we know everyone ;-) */
-               dir0_msn = 7;
-               break;
-       }
-       safe_strcpy(c->x86_model_id, Cx86_model[dir0_msn & 7]);
-       if (p) safe_strcat(c->x86_model_id, p);
-       return;
-}
-
-/*
- * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
- * by the fact that they preserve the flags across the division of 5/2.
- * PII and PPro exhibit this behavior too, but they have cpuid available.
- */
- 
-/*
- * Perform the Cyrix 5/2 test. A Cyrix won't change
- * the flags, while other 486 chips will.
- */
-static inline int test_cyrix_52div(void)
-{
-       unsigned int test;
-
-       __asm__ __volatile__(
-            "sahf\n\t"         /* clear flags (%eax = 0x0005) */
-            "div %b2\n\t"      /* divide 5 by 2 */
-            "lahf"             /* store flags into %ah */
-            : "=a" (test)
-            : "0" (5), "q" (2)
-            : "cc");
-
-       /* AH is 0x02 on Cyrix after the divide.. */
-       return (unsigned char) (test >> 8) == 0x02;
-}
-
-static struct cpu_dev cyrix_cpu_dev __cpuinitdata = {
-       .c_vendor       = "Cyrix",
-       .c_ident        = { "CyrixInstead" },
-       .c_init         = init_cyrix,
-};
-
-int __init cyrix_init_cpu(void)
-{
-       cpu_devs[X86_VENDOR_CYRIX] = &cyrix_cpu_dev;
-       return 0;
-}
-
-//early_arch_initcall(cyrix_init_cpu);
-
-static struct cpu_dev nsc_cpu_dev __cpuinitdata = {
-       .c_vendor       = "NSC",
-       .c_ident        = { "Geode by NSC" },
-       .c_init         = init_cyrix,
-};
-
-int __init nsc_init_cpu(void)
-{
-       cpu_devs[X86_VENDOR_NSC] = &nsc_cpu_dev;
-       return 0;
-}
-
-//early_arch_initcall(nsc_init_cpu);
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/cpu/mcheck/mce_intel.c
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c       Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c       Wed Sep 12 13:29:30 2012 +0100
@@ -445,10 +445,8 @@ intel_get_extended_msrs(struct mcinfo_gl
     for (i = MSR_IA32_MCG_EAX; i <= MSR_IA32_MCG_MISC; i++)
         intel_get_extended_msr(mc_ext, i);
 
-#ifdef __x86_64__
     for (i = MSR_IA32_MCG_R8; i <= MSR_IA32_MCG_R15; i++)
         intel_get_extended_msr(mc_ext, i);
-#endif
 
     return mc_ext;
 }
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/cpu/mtrr/Makefile
--- a/xen/arch/x86/cpu/mtrr/Makefile    Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/cpu/mtrr/Makefile    Wed Sep 12 13:29:30 2012 +0100
@@ -1,5 +1,2 @@
-obj-$(x86_32) += amd.o
-obj-$(x86_32) += cyrix.o
 obj-y += generic.o
 obj-y += main.o
-obj-$(x86_32) += state.o
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/cpu/mtrr/amd.c
--- a/xen/arch/x86/cpu/mtrr/amd.c       Wed Sep 12 13:24:28 2012 +0200
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,121 +0,0 @@
-#include <xen/init.h>
-#include <xen/mm.h>
-#include <asm/mtrr.h>
-#include <asm/msr.h>
-
-#include "mtrr.h"
-
-static void
-amd_get_mtrr(unsigned int reg, unsigned long *base,
-            unsigned long *size, mtrr_type * type)
-{
-       unsigned long low, high;
-
-       rdmsr(MSR_K6_UWCCR, low, high);
-       /*  Upper dword is region 1, lower is region 0  */
-       if (reg == 1)
-               low = high;
-       /*  The base masks off on the right alignment  */
-       *base = (low & 0xFFFE0000) >> PAGE_SHIFT;
-       *type = 0;
-       if (low & 1)
-               *type = MTRR_TYPE_UNCACHABLE;
-       if (low & 2)
-               *type = MTRR_TYPE_WRCOMB;
-       if (!(low & 3)) {
-               *size = 0;
-               return;
-       }
-       /*
-        *  This needs a little explaining. The size is stored as an
-        *  inverted mask of bits of 128K granularity 15 bits long offset
-        *  2 bits
-        *
-        *  So to get a size we do invert the mask and add 1 to the lowest
-        *  mask bit (4 as its 2 bits in). This gives us a size we then shift
-        *  to turn into 128K blocks
-        *
-        *  eg              111 1111 1111 1100      is 512K
-        *
-        *  invert          000 0000 0000 0011
-        *  +1              000 0000 0000 0100
-        *  *128K   ...
-        */
-       low = (~low) & 0x1FFFC;
-       *size = (low + 4) << (15 - PAGE_SHIFT);
-       return;
-}
-
-static void amd_set_mtrr(unsigned int reg, unsigned long base,
-                        unsigned long size, mtrr_type type)
-/*  [SUMMARY] Set variable MTRR register on the local CPU.
-    <reg> The register to set.
-    <base> The base address of the region.
-    <size> The size of the region. If this is 0 the region is disabled.
-    <type> The type of the region.
-    <do_safe> If TRUE, do the change safely. If FALSE, safety measures should
-    be done externally.
-    [RETURNS] Nothing.
-*/
-{
-       u32 regs[2];
-
-       /*
-        *  Low is MTRR0 , High MTRR 1
-        */
-       rdmsr(MSR_K6_UWCCR, regs[0], regs[1]);
-       /*
-        *  Blank to disable
-        */
-       if (size == 0)
-               regs[reg] = 0;
-       else
-               /* Set the register to the base, the type (off by one) and an
-                  inverted bitmask of the size The size is the only odd
-                  bit. We are fed say 512K We invert this and we get 111 1111
-                  1111 1011 but if you subtract one and invert you get the
-                  desired 111 1111 1111 1100 mask
-
-                  But ~(x - 1) == ~x + 1 == -x. Two's complement rocks!  */
-               regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC)
-                   | (base << PAGE_SHIFT) | (type + 1);
-
-       /*
-        *  The writeback rule is quite specific. See the manual. Its
-        *  disable local interrupts, write back the cache, set the mtrr
-        */
-       wbinvd();
-       wrmsr(MSR_K6_UWCCR, regs[0], regs[1]);
-}
-
-static int amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
-{
-       /* Apply the K6 block alignment and size rules
-          In order
-          o Uncached or gathering only
-          o 128K or bigger block
-          o Power of 2 block
-          o base suitably aligned to the power
-       */
-       if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT))
-           || (size & ~(size - 1)) - size || (base & (size - 1)))
-               return -EINVAL;
-       return 0;
-}
-
-static const struct mtrr_ops amd_mtrr_ops = {
-       .vendor            = X86_VENDOR_AMD,
-       .set               = amd_set_mtrr,
-       .get               = amd_get_mtrr,
-       .get_free_region   = generic_get_free_region,
-       .validate_add_page = amd_validate_add_page,
-       .have_wrcomb       = positive_have_wrcomb,
-};
-
-int __init amd_init_mtrr(void)
-{
-       set_mtrr_ops(&amd_mtrr_ops);
-       return 0;
-}
-
-//arch_initcall(amd_mtrr_init);
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/cpu/mtrr/cyrix.c
--- a/xen/arch/x86/cpu/mtrr/cyrix.c     Wed Sep 12 13:24:28 2012 +0200
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,376 +0,0 @@
-#include <xen/init.h>
-#include <xen/mm.h>
-#include <asm/mtrr.h>
-#include <asm/msr.h>
-#include <asm/io.h>
-#include "mtrr.h"
-
-int arr3_protected;
-
-static void
-cyrix_get_arr(unsigned int reg, unsigned long *base,
-             unsigned long *size, mtrr_type * type)
-{
-       unsigned long flags;
-       unsigned char arr, ccr3, rcr, shift;
-
-       arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */
-
-       /* Save flags and disable interrupts */
-       local_irq_save(flags);
-
-       ccr3 = getCx86(CX86_CCR3);
-       setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);       /* enable MAPEN */
-       ((unsigned char *) base)[3] = getCx86(arr);
-       ((unsigned char *) base)[2] = getCx86(arr + 1);
-       ((unsigned char *) base)[1] = getCx86(arr + 2);
-       rcr = getCx86(CX86_RCR_BASE + reg);
-       setCx86(CX86_CCR3, ccr3);       /* disable MAPEN */
-
-       /* Enable interrupts if it was enabled previously */
-       local_irq_restore(flags);
-       shift = ((unsigned char *) base)[1] & 0x0f;
-       *base >>= PAGE_SHIFT;
-
-       /* Power of two, at least 4K on ARR0-ARR6, 256K on ARR7
-        * Note: shift==0xf means 4G, this is unsupported.
-        */
-       if (shift)
-               *size = (reg < 7 ? 0x1UL : 0x40UL) << (shift - 1);
-       else
-               *size = 0;
-
-       /* Bit 0 is Cache Enable on ARR7, Cache Disable on ARR0-ARR6 */
-       if (reg < 7) {
-               switch (rcr) {
-               case 1:
-                       *type = MTRR_TYPE_UNCACHABLE;
-                       break;
-               case 8:
-                       *type = MTRR_TYPE_WRBACK;
-                       break;
-               case 9:
-                       *type = MTRR_TYPE_WRCOMB;
-                       break;
-               case 24:
-               default:
-                       *type = MTRR_TYPE_WRTHROUGH;
-                       break;
-               }
-       } else {
-               switch (rcr) {
-               case 0:
-                       *type = MTRR_TYPE_UNCACHABLE;
-                       break;
-               case 8:
-                       *type = MTRR_TYPE_WRCOMB;
-                       break;
-               case 9:
-                       *type = MTRR_TYPE_WRBACK;
-                       break;
-               case 25:
-               default:
-                       *type = MTRR_TYPE_WRTHROUGH;
-                       break;
-               }
-       }
-}
-
-static int
-cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg)
-/*  [SUMMARY] Get a free ARR.
-    <base> The starting (base) address of the region.
-    <size> The size (in bytes) of the region.
-    [RETURNS] The index of the region on success, else -1 on error.
-*/
-{
-       int i;
-       mtrr_type ltype;
-       unsigned long lbase, lsize;
-
-       switch (replace_reg) {
-       case 7:
-               if (size < 0x40)
-                       break;
-       case 6:
-       case 5:
-       case 4:
-               return replace_reg;
-       case 3:
-               if (arr3_protected)
-                       break;
-       case 2:
-       case 1:
-       case 0:
-               return replace_reg;
-       }
-       /* If we are to set up a region >32M then look at ARR7 immediately */
-       if (size > 0x2000) {
-               cyrix_get_arr(7, &lbase, &lsize, &ltype);
-               if (lsize == 0)
-                       return 7;
-               /*  Else try ARR0-ARR6 first  */
-       } else {
-               for (i = 0; i < 7; i++) {
-                       cyrix_get_arr(i, &lbase, &lsize, &ltype);
-                       if ((i == 3) && arr3_protected)
-                               continue;
-                       if (lsize == 0)
-                               return i;
-               }
-               /* ARR0-ARR6 isn't free, try ARR7 but its size must be at least 256K */
-               cyrix_get_arr(i, &lbase, &lsize, &ltype);
-               if ((lsize == 0) && (size >= 0x40))
-                       return i;
-       }
-       return -ENOSPC;
-}
-
-static u32 cr4 = 0;
-static u32 ccr3;
-
-static void prepare_set(void)
-{
-       u32 cr0;
-
-       /*  Save value of CR4 and clear Page Global Enable (bit 7)  */
-       if ( cpu_has_pge ) {
-               cr4 = read_cr4();
-               write_cr4(cr4 & ~X86_CR4_PGE);
-       }
-
-       /*  Disable and flush caches. Note that wbinvd flushes the TLBs as
-           a side-effect  */
-       cr0 = read_cr0() | 0x40000000;
-       wbinvd();
-       write_cr0(cr0);
-       wbinvd();
-
-       /* Cyrix ARRs - everything else were excluded at the top */
-       ccr3 = getCx86(CX86_CCR3);
-
-       /* Cyrix ARRs - everything else were excluded at the top */
-       setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
-
-}
-
-static void post_set(void)
-{
-       /*  Flush caches and TLBs  */
-       wbinvd();
-
-       /* Cyrix ARRs - everything else was excluded at the top */
-       setCx86(CX86_CCR3, ccr3);
-               
-       /*  Enable caches  */
-       write_cr0(read_cr0() & 0xbfffffff);
-
-       /*  Restore value of CR4  */
-       if ( cpu_has_pge )
-               write_cr4(cr4);
-}
-
-static void cyrix_set_arr(unsigned int reg, unsigned long base,
-                         unsigned long size, mtrr_type type)
-{
-       unsigned char arr, arr_type, arr_size;
-
-       arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */
-
-       /* count down from 32M (ARR0-ARR6) or from 2G (ARR7) */
-       if (reg >= 7)
-               size >>= 6;
-
-       size &= 0x7fff;         /* make sure arr_size <= 14 */
-       for (arr_size = 0; size; arr_size++, size >>= 1) ;
-
-       if (reg < 7) {
-               switch (type) {
-               case MTRR_TYPE_UNCACHABLE:
-                       arr_type = 1;
-                       break;
-               case MTRR_TYPE_WRCOMB:
-                       arr_type = 9;
-                       break;
-               case MTRR_TYPE_WRTHROUGH:
-                       arr_type = 24;
-                       break;
-               default:
-                       arr_type = 8;
-                       break;
-               }
-       } else {
-               switch (type) {
-               case MTRR_TYPE_UNCACHABLE:
-                       arr_type = 0;
-                       break;
-               case MTRR_TYPE_WRCOMB:
-                       arr_type = 8;
-                       break;
-               case MTRR_TYPE_WRTHROUGH:
-                       arr_type = 25;
-                       break;
-               default:
-                       arr_type = 9;
-                       break;
-               }
-       }
-
-       prepare_set();
-
-       base <<= PAGE_SHIFT;
-       setCx86(arr, ((unsigned char *) &base)[3]);
-       setCx86(arr + 1, ((unsigned char *) &base)[2]);
-       setCx86(arr + 2, (((unsigned char *) &base)[1]) | arr_size);
-       setCx86(CX86_RCR_BASE + reg, arr_type);
-
-       post_set();
-}
-
-typedef struct {
-       unsigned long base;
-       unsigned long size;
-       mtrr_type type;
-} arr_state_t;
-
-static arr_state_t arr_state[8] = {
-       {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL},
-       {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}
-};
-
-static unsigned char ccr_state[7] = { 0, 0, 0, 0, 0, 0, 0 };
-
-static void cyrix_set_all(void)
-{
-       int i;
-
-       prepare_set();
-
-       /* the CCRs are not contiguous */
-       for (i = 0; i < 4; i++)
-               setCx86(CX86_CCR0 + i, ccr_state[i]);
-       for (; i < 7; i++)
-               setCx86(CX86_CCR4 + i, ccr_state[i]);
-       for (i = 0; i < 8; i++)
-               cyrix_set_arr(i, arr_state[i].base, 
-                             arr_state[i].size, arr_state[i].type);
-
-       post_set();
-}
-
-#if 0
-/*
- * On Cyrix 6x86(MX) and M II the ARR3 is special: it has connection
- * with the SMM (System Management Mode) mode. So we need the following:
- * Check whether SMI_LOCK (CCR3 bit 0) is set
- *   if it is set, write a warning message: ARR3 cannot be changed!
- *     (it cannot be changed until the next processor reset)
- *   if it is reset, then we can change it, set all the needed bits:
- *   - disable access to SMM memory through ARR3 range (CCR1 bit 7 reset)
- *   - disable access to SMM memory (CCR1 bit 2 reset)
- *   - disable SMM mode (CCR1 bit 1 reset)
- *   - disable write protection of ARR3 (CCR6 bit 1 reset)
- *   - (maybe) disable ARR3
- * Just to be sure, we enable ARR usage by the processor (CCR5 bit 5 set)
- */
-static void __init
-cyrix_arr_init(void)
-{
-       struct set_mtrr_context ctxt;
-       unsigned char ccr[7];
-       int ccrc[7] = { 0, 0, 0, 0, 0, 0, 0 };
-       int i;
-
-       /* flush cache and enable MAPEN */
-       set_mtrr_prepare_save(&ctxt);
-       set_mtrr_cache_disable(&ctxt);
-
-       /* Save all CCRs locally */
-       ccr[0] = getCx86(CX86_CCR0);
-       ccr[1] = getCx86(CX86_CCR1);
-       ccr[2] = getCx86(CX86_CCR2);
-       ccr[3] = ctxt.ccr3;
-       ccr[4] = getCx86(CX86_CCR4);
-       ccr[5] = getCx86(CX86_CCR5);
-       ccr[6] = getCx86(CX86_CCR6);
-
-       if (ccr[3] & 1) {
-               ccrc[3] = 1;
-               arr3_protected = 1;
-       } else {
-               /* Disable SMM mode (bit 1), access to SMM memory (bit 2) and
-                * access to SMM memory through ARR3 (bit 7).
-                */
-               if (ccr[1] & 0x80) {
-                       ccr[1] &= 0x7f;
-                       ccrc[1] |= 0x80;
-               }
-               if (ccr[1] & 0x04) {
-                       ccr[1] &= 0xfb;
-                       ccrc[1] |= 0x04;
-               }
-               if (ccr[1] & 0x02) {
-                       ccr[1] &= 0xfd;
-                       ccrc[1] |= 0x02;
-               }
-               arr3_protected = 0;
-               if (ccr[6] & 0x02) {
-                       ccr[6] &= 0xfd;
-                       ccrc[6] = 1;    /* Disable write protection of ARR3 */
-                       setCx86(CX86_CCR6, ccr[6]);
-               }
-               /* Disable ARR3. This is safe now that we disabled SMM. */
-               /* cyrix_set_arr_up (3, 0, 0, 0, FALSE); */
-       }
-       /* If we changed CCR1 in memory, change it in the processor, too. */
-       if (ccrc[1])
-               setCx86(CX86_CCR1, ccr[1]);
-
-       /* Enable ARR usage by the processor */
-       if (!(ccr[5] & 0x20)) {
-               ccr[5] |= 0x20;
-               ccrc[5] = 1;
-               setCx86(CX86_CCR5, ccr[5]);
-       }
-
-       for (i = 0; i < 7; i++)
-               ccr_state[i] = ccr[i];
-       for (i = 0; i < 8; i++)
-               cyrix_get_arr(i,
-                             &arr_state[i].base, &arr_state[i].size,
-                             &arr_state[i].type);
-
-       set_mtrr_done(&ctxt);   /* flush cache and disable MAPEN */
-
-       if (ccrc[5])
-               printk(KERN_INFO "mtrr: ARR usage was not enabled, enabled 
manually\n");
-       if (ccrc[3])
-               printk(KERN_INFO "mtrr: ARR3 cannot be changed\n");
-/*
-    if ( ccrc[1] & 0x80) printk ("mtrr: SMM memory access through ARR3 disabled\n");
-    if ( ccrc[1] & 0x04) printk ("mtrr: SMM memory access disabled\n");
-    if ( ccrc[1] & 0x02) printk ("mtrr: SMM mode disabled\n");
-*/
-       if (ccrc[6])
-               printk(KERN_INFO "mtrr: ARR3 was write protected, 
unprotected\n");
-}
-#endif
-
-static const struct mtrr_ops cyrix_mtrr_ops = {
-       .vendor            = X86_VENDOR_CYRIX,
-//     .init              = cyrix_arr_init,
-       .set_all           = cyrix_set_all,
-       .set               = cyrix_set_arr,
-       .get               = cyrix_get_arr,
-       .get_free_region   = cyrix_get_free_region,
-       .validate_add_page = generic_validate_add_page,
-       .have_wrcomb       = positive_have_wrcomb,
-};
-
-int __init cyrix_init_mtrr(void)
-{
-       set_mtrr_ops(&cyrix_mtrr_ops);
-       return 0;
-}
-
-//arch_initcall(cyrix_init_mtrr);
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/cpu/mtrr/main.c
--- a/xen/arch/x86/cpu/mtrr/main.c      Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/cpu/mtrr/main.c      Wed Sep 12 13:29:30 2012 +0100
@@ -79,16 +79,6 @@ static const char *mtrr_attrib_to_str(in
        return (x <= 6) ? mtrr_strings[x] : "?";
 }
 
-#ifndef CONFIG_X86_64
-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
-
-void set_mtrr_ops(const struct mtrr_ops * ops)
-{
-       if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
-               mtrr_ops[ops->vendor] = ops;
-}
-#endif
-
 /*  Returns non-zero if we have the write-combining memory type  */
 static int have_wrcomb(void)
 {
@@ -521,12 +511,6 @@ int mtrr_del_page(int reg, unsigned long
                printk(KERN_WARNING "mtrr: register: %d too big\n", reg);
                goto out;
        }
-       if (is_cpu(CYRIX) && !use_intel()) {
-               if ((reg == 3) && arr3_protected) {
-                       printk(KERN_WARNING "mtrr: ARR3 cannot be changed\n");
-                       goto out;
-               }
-       }
        mtrr_if->get(reg, &lbase, &lsize, &ltype);
        if (lsize < 1) {
                printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
@@ -566,18 +550,6 @@ mtrr_del(int reg, unsigned long base, un
        return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
 }
 
-/* HACK ALERT!
- * These should be called implicitly, but we can't yet until all the initcall
- * stuff is done...
- */
-static void __init init_ifs(void)
-{
-#ifndef CONFIG_X86_64
-       amd_init_mtrr();
-       cyrix_init_mtrr();
-#endif
-}
-
 /* The suspend/resume methods are only for CPU without MTRR. CPU using generic
  * MTRR driver doesn't require this
  */
@@ -598,8 +570,6 @@ unsigned int paddr_bits __read_mostly = 
  */
 void __init mtrr_bp_init(void)
 {
-       init_ifs();
-
        if (cpu_has_mtrr) {
                mtrr_if = &generic_mtrr_ops;
                size_or_mask = 0xff000000;      /* 36 bits */
@@ -627,28 +597,6 @@ void __init mtrr_bp_init(void)
                        size_or_mask = 0xfff00000;      /* 32 bits */
                        size_and_mask = 0;
                }
-       } else {
-#ifndef CONFIG_X86_64
-               switch (boot_cpu_data.x86_vendor) {
-               case X86_VENDOR_AMD:
-                       if (cpu_has_k6_mtrr) {
-                               /* Pre-Athlon (K6) AMD CPU MTRRs */
-                               mtrr_if = mtrr_ops[X86_VENDOR_AMD];
-                               size_or_mask = 0xfff00000;      /* 32 bits */
-                               size_and_mask = 0;
-                       }
-                       break;
-               case X86_VENDOR_CYRIX:
-                       if (cpu_has_cyrix_arr) {
-                               mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
-                               size_or_mask = 0xfff00000;      /* 32 bits */
-                               size_and_mask = 0;
-                       }
-                       break;
-               default:
-                       break;
-               }
-#endif
        }
 
        if (mtrr_if) {
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/cpu/mtrr/mtrr.h
--- a/xen/arch/x86/cpu/mtrr/mtrr.h      Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/cpu/mtrr/mtrr.h      Wed Sep 12 13:29:30 2012 +0100
@@ -85,9 +85,3 @@ void mtrr_wrmsr(unsigned int msr, uint64
 
 extern int amd_init_mtrr(void);
 extern int cyrix_init_mtrr(void);
-
-#ifndef CONFIG_X86_64
-extern int arr3_protected;
-#else
-#define arr3_protected 0
-#endif
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/cpu/mtrr/state.c
--- a/xen/arch/x86/cpu/mtrr/state.c     Wed Sep 12 13:24:28 2012 +0200
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,77 +0,0 @@
-#include <xen/mm.h>
-#include <xen/init.h>
-#include <asm/io.h>
-#include <asm/mtrr.h>
-#include <asm/msr.h>
-#include "mtrr.h"
-
-
-/*  Put the processor into a state where MTRRs can be safely set  */
-void set_mtrr_prepare_save(struct set_mtrr_context *ctxt)
-{
-       unsigned int cr0;
-
-       /*  Disable interrupts locally  */
-       local_irq_save(ctxt->flags);
-
-       if (use_intel() || is_cpu(CYRIX)) {
-
-               /*  Save value of CR4 and clear Page Global Enable (bit 7)  */
-               if ( cpu_has_pge ) {
-                       ctxt->cr4val = read_cr4();
-                       write_cr4(ctxt->cr4val & (unsigned char) ~(1 << 7));
-               }
-
-               /*  Disable and flush caches. Note that wbinvd flushes the TLBs as
-                   a side-effect  */
-               cr0 = read_cr0() | 0x40000000;
-               wbinvd();
-               write_cr0(cr0);
-               wbinvd();
-
-               if (use_intel()) {
-                       /*  Save MTRR state */
-                       rdmsrl(MTRRdefType_MSR, ctxt->deftype);
-               } else
-                       /* Cyrix ARRs - everything else were excluded at the top */
-                       ctxt->ccr3 = getCx86(CX86_CCR3);
-       }
-}
-
-void set_mtrr_cache_disable(struct set_mtrr_context *ctxt)
-{
-       if (use_intel()) 
-               /*  Disable MTRRs, and set the default type to uncached  */
-               mtrr_wrmsr(MTRRdefType_MSR, ctxt->deftype & 0xf300UL);
-       else if (is_cpu(CYRIX))
-               /* Cyrix ARRs - everything else were excluded at the top */
-               setCx86(CX86_CCR3, (ctxt->ccr3 & 0x0f) | 0x10);
-}
-
-/*  Restore the processor after a set_mtrr_prepare  */
-void set_mtrr_done(struct set_mtrr_context *ctxt)
-{
-       if (use_intel() || is_cpu(CYRIX)) {
-
-               /*  Flush caches and TLBs  */
-               wbinvd();
-
-               /*  Restore MTRRdefType  */
-               if (use_intel())
-                       /* Intel (P6) standard MTRRs */
-                       mtrr_wrmsr(MTRRdefType_MSR, ctxt->deftype);
-               else
-                       /* Cyrix ARRs - everything else was excluded at the top */
-                       setCx86(CX86_CCR3, ctxt->ccr3);
-               
-               /*  Enable caches  */
-               write_cr0(read_cr0() & 0xbfffffff);
-
-               /*  Restore value of CR4  */
-               if ( cpu_has_pge )
-                       write_cr4(ctxt->cr4val);
-       }
-       /*  Re-enable interrupts locally (if enabled previously)  */
-       local_irq_restore(ctxt->flags);
-}
-
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/cpu/transmeta.c
--- a/xen/arch/x86/cpu/transmeta.c      Wed Sep 12 13:24:28 2012 +0200
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,107 +0,0 @@
-#include <xen/config.h>
-#include <xen/lib.h>
-#include <xen/init.h>
-#include <asm/processor.h>
-#include <asm/msr.h>
-#include "cpu.h"
-
-static void __init init_transmeta(struct cpuinfo_x86 *c)
-{
-       unsigned int cap_mask, uk, max, dummy;
-       unsigned int cms_rev1, cms_rev2;
-       unsigned int cpu_rev, cpu_freq, cpu_flags, new_cpu_rev;
-       char cpu_info[65];
-
-       get_model_name(c);      /* Same as AMD/Cyrix */
-       display_cacheinfo(c);
-
-       /* Print CMS and CPU revision */
-       max = cpuid_eax(0x80860000);
-       cpu_rev = 0;
-       if ( max >= 0x80860001 ) {
-               cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags);
-               if (cpu_rev != 0x02000000) {
-                       printk(KERN_INFO "CPU: Processor revision %u.%u.%u.%u, %u MHz\n",
-                               (cpu_rev >> 24) & 0xff,
-                               (cpu_rev >> 16) & 0xff,
-                               (cpu_rev >> 8) & 0xff,
-                               cpu_rev & 0xff,
-                               cpu_freq);
-               }
-       }
-       if ( max >= 0x80860002 ) {
-               cpuid(0x80860002, &new_cpu_rev, &cms_rev1, &cms_rev2, &dummy);
-               if (cpu_rev == 0x02000000) {
-                       printk(KERN_INFO "CPU: Processor revision %08X, %u MHz\n",
-                               new_cpu_rev, cpu_freq);
-               }
-               printk(KERN_INFO "CPU: Code Morphing Software revision %u.%u.%u-%u-%u\n",
-                      (cms_rev1 >> 24) & 0xff,
-                      (cms_rev1 >> 16) & 0xff,
-                      (cms_rev1 >> 8) & 0xff,
-                      cms_rev1 & 0xff,
-                      cms_rev2);
-       }
-       if ( max >= 0x80860006 ) {
-               cpuid(0x80860003,
-                     (void *)&cpu_info[0],
-                     (void *)&cpu_info[4],
-                     (void *)&cpu_info[8],
-                     (void *)&cpu_info[12]);
-               cpuid(0x80860004,
-                     (void *)&cpu_info[16],
-                     (void *)&cpu_info[20],
-                     (void *)&cpu_info[24],
-                     (void *)&cpu_info[28]);
-               cpuid(0x80860005,
-                     (void *)&cpu_info[32],
-                     (void *)&cpu_info[36],
-                     (void *)&cpu_info[40],
-                     (void *)&cpu_info[44]);
-               cpuid(0x80860006,
-                     (void *)&cpu_info[48],
-                     (void *)&cpu_info[52],
-                     (void *)&cpu_info[56],
-                     (void *)&cpu_info[60]);
-               cpu_info[64] = '\0';
-               printk(KERN_INFO "CPU: %s\n", cpu_info);
-       }
-
-       /* Unhide possibly hidden capability flags */
-       rdmsr(0x80860004, cap_mask, uk);
-       wrmsr(0x80860004, ~0, uk);
-       c->x86_capability[0] = cpuid_edx(0x00000001);
-       wrmsr(0x80860004, cap_mask, uk);
-       
-       /* If we can run i686 user-space code, call us an i686 */
-#define USER686 (X86_FEATURE_TSC|X86_FEATURE_CX8|X86_FEATURE_CMOV)
-        if ( c->x86 == 5 && (c->x86_capability[0] & USER686) == USER686 )
-               c->x86 = 6;
-}
-
-static void transmeta_identify(struct cpuinfo_x86 * c)
-{
-       u32 xlvl;
-
-       /* Transmeta-defined flags: level 0x80860001 */
-       xlvl = cpuid_eax(0x80860000);
-       if ( (xlvl & 0xffff0000) == 0x80860000 ) {
-               if (  xlvl >= 0x80860001 )
-                       c->x86_capability[2] = cpuid_edx(0x80860001);
-       }
-}
-
-static struct cpu_dev transmeta_cpu_dev __cpuinitdata = {
-       .c_vendor       = "Transmeta",
-       .c_ident        = { "GenuineTMx86", "TransmetaCPU" },
-       .c_init         = init_transmeta,
-       .c_identify     = transmeta_identify,
-};
-
-int __init transmeta_init_cpu(void)
-{
-       cpu_devs[X86_VENDOR_TRANSMETA] = &transmeta_cpu_dev;
-       return 0;
-}
-
-//early_arch_initcall(transmeta_init_cpu);
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/debug.c
--- a/xen/arch/x86/debug.c      Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/debug.c      Wed Sep 12 13:29:30 2012 +0100
@@ -70,8 +70,6 @@ dbg_hvm_va2mfn(dbgva_t vaddr, struct dom
     return mfn;
 }
 
-#if defined(__x86_64__)
-
 /* 
  * pgd3val: this is the value of init_mm.pgd[3] in a PV guest. It is optional.
  *          This to assist debug of modules in the guest. The kernel address 
@@ -143,49 +141,6 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct doma
     return mfn_valid(mfn) ? mfn : INVALID_MFN;
 }
 
-#else
-
-/* Returns: mfn for the given (pv guest) vaddr */
-static unsigned long 
-dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
-{
-    l3_pgentry_t l3e, *l3t;
-    l2_pgentry_t l2e, *l2t;
-    l1_pgentry_t l1e, *l1t;
-    unsigned long cr3 = (pgd3val ? pgd3val : dp->vcpu[0]->arch.cr3);
-    unsigned long mfn = cr3 >> PAGE_SHIFT;
-
-    DBGP2("vaddr:%lx domid:%d cr3:%lx pgd3:%lx\n", vaddr, dp->domain_id, 
-          cr3, pgd3val);
-
-    if ( pgd3val == 0 )
-    {
-        l3t  = map_domain_page(mfn);
-        l3t += (cr3 & 0xFE0UL) >> 3;
-        l3e = l3t[l3_table_offset(vaddr)];
-        mfn = l3e_get_pfn(l3e);
-        unmap_domain_page(l3t);
-        if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
-            return INVALID_MFN;
-    }
-
-    l2t = map_domain_page(mfn);
-    l2e = l2t[l2_table_offset(vaddr)];
-    mfn = l2e_get_pfn(l2e);
-    unmap_domain_page(l2t);
-    if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) || 
-         (l2e_get_flags(l2e) & _PAGE_PSE) )
-        return INVALID_MFN;
-
-    l1t = map_domain_page(mfn);
-    l1e = l1t[l1_table_offset(vaddr)];
-    mfn = l1e_get_pfn(l1e);
-    unmap_domain_page(l1t);
-
-    return mfn_valid(mfn) ? mfn : INVALID_MFN;
-}
-#endif  /* defined(__x86_64__) */
-
 /* Returns: number of bytes remaining to be copied */
 static int
 dbg_rw_guest_mem(dbgva_t addr, dbgbyte_t *buf, int len, struct domain *dp, 
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/domain.c     Wed Sep 12 13:29:30 2012 +0100
@@ -184,11 +184,8 @@ struct domain *alloc_domain_struct(void)
      * We pack the PDX of the domain structure into a 32-bit field within
      * the page_info structure. Hence the MEMF_bits() restriction.
      */
-    unsigned int bits = 32 + PAGE_SHIFT;
+    unsigned int bits = 32 + PAGE_SHIFT + pfn_pdx_hole_shift;
 
-#ifdef __x86_64__
-    bits += pfn_pdx_hole_shift;
-#endif
     BUILD_BUG_ON(sizeof(*d) > PAGE_SIZE);
     d = alloc_xenheap_pages(0, MEMF_bits(bits));
     if ( d != NULL )
@@ -232,9 +229,6 @@ struct vcpu_guest_context *alloc_vcpu_gu
     enum fixed_addresses idx = FIX_VGC_BEGIN -
         cpu * PFN_UP(sizeof(struct vcpu_guest_context));
 
-#ifdef __i386__
-    BUILD_BUG_ON(sizeof(struct vcpu_guest_context) > PAGE_SIZE);
-#endif
     BUG_ON(per_cpu(vgc_pages[0], cpu) != NULL);
 
     for ( i = 0; i < PFN_UP(sizeof(struct vcpu_guest_context)); ++i )
@@ -270,8 +264,6 @@ void free_vcpu_guest_context(struct vcpu
     }
 }
 
-#ifdef __x86_64__
-
 static int setup_compat_l4(struct vcpu *v)
 {
     struct page_info *pg;
@@ -376,11 +368,6 @@ int switch_compat(struct domain *d)
     return -ENOMEM;
 }
 
-#else
-#define setup_compat_l4(v) 0
-#define release_compat_l4(v) ((void)0)
-#endif
-
 static inline bool_t standalone_trap_ctxt(struct vcpu *v)
 {
     BUILD_BUG_ON(256 * sizeof(*v->arch.pv_vcpu.trap_ctxt) > PAGE_SIZE);
@@ -390,31 +377,23 @@ static inline bool_t standalone_trap_ctx
 int vcpu_initialise(struct vcpu *v)
 {
     struct domain *d = v->domain;
+    unsigned int idx;
     int rc;
 
     v->arch.flags = TF_kernel_mode;
 
-#if defined(__i386__)
-    mapcache_vcpu_init(v);
-#else
+    idx = perdomain_pt_pgidx(v);
+    if ( !perdomain_pt_page(d, idx) )
     {
-        unsigned int idx = perdomain_pt_pgidx(v);
         struct page_info *pg;
-
-        if ( !perdomain_pt_page(d, idx) )
-        {
-            pg = alloc_domheap_page(NULL, MEMF_node(vcpu_to_node(v)));
-            if ( !pg )
-                return -ENOMEM;
-            clear_page(page_to_virt(pg));
-            perdomain_pt_page(d, idx) = pg;
-            d->arch.mm_perdomain_l2[l2_table_offset(PERDOMAIN_VIRT_START)+idx]
-                = l2e_from_page(pg, __PAGE_HYPERVISOR);
-        }
+        pg = alloc_domheap_page(NULL, MEMF_node(vcpu_to_node(v)));
+        if ( !pg )
+            return -ENOMEM;
+        clear_page(page_to_virt(pg));
+        perdomain_pt_page(d, idx) = pg;
+        d->arch.mm_perdomain_l2[l2_table_offset(PERDOMAIN_VIRT_START)+idx]
+            = l2e_from_page(pg, __PAGE_HYPERVISOR);
     }
-#endif
-
-    pae_l3_cache_init(&v->arch.pae_l3_cache);
 
     paging_vcpu_init(v);
 
@@ -499,11 +478,7 @@ void vcpu_destroy(struct vcpu *v)
 
 int arch_domain_create(struct domain *d, unsigned int domcr_flags)
 {
-#ifdef __x86_64__
     struct page_info *pg;
-#else
-    int pdpt_order;
-#endif
     int i, paging_initialised = 0;
     int rc = -ENOMEM;
 
@@ -520,18 +495,6 @@ int arch_domain_create(struct domain *d,
     d->arch.relmem = RELMEM_not_started;
     INIT_PAGE_LIST_HEAD(&d->arch.relmem_list);
 
-#if defined(__i386__)
-
-    pdpt_order = get_order_from_bytes(PDPT_L1_ENTRIES * sizeof(l1_pgentry_t));
-    d->arch.mm_perdomain_pt = alloc_xenheap_pages(pdpt_order, 0);
-    if ( d->arch.mm_perdomain_pt == NULL )
-        goto fail;
-    memset(d->arch.mm_perdomain_pt, 0, PAGE_SIZE << pdpt_order);
-
-    mapcache_domain_init(d);
-
-#else /* __x86_64__ */
-
     if ( d->domain_id && !is_idle_domain(d) &&
          cpu_has_amd_erratum(&boot_cpu_data, AMD_ERRATUM_121) )
     {
@@ -572,8 +535,6 @@ int arch_domain_create(struct domain *d,
     HYPERVISOR_COMPAT_VIRT_START(d) =
         is_hvm_domain(d) ? ~0u : __HYPERVISOR_COMPAT_VIRT_START;
 
-#endif /* __x86_64__ */
-
     if ( (rc = paging_domain_init(d, domcr_flags)) != 0 )
         goto fail;
     paging_initialised = 1;
@@ -647,24 +608,18 @@ int arch_domain_create(struct domain *d,
     free_xenheap_page(d->shared_info);
     if ( paging_initialised )
         paging_final_teardown(d);
-#ifdef __x86_64__
     if ( d->arch.mm_perdomain_l2 )
         free_domheap_page(virt_to_page(d->arch.mm_perdomain_l2));
     if ( d->arch.mm_perdomain_l3 )
         free_domheap_page(virt_to_page(d->arch.mm_perdomain_l3));
     if ( d->arch.mm_perdomain_pt_pages )
         free_domheap_page(virt_to_page(d->arch.mm_perdomain_pt_pages));
-#else
-    free_xenheap_pages(d->arch.mm_perdomain_pt, pdpt_order);
-#endif
     return rc;
 }
 
 void arch_domain_destroy(struct domain *d)
 {
-#ifdef __x86_64__
     unsigned int i;
-#endif
 
     if ( is_hvm_domain(d) )
         hvm_domain_destroy(d);
@@ -678,11 +633,6 @@ void arch_domain_destroy(struct domain *
 
     paging_final_teardown(d);
 
-#ifdef __i386__
-    free_xenheap_pages(
-        d->arch.mm_perdomain_pt,
-        get_order_from_bytes(PDPT_L1_ENTRIES * sizeof(l1_pgentry_t)));
-#else
     for ( i = 0; i < PDPT_L2_ENTRIES; ++i )
     {
         if ( perdomain_pt_page(d, i) )
@@ -691,7 +641,6 @@ void arch_domain_destroy(struct domain *
     free_domheap_page(virt_to_page(d->arch.mm_perdomain_pt_pages));
     free_domheap_page(virt_to_page(d->arch.mm_perdomain_l2));
     free_domheap_page(virt_to_page(d->arch.mm_perdomain_l3));
-#endif
 
     free_xenheap_page(d->shared_info);
     cleanup_domain_irq_mapping(d);
@@ -751,21 +700,15 @@ int arch_set_info_guest(
     {
         if ( !compat )
         {
-#ifdef __x86_64__
             if ( !is_canonical_address(c.nat->user_regs.eip) ||
                  !is_canonical_address(c.nat->event_callback_eip) ||
                  !is_canonical_address(c.nat->syscall_callback_eip) ||
                  !is_canonical_address(c.nat->failsafe_callback_eip) )
                 return -EINVAL;
-#endif
 
             fixup_guest_stack_selector(d, c.nat->user_regs.ss);
             fixup_guest_stack_selector(d, c.nat->kernel_ss);
             fixup_guest_code_selector(d, c.nat->user_regs.cs);
-#ifdef __i386__
-            fixup_guest_code_selector(d, c.nat->event_callback_cs);
-            fixup_guest_code_selector(d, c.nat->failsafe_callback_cs);
-#endif
 
             for ( i = 0; i < 256; i++ )
             {
@@ -863,7 +806,6 @@ int arch_set_info_guest(
         if ( !compat )
         {
             fail = xen_pfn_to_cr3(pfn) != c.nat->ctrlreg[3];
-#ifdef CONFIG_X86_64
             if ( pagetable_is_null(v->arch.guest_table_user) )
                 fail |= c.nat->ctrlreg[1] || !(flags & VGCF_in_kernel);
             else
@@ -876,7 +818,6 @@ int arch_set_info_guest(
 
             pfn = l4e_get_pfn(*l4tab);
             fail = compat_pfn_to_cr3(pfn) != c.cmp->ctrlreg[3];
-#endif
         }
 
         for ( i = 0; i < ARRAY_SIZE(v->arch.pv_vcpu.gdt_frames); ++i )
@@ -897,7 +838,6 @@ int arch_set_info_guest(
 
     v->arch.pv_vcpu.event_callback_eip = c(event_callback_eip);
     v->arch.pv_vcpu.failsafe_callback_eip = c(failsafe_callback_eip);
-#ifdef CONFIG_X86_64
     if ( !compat )
     {
         v->arch.pv_vcpu.syscall_callback_eip = c.nat->syscall_callback_eip;
@@ -906,7 +846,6 @@ int arch_set_info_guest(
         v->arch.pv_vcpu.gs_base_user = c.nat->gs_base_user;
     }
     else
-#endif
     {
         v->arch.pv_vcpu.event_callback_cs = c(event_callback_cs);
         v->arch.pv_vcpu.failsafe_callback_cs = c(failsafe_callback_cs);
@@ -968,7 +907,6 @@ int arch_set_info_guest(
         }
 
         v->arch.guest_table = pagetable_from_page(cr3_page);
-#ifdef __x86_64__
         if ( c.nat->ctrlreg[1] )
         {
             cr3_gfn = xen_cr3_to_pfn(c.nat->ctrlreg[1]);
@@ -1022,7 +960,6 @@ int arch_set_info_guest(
         l4tab = __va(pagetable_get_paddr(v->arch.guest_table));
         *l4tab = l4e_from_pfn(page_to_mfn(cr3_page),
             _PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED);
-#endif
     }
 
     if ( v->vcpu_id == 0 )
@@ -1256,8 +1193,6 @@ arch_do_vcpu_op(
     return rc;
 }
 
-#ifdef __x86_64__
-
 #define loadsegment(seg,value) ({               \
     int __r = 1;                                \
     asm volatile (                              \
@@ -1483,20 +1418,6 @@ static void save_segments(struct vcpu *v
 
 #define switch_kernel_stack(v) ((void)0)
 
-#elif defined(__i386__)
-
-#define load_segments(n) ((void)0)
-#define save_segments(p) ((void)0)
-
-static inline void switch_kernel_stack(struct vcpu *v)
-{
-    struct tss_struct *tss = &this_cpu(init_tss);
-    tss->esp1 = v->arch.pv_vcpu.kernel_sp;
-    tss->ss1  = v->arch.pv_vcpu.kernel_ss;
-}
-
-#endif /* __i386__ */
-
 static void paravirt_ctxt_switch_from(struct vcpu *v)
 {
     save_segments(v);
@@ -1812,7 +1733,6 @@ unsigned long hypercall_create_continuat
         else
             current->arch.hvm_vcpu.hcall_preempted = 1;
 
-#ifdef __x86_64__
         if ( !is_hvm_vcpu(current) ?
              !is_pv_32on64_vcpu(current) :
              (hvm_guest_x86_mode(current) == 8) )
@@ -1832,7 +1752,6 @@ unsigned long hypercall_create_continuat
             }
         }
         else
-#endif
         {
             if ( supervisor_mode_kernel )
                 regs->eip &= ~31; /* re-execute entire hypercall entry stub */
@@ -2066,7 +1985,6 @@ static void vcpu_destroy_pagetables(stru
     struct domain *d = v->domain;
     unsigned long pfn;
 
-#ifdef __x86_64__
     if ( is_pv_32on64_vcpu(v) )
     {
         pfn = l4e_get_pfn(*(l4_pgentry_t *)
@@ -2087,7 +2005,6 @@ static void vcpu_destroy_pagetables(stru
         v->arch.cr3 = 0;
         return;
     }
-#endif
 
     pfn = pagetable_get_pfn(v->arch.guest_table);
     if ( pfn != 0 )
@@ -2099,7 +2016,6 @@ static void vcpu_destroy_pagetables(stru
         v->arch.guest_table = pagetable_null();
     }
 
-#ifdef __x86_64__
     /* Drop ref to guest_table_user (from MMUEXT_NEW_USER_BASEPTR) */
     pfn = pagetable_get_pfn(v->arch.guest_table_user);
     if ( pfn != 0 )
@@ -2113,7 +2029,6 @@ static void vcpu_destroy_pagetables(stru
         }
         v->arch.guest_table_user = pagetable_null();
     }
-#endif
 
     v->arch.cr3 = 0;
 }
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/domain_build.c
--- a/xen/arch/x86/domain_build.c       Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/domain_build.c       Wed Sep 12 13:29:30 2012 +0100
@@ -126,12 +126,6 @@ boolean_param("dom0_shadow", opt_dom0_sh
 static char __initdata opt_dom0_ioports_disable[200] = "";
 string_param("dom0_ioports_disable", opt_dom0_ioports_disable);
 
-#if defined(__i386__)
-/* No ring-3 access in initial leaf page tables. */
-#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
-#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
-#define L3_PROT (_PAGE_PRESENT)
-#elif defined(__x86_64__)
 /* Allow ring-3 access in long mode as guest cannot use ring 1 ... */
 #define BASE_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
 #define L1_PROT (BASE_PROT|_PAGE_GUEST_KERNEL)
@@ -140,7 +134,6 @@ string_param("dom0_ioports_disable", opt
 #define L2_PROT (BASE_PROT|_PAGE_DIRTY)
 #define L3_PROT (BASE_PROT|_PAGE_DIRTY)
 #define L4_PROT (BASE_PROT|_PAGE_DIRTY)
-#endif
 
 #define round_pgup(_p)    (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
 #define round_pgdown(_p)  ((_p)&PAGE_MASK)
@@ -238,7 +231,6 @@ static unsigned long __init compute_dom0
     nr_pages = min(nr_pages, max_pages);
     nr_pages = min(nr_pages, avail);
 
-#ifdef __x86_64__
     if ( (parms->p2m_base == UNSET_ADDR) && (dom0_nrpages <= 0) &&
          ((dom0_min_nrpages <= 0) || (nr_pages > min_pages)) )
     {
@@ -271,7 +263,6 @@ static unsigned long __init compute_dom0
             printk("Dom0 memory clipped to %lu pages\n", nr_pages);
         }
     }
-#endif
 
     d->max_pages = min_t(unsigned long, max_pages, UINT_MAX);
 
@@ -443,7 +434,6 @@ int __init construct_dom0(
         return -EINVAL;
     }
 
-#if defined(__x86_64__)
     if ( compat32 )
     {
         d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 1;
@@ -451,7 +441,6 @@ int __init construct_dom0(
         if ( setup_compat_arg_xlat(v) != 0 )
             BUG();
     }
-#endif
 
     nr_pages = compute_dom0_nr_pages(d, &parms, initrd_len);
 
@@ -463,15 +452,10 @@ int __init construct_dom0(
         unsigned long mask = (1UL << L2_PAGETABLE_SHIFT) - 1;
         value = (parms.virt_hv_start_low + mask) & ~mask;
         BUG_ON(!is_pv_32bit_domain(d));
-#if defined(__i386__)
-        if ( value > HYPERVISOR_VIRT_START )
-            panic("Domain 0 expects too high a hypervisor start address.\n");
-#else
         if ( value > __HYPERVISOR_COMPAT_VIRT_START )
             panic("Domain 0 expects too high a hypervisor start address.\n");
         HYPERVISOR_COMPAT_VIRT_START(d) =
             max_t(unsigned int, m2p_compat_vstart, value);
-#endif
     }
 
     if ( (parms.p2m_base != UNSET_ADDR) && elf_32bit(&elf) )
@@ -521,12 +505,6 @@ int __init construct_dom0(
         v_end            = (vstack_end + (1UL<<22)-1) & ~((1UL<<22)-1);
         if ( (v_end - vstack_end) < (512UL << 10) )
             v_end += 1UL << 22; /* Add extra 4MB to get >= 512kB padding. */
-#if defined(__i386__)
-        /* 5 pages: 1x 3rd + 4x 2nd level */
-        if ( (((v_end - v_start + ((1UL<<L2_PAGETABLE_SHIFT)-1)) >>
-               L2_PAGETABLE_SHIFT) + 5) <= nr_pt_pages )
-            break;
-#elif defined(__x86_64__)
 #define NR(_l,_h,_s) \
     (((((_h) + ((1UL<<(_s))-1)) & ~((1UL<<(_s))-1)) - \
        ((_l) & ~((1UL<<(_s))-1))) >> (_s))
@@ -538,7 +516,6 @@ int __init construct_dom0(
               NR(v_start, v_end, L2_PAGETABLE_SHIFT))  /* # L1 */
              <= nr_pt_pages )
             break;
-#endif
     }
 
     count = v_end - v_start;
@@ -553,12 +530,7 @@ int __init construct_dom0(
         vphysmap_start = parms.p2m_base;
         vphysmap_end   = vphysmap_start + nr_pages * sizeof(unsigned long);
     }
-#ifdef __i386__
-    if ( !test_bit(XENFEAT_pae_pgdir_above_4gb, parms.f_supported) )
-        page = alloc_domheap_pages(d, order, MEMF_bits(32));
-    else
-#endif
-        page = alloc_domheap_pages(d, order, 0);
+    page = alloc_domheap_pages(d, order, 0);
     if ( page == NULL )
         panic("Not enough RAM for domain 0 allocation.\n");
     alloc_spfn = page_to_mfn(page);
@@ -571,7 +543,6 @@ int __init construct_dom0(
                      d->tot_pages;
         initrd_mfn = mfn = initrd->mod_start;
         count = PFN_UP(initrd_len);
-#ifdef __x86_64__
         if ( d->arch.physaddr_bitsize &&
              ((mfn + count - 1) >> (d->arch.physaddr_bitsize - PAGE_SHIFT)) )
         {
@@ -593,10 +564,11 @@ int __init construct_dom0(
             initrd->mod_start = initrd_mfn = page_to_mfn(page);
         }
         else
-#endif
+        {
             while ( count-- )
                 if ( assign_pages(d, mfn_to_page(mfn++), 0, 0) )
                     BUG();
+        }
         initrd->mod_end = 0;
     }
 
@@ -633,120 +605,6 @@ int __init construct_dom0(
     if ( vinitrd_start )
         mpt_alloc -= PAGE_ALIGN(initrd_len);
 
-#if defined(__i386__)
-    /*
-     * Protect the lowest 1GB of memory. We use a temporary mapping there
-     * from which we copy the kernel and ramdisk images.
-     */
-    if ( v_start < (1UL<<30) )
-    {
-        printk("Initial loading isn't allowed to lowest 1GB of memory.\n");
-        return -EINVAL;
-    }
-
-    mpt.mod_start = mpt_alloc >> PAGE_SHIFT;
-    mpt.mod_end   = vpt_end - vpt_start;
-    mpt_ptr = bootstrap_map(&mpt);
-#define MPT_ALLOC(n) (mpt_ptr += (n)*PAGE_SIZE, mpt_alloc += (n)*PAGE_SIZE)
-
-    /* WARNING: The new domain must have its 'processor' field filled in! */
-    l3start = l3tab = mpt_ptr; MPT_ALLOC(1);
-    l2start = l2tab = mpt_ptr; MPT_ALLOC(4);
-    for (i = 0; i < L3_PAGETABLE_ENTRIES; i++) {
-        if ( i < 3 )
-            clear_page(l2tab + i * L2_PAGETABLE_ENTRIES);
-        else
-            copy_page(l2tab + i * L2_PAGETABLE_ENTRIES,
-                      idle_pg_table_l2 + i * L2_PAGETABLE_ENTRIES);
-        l3tab[i] = l3e_from_pfn(mpt.mod_start + 1 + i, L3_PROT);
-        l2tab[(LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT)+i] =
-            l2e_from_pfn(mpt.mod_start + 1 + i, __PAGE_HYPERVISOR);
-    }
-    v->arch.guest_table = pagetable_from_pfn(mpt.mod_start);
-
-    for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
-        l2tab[l2_linear_offset(PERDOMAIN_VIRT_START) + i] =
-            l2e_from_page(perdomain_pt_page(d, i), __PAGE_HYPERVISOR);
-
-    l2tab += l2_linear_offset(v_start);
-    pfn = alloc_spfn;
-    for ( count = 0; count < ((v_end-v_start)>>PAGE_SHIFT); count++ )
-    {
-        if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
-        {
-            l1tab = mpt_ptr;
-            *l2tab = l2e_from_paddr(mpt_alloc, L2_PROT);
-            MPT_ALLOC(1);
-            l2tab++;
-            clear_page(l1tab);
-            if ( count == 0 )
-                l1tab += l1_table_offset(v_start);
-        }
-        if ( count < initrd_pfn || count >= initrd_pfn + PFN_UP(initrd_len) )
-            mfn = pfn++;
-        else
-            mfn = initrd_mfn++;
-        *l1tab = l1e_from_pfn(mfn, L1_PROT);
-        l1tab++;
-        
-        page = mfn_to_page(mfn);
-        if ( !get_page_and_type(page, d, PGT_writable_page) )
-            BUG();
-    }
-#undef MPT_ALLOC
-
-    /* Pages that are part of page tables must be read only. */
-    mpt_alloc = (paddr_t)mpt.mod_start << PAGE_SHIFT;
-    mpt_ptr = l3start;
-    l2tab = l2start + l2_linear_offset(vpt_start);
-    l1start = mpt_ptr + (l2e_get_paddr(*l2tab) - mpt_alloc);
-    l1tab = l1start + l1_table_offset(vpt_start);
-    for ( count = 0; count < nr_pt_pages; count++ ) 
-    {
-        page = mfn_to_page(l1e_get_pfn(*l1tab));
-        if ( !opt_dom0_shadow )
-            l1e_remove_flags(*l1tab, _PAGE_RW);
-        else
-            if ( !get_page_type(page, PGT_writable_page) )
-                BUG();
-
-        switch ( count )
-        {
-        case 0:
-            page->u.inuse.type_info &= ~PGT_type_mask;
-            page->u.inuse.type_info |= PGT_l3_page_table;
-            get_page(page, d); /* an extra ref because of readable mapping */
-
-            /* Get another ref to L3 page so that it can be pinned. */
-            page->u.inuse.type_info++;
-            page->count_info++;
-            set_bit(_PGT_pinned, &page->u.inuse.type_info);
-            break;
-        case 1 ... 4:
-            page->u.inuse.type_info &= ~PGT_type_mask;
-            page->u.inuse.type_info |= PGT_l2_page_table;
-            if ( count == 4 )
-                page->u.inuse.type_info |= PGT_pae_xen_l2;
-            get_page(page, d); /* an extra ref because of readable mapping */
-            break;
-        default:
-            page->u.inuse.type_info &= ~PGT_type_mask;
-            page->u.inuse.type_info |= PGT_l1_page_table;
-            get_page(page, d); /* an extra ref because of readable mapping */
-            break;
-        }
-        if ( !((unsigned long)++l1tab & (PAGE_SIZE - 1)) )
-            l1tab = mpt_ptr + (l2e_get_paddr(*++l2tab) - mpt_alloc);
-    }
-
-    /*
-     * Put Xen's first L3 entry into Dom0's page tables so that updates
-     * through bootstrap_map() will affect the page tables we will run on.
-     */
-    l3start[0] = l3e_from_paddr(__pa(idle_pg_table_l2), L3_PROT);
-
-#elif defined(__x86_64__)
-
     /* Overlap with Xen protected area? */
     if ( !is_pv_32on64_domain(d) ?
          ((v_start < HYPERVISOR_VIRT_END) &&
@@ -903,8 +761,6 @@ int __init construct_dom0(
         }
     }
 
-#endif /* __x86_64__ */
-
     /* Mask all upcalls... */
     for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ )
         shared_info(d, vcpu_info[i].evtchn_upcall_mask) = 1;
@@ -971,7 +827,6 @@ int __init construct_dom0(
              elf_64bit(&elf) ? 64 : 32, parms.pae ? "p" : "");
 
     count = d->tot_pages;
-#ifdef __x86_64__
     /* Set up the phys->machine table if not part of the initial mapping. */
     if ( parms.p2m_base != UNSET_ADDR )
     {
@@ -1071,7 +926,6 @@ int __init construct_dom0(
         if ( !page )
             panic("Not enough RAM for DOM0 P->M table.\n");
     }
-#endif
 
     /* Write the phys->machine and machine->phys table entries. */
     for ( pfn = 0; pfn < count; pfn++ )
@@ -1158,34 +1012,12 @@ int __init construct_dom0(
         si->console.dom0.info_size = sizeof(struct dom0_vga_console_info);
     }
 
-#if defined(__x86_64__)
     if ( is_pv_32on64_domain(d) )
         xlat_start_info(si, XLAT_start_info_console_dom0);
-#endif
 
     /* Return to idle domain's page tables. */
     write_ptbase(current);
 
-#if defined(__i386__)
-    /* Restore Dom0's first L3 entry. */
-    mpt.mod_end = 5 * PAGE_SIZE;
-    l3start = mpt_ptr = bootstrap_map(&mpt);
-    l2start = mpt_ptr + PAGE_SIZE;
-    l3start[0] = l3e_from_pfn(mpt.mod_start + 1, L3_PROT);
-
-    /* Re-setup CR3  */
-    if ( paging_mode_enabled(d) )
-        paging_update_paging_modes(v);
-    else
-        update_cr3(v);
-
-    /*
-     * Destroy low mappings - they were only for our convenience. Note
-     * that zap_low_mappings() exceeds what bootstrap_map(NULL) would do.
-     */
-    zap_low_mappings(l2start);
-#endif
-
     update_domain_wallclock_time(d);
 
     v->is_initialised = 1;
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c     Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/domctl.c     Wed Sep 12 13:29:30 2012 +0100
@@ -156,7 +156,6 @@ long arch_do_domctl(
     break;
 
     case XEN_DOMCTL_getpageframeinfo3:
-#ifdef __x86_64__
         if (!has_32bit_shinfo(current->domain))
         {
             unsigned int n, j;
@@ -258,7 +257,6 @@ long arch_do_domctl(
             rcu_unlock_domain(d);
             break;
         }
-#endif
         /* fall thru */
     case XEN_DOMCTL_getpageframeinfo2:
     {
@@ -1004,7 +1002,6 @@ long arch_do_domctl(
         if ( domctl->cmd == XEN_DOMCTL_get_ext_vcpucontext )
         {
             evc->size = sizeof(*evc);
-#ifdef __x86_64__
             if ( !is_hvm_domain(d) )
             {
                 evc->sysenter_callback_cs      =
@@ -1021,7 +1018,6 @@ long arch_do_domctl(
                     v->arch.pv_vcpu.syscall32_disables_events;
             }
             else
-#endif
             {
                 evc->sysenter_callback_cs      = 0;
                 evc->sysenter_callback_eip     = 0;
@@ -1037,7 +1033,6 @@ long arch_do_domctl(
             ret = -EINVAL;
             if ( evc->size < offsetof(typeof(*evc), mcg_cap) )
                 goto ext_vcpucontext_out;
-#ifdef __x86_64__
             if ( !is_hvm_domain(d) )
             {
                 if ( !is_canonical_address(evc->sysenter_callback_eip) ||
@@ -1059,7 +1054,6 @@ long arch_do_domctl(
                     evc->syscall32_disables_events;
             }
             else
-#endif
             /* We do not support syscall/syscall32/sysenter on 32-bit Xen. */
             if ( (evc->sysenter_callback_cs & ~3) ||
                  evc->sysenter_callback_eip ||
@@ -1443,7 +1437,6 @@ long arch_do_domctl(
     }
     break;
 
-#ifdef __x86_64__
     case XEN_DOMCTL_mem_event_op:
     {
         struct domain *d;
@@ -1477,7 +1470,6 @@ long arch_do_domctl(
         } 
     }
     break;
-#endif /* __x86_64__ */
 
 #if P2M_AUDIT
     case XEN_DOMCTL_audit_p2m:
@@ -1594,12 +1586,9 @@ void arch_get_info_guest(struct vcpu *v,
         c.nat->user_regs.es = sreg.sel;
         hvm_get_segment_register(v, x86_seg_fs, &sreg);
         c.nat->user_regs.fs = sreg.sel;
-#ifdef __x86_64__
         c.nat->fs_base = sreg.base;
-#endif
         hvm_get_segment_register(v, x86_seg_gs, &sreg);
         c.nat->user_regs.gs = sreg.sel;
-#ifdef __x86_64__
         if ( ring_0(&c.nat->user_regs) )
         {
             c.nat->gs_base_kernel = sreg.base;
@@ -1610,7 +1599,6 @@ void arch_get_info_guest(struct vcpu *v,
             c.nat->gs_base_user = sreg.base;
             c.nat->gs_base_kernel = hvm_get_shadow_gs_base(v);
         }
-#endif
     }
     else
     {
@@ -1631,7 +1619,6 @@ void arch_get_info_guest(struct vcpu *v,
             c(ctrlreg[i] = v->arch.pv_vcpu.ctrlreg[i]);
         c(event_callback_eip = v->arch.pv_vcpu.event_callback_eip);
         c(failsafe_callback_eip = v->arch.pv_vcpu.failsafe_callback_eip);
-#ifdef CONFIG_X86_64
         if ( !compat )
         {
             c.nat->syscall_callback_eip = v->arch.pv_vcpu.syscall_callback_eip;
@@ -1640,7 +1627,6 @@ void arch_get_info_guest(struct vcpu *v,
             c.nat->gs_base_user = v->arch.pv_vcpu.gs_base_user;
         }
         else
-#endif
         {
             c(event_callback_cs = v->arch.pv_vcpu.event_callback_cs);
             c(failsafe_callback_cs = v->arch.pv_vcpu.failsafe_callback_cs);
@@ -1655,11 +1641,9 @@ void arch_get_info_guest(struct vcpu *v,
         {
             c.nat->ctrlreg[3] = xen_pfn_to_cr3(
                 pagetable_get_pfn(v->arch.guest_table));
-#ifdef __x86_64__
             c.nat->ctrlreg[1] =
                 pagetable_is_null(v->arch.guest_table_user) ? 0
                 : xen_pfn_to_cr3(pagetable_get_pfn(v->arch.guest_table_user));
-#endif
 
             /* Merge shadow DR7 bits into real DR7. */
             c.nat->debugreg[7] |= c.nat->debugreg[5];
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/e820.c
--- a/xen/arch/x86/e820.c       Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/e820.c       Wed Sep 12 13:29:30 2012 +0100
@@ -514,6 +514,7 @@ static void __init reserve_dmi_region(vo
 static void __init machine_specific_memory_setup(
     struct e820entry *raw, int *raw_nr)
 {
+    unsigned long mpt_limit, ro_mpt_limit;
     uint64_t top_of_ram, size;
     int i;
 
@@ -536,25 +537,15 @@ static void __init machine_specific_memo
                 NULL);
     }
 
-#ifdef __i386__
-    clip_to_limit((1ULL << 30) * MACHPHYS_MBYTES,
-                  "Only the first %lu GB of the physical memory map "
-                  "can be accessed by Xen in 32-bit mode.");
-#else
-    {
-        unsigned long mpt_limit, ro_mpt_limit;
-
-        mpt_limit = ((RDWR_MPT_VIRT_END - RDWR_MPT_VIRT_START)
-                     / sizeof(unsigned long)) << PAGE_SHIFT;
-        ro_mpt_limit = ((RO_MPT_VIRT_END - RO_MPT_VIRT_START)
-                        / sizeof(unsigned long)) << PAGE_SHIFT;
-        if ( mpt_limit > ro_mpt_limit )
-            mpt_limit = ro_mpt_limit;
-        clip_to_limit(mpt_limit,
-                      "Only the first %lu GB of the physical "
-                      "memory map can be accessed by Xen.");
-    }
-#endif
+    mpt_limit = ((RDWR_MPT_VIRT_END - RDWR_MPT_VIRT_START)
+                 / sizeof(unsigned long)) << PAGE_SHIFT;
+    ro_mpt_limit = ((RO_MPT_VIRT_END - RO_MPT_VIRT_START)
+                    / sizeof(unsigned long)) << PAGE_SHIFT;
+    if ( mpt_limit > ro_mpt_limit )
+        mpt_limit = ro_mpt_limit;
+    clip_to_limit(mpt_limit,
+                  "Only the first %lu GB of the physical "
+                  "memory map can be accessed by Xen.");
 
     reserve_dmi_region();
 
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/extable.c
--- a/xen/arch/x86/extable.c    Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/extable.c    Wed Sep 12 13:29:30 2012 +0100
@@ -6,12 +6,7 @@
 #include <xen/spinlock.h>
 #include <asm/uaccess.h>
 
-#ifdef __i386__
-#define EX_FIELD(ptr, field) (ptr)->field
-#define swap_ex NULL
-#else
 #define EX_FIELD(ptr, field) ((unsigned long)&(ptr)->field + (ptr)->field)
-#endif
 
 static inline unsigned long ex_addr(const struct exception_table_entry *x)
 {
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/genapic/x2apic.c
--- a/xen/arch/x86/genapic/x2apic.c     Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/genapic/x2apic.c     Wed Sep 12 13:29:30 2012 +0100
@@ -138,19 +138,7 @@ void __init check_x2apic_preenabled(void
     if ( lo & MSR_IA32_APICBASE_EXTD )
     {
         printk("x2APIC mode is already enabled by BIOS.\n");
-#ifndef __i386__
         x2apic_enabled = 1;
         genapic = apic_x2apic_probe();
-#else
-        lo &= ~(MSR_IA32_APICBASE_ENABLE | MSR_IA32_APICBASE_EXTD);
-        wrmsr(MSR_IA32_APICBASE, lo, hi);
-        lo |= MSR_IA32_APICBASE_ENABLE;
-        wrmsr(MSR_IA32_APICBASE, lo, hi);
-        printk("x2APIC disabled permanently on x86_32.\n");
-#endif
     }
-
-#ifdef __i386__
-    clear_bit(X86_FEATURE_X2APIC, boot_cpu_data.x86_capability);
-#endif
 }
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/hvm/hvm.c    Wed Sep 12 13:29:30 2012 +0100
@@ -681,7 +681,6 @@ static int hvm_save_cpu_ctxt(struct doma
         ctxt.rsp = v->arch.user_regs.esp;
         ctxt.rip = v->arch.user_regs.eip;
         ctxt.rflags = v->arch.user_regs.eflags;
-#ifdef __x86_64__
         ctxt.r8  = v->arch.user_regs.r8;
         ctxt.r9  = v->arch.user_regs.r9;
         ctxt.r10 = v->arch.user_regs.r10;
@@ -690,7 +689,6 @@ static int hvm_save_cpu_ctxt(struct doma
         ctxt.r13 = v->arch.user_regs.r13;
         ctxt.r14 = v->arch.user_regs.r14;
         ctxt.r15 = v->arch.user_regs.r15;
-#endif
         ctxt.dr0 = v->arch.debugreg[0];
         ctxt.dr1 = v->arch.debugreg[1];
         ctxt.dr2 = v->arch.debugreg[2];
@@ -867,7 +865,6 @@ static int hvm_load_cpu_ctxt(struct doma
     v->arch.user_regs.esp = ctxt.rsp;
     v->arch.user_regs.eip = ctxt.rip;
     v->arch.user_regs.eflags = ctxt.rflags | 2;
-#ifdef __x86_64__
     v->arch.user_regs.r8  = ctxt.r8;
     v->arch.user_regs.r9  = ctxt.r9;
     v->arch.user_regs.r10 = ctxt.r10;
@@ -876,7 +873,6 @@ static int hvm_load_cpu_ctxt(struct doma
     v->arch.user_regs.r13 = ctxt.r13;
     v->arch.user_regs.r14 = ctxt.r14;
     v->arch.user_regs.r15 = ctxt.r15;
-#endif
     v->arch.debugreg[0] = ctxt.dr0;
     v->arch.debugreg[1] = ctxt.dr1;
     v->arch.debugreg[2] = ctxt.dr2;
@@ -1259,9 +1255,7 @@ int hvm_hap_nested_page_fault(paddr_t gp
     struct vcpu *v = current;
     struct p2m_domain *p2m;
     int rc, fall_through = 0, paged = 0;
-#ifdef __x86_64__
     int sharing_enomem = 0;
-#endif
     mem_event_request_t *req_ptr = NULL;
 
     /* On Nested Virtualization, walk the guest page table.
@@ -1370,7 +1364,6 @@ int hvm_hap_nested_page_fault(paddr_t gp
         goto out;
     }
 
-#ifdef __x86_64__
     /* Check if the page has been paged out */
     if ( p2m_is_paged(p2mt) || (p2mt == p2m_ram_paging_out) )
         paged = 1;
@@ -1384,7 +1377,6 @@ int hvm_hap_nested_page_fault(paddr_t gp
         rc = 1;
         goto out_put_gfn;
     }
-#endif
  
     /* Spurious fault? PoD and log-dirty also take this path. */
     if ( p2m_is_ram(p2mt) )
@@ -1426,7 +1418,6 @@ out:
      * locks in such circumstance */
     if ( paged )
         p2m_mem_paging_populate(v->domain, gfn);
-#ifdef __x86_64__
     if ( sharing_enomem )
     {
         int rv;
@@ -1439,7 +1430,6 @@ out:
             rc = 0;
         }
     }
-#endif
     if ( req_ptr )
     {
         mem_access_send_req(v->domain, req_ptr);
@@ -2625,14 +2615,12 @@ unsigned long copy_to_user_hvm(void *to,
 {
     int rc;
 
-#ifdef __x86_64__
     if ( !current->arch.hvm_vcpu.hcall_64bit &&
          is_compat_arg_xlat_range(to, len) )
     {
         memcpy(to, from, len);
         return 0;
     }
-#endif
 
     rc = hvm_copy_to_guest_virt_nofault((unsigned long)to, (void *)from,
                                         len, 0);
@@ -2643,14 +2631,12 @@ unsigned long clear_user_hvm(void *to, u
 {
     int rc;
 
-#ifdef __x86_64__
     if ( !current->arch.hvm_vcpu.hcall_64bit &&
          is_compat_arg_xlat_range(to, len) )
     {
         memset(to, 0x00, len);
         return 0;
     }
-#endif
 
     rc = __hvm_clear((unsigned long)to, len);
     return rc ? len : 0; /* fake a copy_to_user() return code */
@@ -2660,14 +2646,12 @@ unsigned long copy_from_user_hvm(void *t
 {
     int rc;
 
-#ifdef __x86_64__
     if ( !current->arch.hvm_vcpu.hcall_64bit &&
          is_compat_arg_xlat_range(from, len) )
     {
         memcpy(to, from, len);
         return 0;
     }
-#endif
 
     rc = hvm_copy_from_guest_virt_nofault(to, (unsigned long)from, len, 0);
     return rc ? len : 0; /* fake a copy_from_user() return code */
@@ -3121,24 +3105,6 @@ typedef unsigned long hvm_hypercall_t(
 #define HYPERCALL(x)                                        \
     [ __HYPERVISOR_ ## x ] = (hvm_hypercall_t *) do_ ## x
 
-#if defined(__i386__)
-
-static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = {
-    [ __HYPERVISOR_memory_op ] = (hvm_hypercall_t *)hvm_memory_op,
-    [ __HYPERVISOR_grant_table_op ] = (hvm_hypercall_t *)hvm_grant_table_op,
-    [ __HYPERVISOR_vcpu_op ] = (hvm_hypercall_t *)hvm_vcpu_op,
-    [ __HYPERVISOR_physdev_op ] = (hvm_hypercall_t *)hvm_physdev_op,
-    HYPERCALL(xen_version),
-    HYPERCALL(event_channel_op),
-    HYPERCALL(sched_op),
-    HYPERCALL(set_timer_op),
-    HYPERCALL(hvm_op),
-    HYPERCALL(sysctl),
-    HYPERCALL(tmem_op)
-};
-
-#else /* defined(__x86_64__) */
-
 static long hvm_grant_table_op_compat32(unsigned int cmd,
                                         XEN_GUEST_HANDLE(void) uop,
                                         unsigned int count)
@@ -3239,8 +3205,6 @@ static hvm_hypercall_t *hvm_hypercall32_
     HYPERCALL(tmem_op)
 };
 
-#endif /* defined(__x86_64__) */
-
 int hvm_do_hypercall(struct cpu_user_regs *regs)
 {
     struct vcpu *curr = current;
@@ -3250,9 +3214,7 @@ int hvm_do_hypercall(struct cpu_user_reg
 
     switch ( mode )
     {
-#ifdef __x86_64__
     case 8:        
-#endif
     case 4:
     case 2:
         hvm_get_segment_register(curr, x86_seg_ss, &sreg);
@@ -3277,7 +3239,6 @@ int hvm_do_hypercall(struct cpu_user_reg
 
     curr->arch.hvm_vcpu.hcall_preempted = 0;
 
-#ifdef __x86_64__
     if ( mode == 8 )
     {
         HVM_DBG_LOG(DBG_LEVEL_HCALL, "hcall%u(%lx, %lx, %lx, %lx, %lx, %lx)",
@@ -3294,7 +3255,6 @@ int hvm_do_hypercall(struct cpu_user_reg
         curr->arch.hvm_vcpu.hcall_64bit = 0;
     }
     else
-#endif
     {
         HVM_DBG_LOG(DBG_LEVEL_HCALL, "hcall%u(%x, %x, %x, %x, %x, %x)", eax,
                     (uint32_t)regs->ebx, (uint32_t)regs->ecx,
@@ -4369,7 +4329,6 @@ int hvm_debug_op(struct vcpu *v, int32_t
     return rc;
 }
 
-#ifdef __x86_64__
 static int hvm_memory_event_traps(long p, uint32_t reason,
                                   unsigned long value, unsigned long old, 
                                   bool_t gla_valid, unsigned long gla) 
@@ -4462,7 +4421,6 @@ int hvm_memory_event_single_step(unsigne
             MEM_EVENT_REASON_SINGLESTEP,
             gfn, 0, 1, gla);
 }
-#endif /* __x86_64__ */
 
 int nhvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
 {
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/hvm/svm/emulate.c
--- a/xen/arch/x86/hvm/svm/emulate.c    Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/hvm/svm/emulate.c    Wed Sep 12 13:29:30 2012 +0100
@@ -44,9 +44,7 @@ static unsigned int is_prefix(u8 opc)
     case 0xF0:
     case 0xF3:
     case 0xF2:
-#if __x86_64__
     case 0x40 ... 0x4f:
-#endif /* __x86_64__ */
         return 1;
     }
     return 0;
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/hvm/svm/entry.S
--- a/xen/arch/x86/hvm/svm/entry.S      Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/hvm/svm/entry.S      Wed Sep 12 13:29:30 2012 +0100
@@ -33,24 +33,11 @@
 
 #define get_current(reg) GET_CURRENT(r(reg))
         
-#if defined(__x86_64__)
 #define r(reg) %r##reg
 #define addr_of(lbl) lbl(%rip)
 #define call_with_regs(fn)                      \
         mov  %rsp,%rdi;                         \
         call fn;
-#else /* defined(__i386__) */
-#define r(reg) %e##reg
-#define addr_of(lbl) lbl
-#define UREGS_rax UREGS_eax
-#define UREGS_rip UREGS_eip
-#define UREGS_rsp UREGS_esp
-#define call_with_regs(fn)                      \
-        mov  %esp,%eax;                         \
-        push %eax;                              \
-        call fn;                                \
-        add  $4,%esp;
-#endif
 
 ENTRY(svm_asm_do_resume)
         call svm_intr_assist
@@ -93,7 +80,6 @@ UNLIKELY_END(svm_trace)
 
         mov  VCPU_svm_vmcb_pa(r(bx)),r(ax)
 
-#if defined(__x86_64__)
         pop  %r15
         pop  %r14
         pop  %r13
@@ -109,18 +95,9 @@ UNLIKELY_END(svm_trace)
         pop  %rdx
         pop  %rsi
         pop  %rdi
-#else /* defined(__i386__) */
-        pop  %ebx
-        pop  %ecx
-        pop  %edx
-        pop  %esi
-        pop  %edi
-        pop  %ebp
-#endif
 
         VMRUN
 
-#if defined(__x86_64__)
         push %rdi
         push %rsi
         push %rdx
@@ -136,14 +113,6 @@ UNLIKELY_END(svm_trace)
         push %r13
         push %r14
         push %r15
-#else /* defined(__i386__) */
-        push %ebp
-        push %edi
-        push %esi
-        push %edx
-        push %ecx
-        push %ebx
-#endif
 
         get_current(bx)
         movb $0,VCPU_svm_vmcb_in_sync(r(bx))
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/hvm/svm/svm.c        Wed Sep 12 13:29:30 2012 +0100
@@ -871,12 +871,10 @@ static void svm_ctxt_switch_from(struct 
     svm_sync_vmcb(v);
     svm_vmload(per_cpu(root_vmcb, cpu));
 
-#ifdef __x86_64__
     /* Resume use of ISTs now that the host TR is reinstated. */
     idt_tables[cpu][TRAP_double_fault].a  |= IST_DF << 32;
     idt_tables[cpu][TRAP_nmi].a           |= IST_NMI << 32;
     idt_tables[cpu][TRAP_machine_check].a |= IST_MCE << 32;
-#endif
 }
 
 static void svm_ctxt_switch_to(struct vcpu *v)
@@ -884,7 +882,6 @@ static void svm_ctxt_switch_to(struct vc
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     int cpu = smp_processor_id();
 
-#ifdef  __x86_64__
     /* 
      * This is required, because VMRUN does consistency check
      * and some of the DOM0 selectors are pointing to 
@@ -902,7 +899,6 @@ static void svm_ctxt_switch_to(struct vc
     idt_tables[cpu][TRAP_double_fault].a  &= ~(7UL << 32);
     idt_tables[cpu][TRAP_nmi].a           &= ~(7UL << 32);
     idt_tables[cpu][TRAP_machine_check].a &= ~(7UL << 32);
-#endif
 
     svm_restore_dr(v);
 
@@ -1222,7 +1218,6 @@ static int svm_cpu_up(void)
     /* Initialize core's ASID handling. */
     svm_asid_init(c);
 
-#ifdef __x86_64__
     /*
      * Check whether EFER.LMSLE can be written.
      * Unfortunately there's no feature bit defined for this.
@@ -1242,7 +1237,6 @@ static int svm_cpu_up(void)
             printk(XENLOG_WARNING "Inconsistent LMSLE support across CPUs!\n");
         cpu_has_lmsl = 0;
     }
-#endif
 
     /* Initialize OSVW bits to be used by guests */
     svm_host_osvw_init();
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/hvm/viridian.c
--- a/xen/arch/x86/hvm/viridian.c       Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/hvm/viridian.c       Wed Sep 12 13:29:30 2012 +0100
@@ -346,13 +346,11 @@ int viridian_hypercall(struct cpu_user_r
 
     switch ( mode )
     {
-#ifdef __x86_64__
     case 8:
         input.raw = regs->rcx;
         input_params_gpa = regs->rdx;
         output_params_gpa = regs->r8;
         break;
-#endif
     case 4:
         input.raw = ((uint64_t)regs->edx << 32) | regs->eax;
         input_params_gpa = ((uint64_t)regs->ebx << 32) | regs->ecx;
@@ -377,11 +375,9 @@ int viridian_hypercall(struct cpu_user_r
 out:
     output.result = status;
     switch (mode) {
-#ifdef __x86_64__
     case 8:
         regs->rax = output.raw;
         break;
-#endif
     default:
         regs->edx = output.raw >> 32;
         regs->eax = output.raw;
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/hvm/vlapic.c
--- a/xen/arch/x86/hvm/vlapic.c Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/hvm/vlapic.c Wed Sep 12 13:29:30 2012 +0100
@@ -1184,11 +1184,6 @@ int vlapic_init(struct vcpu *v)
 
     vlapic->pt.source = PTSRC_lapic;
 
-#ifdef __i386__
-    /* 32-bit VMX may be limited to 32-bit physical addresses. */
-    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
-        memflags |= MEMF_bits(32);
-#endif
     if (vlapic->regs_page == NULL)
     {
         vlapic->regs_page = alloc_domheap_page(NULL, memflags);
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/hvm/vmx/entry.S
--- a/xen/arch/x86/hvm/vmx/entry.S      Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/hvm/vmx/entry.S      Wed Sep 12 13:29:30 2012 +0100
@@ -38,28 +38,15 @@
 
 #define get_current(reg) GET_CURRENT(r(reg))
 
-#if defined(__x86_64__)
 #define r(reg) %r##reg
 #define addr_of(lbl) lbl(%rip)
 #define call_with_regs(fn)                      \
         mov  %rsp,%rdi;                         \
         call fn;
-#else /* defined(__i386__) */
-#define r(reg) %e##reg
-#define addr_of(lbl) lbl
-#define UREGS_rip UREGS_eip
-#define UREGS_rsp UREGS_esp
-#define call_with_regs(fn)                      \
-        mov  %esp,%eax;                         \
-        push %eax;                              \
-        call fn;                                \
-        add  $4,%esp;
-#endif
 
         ALIGN
 .globl vmx_asm_vmexit_handler
 vmx_asm_vmexit_handler:
-#if defined(__x86_64__)
         push %rdi
         push %rsi
         push %rdx
@@ -75,15 +62,6 @@ vmx_asm_vmexit_handler:
         push %r13
         push %r14
         push %r15
-#else /* defined(__i386__) */
-        push %eax
-        push %ebp
-        push %edi
-        push %esi
-        push %edx
-        push %ecx
-        push %ebx
-#endif
 
         get_current(bx)
 
@@ -153,7 +131,6 @@ UNLIKELY_END(realmode)
         VMWRITE(UREGS_eflags)
 
         cmpb $0,VCPU_vmx_launched(r(bx))
-#if defined(__x86_64__)
         pop  %r15
         pop  %r14
         pop  %r13
@@ -169,15 +146,6 @@ UNLIKELY_END(realmode)
         pop  %rdx
         pop  %rsi
         pop  %rdi
-#else /* defined(__i386__) */
-        pop  %ebx
-        pop  %ecx
-        pop  %edx
-        pop  %esi
-        pop  %edi
-        pop  %ebp
-        pop  %eax
-#endif
         je   .Lvmx_launch
 
 /*.Lvmx_resume:*/
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c       Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/hvm/vmx/vmcs.c       Wed Sep 12 13:29:30 2012 +0100
@@ -145,10 +145,8 @@ static int vmx_init_vmcs_config(void)
 
     min = (CPU_BASED_HLT_EXITING |
            CPU_BASED_VIRTUAL_INTR_PENDING |
-#ifdef __x86_64__
            CPU_BASED_CR8_LOAD_EXITING |
            CPU_BASED_CR8_STORE_EXITING |
-#endif
            CPU_BASED_INVLPG_EXITING |
            CPU_BASED_CR3_LOAD_EXITING |
            CPU_BASED_CR3_STORE_EXITING |
@@ -166,11 +164,9 @@ static int vmx_init_vmcs_config(void)
         "CPU-Based Exec Control", min, opt,
         MSR_IA32_VMX_PROCBASED_CTLS, &mismatch);
     _vmx_cpu_based_exec_control &= ~CPU_BASED_RDTSC_EXITING;
-#ifdef __x86_64__
     if ( _vmx_cpu_based_exec_control & CPU_BASED_TPR_SHADOW )
         _vmx_cpu_based_exec_control &=
             ~(CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING);
-#endif
 
     if ( _vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS )
     {
@@ -249,18 +245,9 @@ static int vmx_init_vmcs_config(void)
         _vmx_secondary_exec_control &= ~ SECONDARY_EXEC_PAUSE_LOOP_EXITING;
     }
 
-#if defined(__i386__)
-    /* If we can't virtualise APIC accesses, the TPR shadow is pointless. */
-    if ( !(_vmx_secondary_exec_control &
-           SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) )
-        _vmx_cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
-#endif
-
     min = VM_EXIT_ACK_INTR_ON_EXIT;
     opt = VM_EXIT_SAVE_GUEST_PAT | VM_EXIT_LOAD_HOST_PAT;
-#ifdef __x86_64__
     min |= VM_EXIT_IA32E_MODE;
-#endif
     _vmx_vmexit_control = adjust_vmx_controls(
         "VMExit Control", min, opt, MSR_IA32_VMX_EXIT_CTLS, &mismatch);
 
@@ -333,7 +320,6 @@ static int vmx_init_vmcs_config(void)
         return -EINVAL;
     }
 
-#ifdef __x86_64__
     /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
     if ( vmx_basic_msr_high & (1u<<16) )
     {
@@ -341,7 +327,6 @@ static int vmx_init_vmcs_config(void)
                smp_processor_id());
         return -EINVAL;
     }
-#endif
 
     /* Require Write-Back (WB) memory type for VMCS accesses. */
     if ( ((vmx_basic_msr_high >> 18) & 15) != 6 )
@@ -866,9 +851,6 @@ static int construct_vmcs(struct vcpu *v
     __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
     __vmwrite(GUEST_DR7, 0);
     __vmwrite(VMCS_LINK_POINTER, ~0UL);
-#if defined(__i386__)
-    __vmwrite(VMCS_LINK_POINTER_HIGH, ~0UL);
-#endif
 
     v->arch.hvm_vmx.exception_bitmap = HVM_TRAP_MASK
               | (paging_mode_hap(d) ? 0 : (1U << TRAP_page_fault))
@@ -889,13 +871,7 @@ static int construct_vmcs(struct vcpu *v
     }
 
     if ( paging_mode_hap(d) )
-    {
         __vmwrite(EPT_POINTER, d->arch.hvm_domain.vmx.ept_control.eptp);
-#ifdef __i386__
-        __vmwrite(EPT_POINTER_HIGH,
-                  d->arch.hvm_domain.vmx.ept_control.eptp >> 32);
-#endif
-    }
 
     if ( cpu_has_vmx_pat && paging_mode_hap(d) )
     {
@@ -906,10 +882,6 @@ static int construct_vmcs(struct vcpu *v
 
         __vmwrite(HOST_PAT, host_pat);
         __vmwrite(GUEST_PAT, guest_pat);
-#ifdef __i386__
-        __vmwrite(HOST_PAT_HIGH, host_pat >> 32);
-        __vmwrite(GUEST_PAT_HIGH, guest_pat >> 32);
-#endif
     }
 
     vmx_vmcs_exit(v);
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Wed Sep 12 13:29:30 2012 +0100
@@ -145,8 +145,6 @@ static void vmx_vcpu_destroy(struct vcpu
     passive_domain_destroy(v);
 }
 
-#ifdef __x86_64__
-
 static DEFINE_PER_CPU(struct vmx_msr_state, host_msr_state);
 
 static u32 msr_index[] =
@@ -338,28 +336,6 @@ static void vmx_restore_guest_msrs(struc
         wrmsrl(MSR_TSC_AUX, hvm_msr_tsc_aux(v));
 }
 
-#else  /* __i386__ */
-
-void vmx_save_host_msrs(void) {}
-#define vmx_restore_host_msrs()     ((void)0)
-
-#define vmx_save_guest_msrs(v)      ((void)0)
-#define vmx_restore_guest_msrs(v)   ((void)0)
-
-static enum handler_return
-long_mode_do_msr_read(unsigned int msr, uint64_t *msr_content)
-{
-    return HNDL_unhandled;
-}
-
-static enum handler_return
-long_mode_do_msr_write(unsigned int msr, uint64_t msr_content)
-{
-    return HNDL_unhandled;
-}
-
-#endif /* __i386__ */
-
 void vmx_update_cpu_exec_control(struct vcpu *v)
 {
     if ( nestedhvm_vcpu_in_guestmode(v) )
@@ -565,7 +541,6 @@ static int vmx_vmcs_restore(struct vcpu 
 
 static void vmx_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
 {
-#ifdef __x86_64__
     struct vmx_msr_state *guest_state = &v->arch.hvm_vmx.msr_state;
     unsigned long guest_flags = guest_state->flags;
 
@@ -577,14 +552,12 @@ static void vmx_save_cpu_state(struct vc
     data->msr_lstar        = guest_state->msrs[VMX_INDEX_MSR_LSTAR];
     data->msr_star         = guest_state->msrs[VMX_INDEX_MSR_STAR];
     data->msr_syscall_mask = guest_state->msrs[VMX_INDEX_MSR_SYSCALL_MASK];
-#endif
 
     data->tsc = hvm_get_guest_tsc(v);
 }
 
 static void vmx_load_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
 {
-#ifdef __x86_64__
     struct vmx_msr_state *guest_state = &v->arch.hvm_vmx.msr_state;
 
     /* restore msrs */
@@ -595,7 +568,6 @@ static void vmx_load_cpu_state(struct vc
 
     v->arch.hvm_vmx.cstar     = data->msr_cstar;
     v->arch.hvm_vmx.shadow_gs = data->shadow_gs;
-#endif
 
     hvm_set_guest_tsc(v, data->tsc);
 }
@@ -942,11 +914,7 @@ static void vmx_set_segment_register(str
 
 static unsigned long vmx_get_shadow_gs_base(struct vcpu *v)
 {
-#ifdef __x86_64__
     return v->arch.hvm_vmx.shadow_gs;
-#else
-    return 0;
-#endif
 }
 
 static int vmx_set_guest_pat(struct vcpu *v, u64 gpat)
@@ -956,9 +924,6 @@ static int vmx_set_guest_pat(struct vcpu
 
     vmx_vmcs_enter(v);
     __vmwrite(GUEST_PAT, gpat);
-#ifdef __i386__
-    __vmwrite(GUEST_PAT_HIGH, gpat >> 32);
-#endif
     vmx_vmcs_exit(v);
     return 1;
 }
@@ -970,9 +935,6 @@ static int vmx_get_guest_pat(struct vcpu
 
     vmx_vmcs_enter(v);
     *gpat = __vmread(GUEST_PAT);
-#ifdef __i386__
-    *gpat |= (u64)__vmread(GUEST_PAT_HIGH) << 32;
-#endif
     vmx_vmcs_exit(v);
     return 1;
 }
@@ -985,9 +947,6 @@ static void vmx_set_tsc_offset(struct vc
         offset += nvmx_get_tsc_offset(v);
 
     __vmwrite(TSC_OFFSET, offset);
-#if defined (__i386__)
-    __vmwrite(TSC_OFFSET_HIGH, offset >> 32);
-#endif
     vmx_vmcs_exit(v);
 }
 
@@ -1074,12 +1033,6 @@ static void vmx_load_pdptrs(struct vcpu 
     __vmwrite(GUEST_PDPTR1, guest_pdptrs[1]);
     __vmwrite(GUEST_PDPTR2, guest_pdptrs[2]);
     __vmwrite(GUEST_PDPTR3, guest_pdptrs[3]);
-#ifdef __i386__
-    __vmwrite(GUEST_PDPTR0_HIGH, guest_pdptrs[0] >> 32);
-    __vmwrite(GUEST_PDPTR1_HIGH, guest_pdptrs[1] >> 32);
-    __vmwrite(GUEST_PDPTR2_HIGH, guest_pdptrs[2] >> 32);
-    __vmwrite(GUEST_PDPTR3_HIGH, guest_pdptrs[3] >> 32);
-#endif
 
     vmx_vmcs_exit(v);
 
@@ -1245,7 +1198,6 @@ static void vmx_update_guest_cr(struct v
 
 static void vmx_update_guest_efer(struct vcpu *v)
 {
-#ifdef __x86_64__
     unsigned long vm_entry_value;
 
     vmx_vmcs_enter(v);
@@ -1258,7 +1210,6 @@ static void vmx_update_guest_efer(struct
     __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
 
     vmx_vmcs_exit(v);
-#endif
 
     if ( v == current )
         write_efer((read_efer() & ~EFER_SCE) |
@@ -1764,14 +1715,6 @@ static const struct lbr_info {
     { MSR_C2_LASTBRANCH_0_FROM_IP,  NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
     { MSR_C2_LASTBRANCH_0_TO_IP,    NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
     { 0, 0 }
-#ifdef __i386__
-}, pm_lbr[] = {
-    { MSR_IA32_LASTINTFROMIP,       1 },
-    { MSR_IA32_LASTINTTOIP,         1 },
-    { MSR_PM_LASTBRANCH_TOS,        1 },
-    { MSR_PM_LASTBRANCH_0,          NUM_MSR_PM_LASTBRANCH },
-    { 0, 0 }
-#endif
 };
 
 static const struct lbr_info *last_branch_msr_get(void)
@@ -1781,14 +1724,6 @@ static const struct lbr_info *last_branc
     case 6:
         switch ( boot_cpu_data.x86_model )
         {
-#ifdef __i386__
-        /* PentiumM */
-        case 9: case 13:
-        /* Core Solo/Duo */
-        case 14:
-            return pm_lbr;
-            break;
-#endif
         /* Core2 Duo */
         case 15:
         /* Enhanced Core */
@@ -1857,9 +1792,6 @@ static int vmx_msr_read_intercept(unsign
         break;
     case MSR_IA32_DEBUGCTLMSR:
         *msr_content = __vmread(GUEST_IA32_DEBUGCTL);
-#ifdef __i386__
-        *msr_content |= (u64)__vmread(GUEST_IA32_DEBUGCTL_HIGH) << 32;
-#endif
         break;
     case IA32_FEATURE_CONTROL_MSR:
     case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_TRUE_ENTRY_CTLS:
@@ -2027,9 +1959,6 @@ static int vmx_msr_write_intercept(unsig
         else
         {
             __vmwrite(GUEST_IA32_DEBUGCTL, msr_content);
-#ifdef __i386__
-            __vmwrite(GUEST_IA32_DEBUGCTL_HIGH, msr_content >> 32);
-#endif
         }
 
         break;
@@ -2697,9 +2626,6 @@ void vmx_vmexit_handler(struct cpu_user_
     case EXIT_REASON_EPT_VIOLATION:
     {
         paddr_t gpa = __vmread(GUEST_PHYSICAL_ADDRESS);
-#ifdef __i386__
-        gpa |= (paddr_t)__vmread(GUEST_PHYSICAL_ADDRESS_HIGH) << 32;
-#endif
         exit_qualification = __vmread(EXIT_QUALIFICATION);
         ept_handle_violation(exit_qualification, gpa);
         break;
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/hvm/vmx/vpmu_core2.c
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c Wed Sep 12 13:29:30 2012 +0100
@@ -662,7 +662,6 @@ static int core2_vpmu_initialise(struct 
     /* Check the 'Debug Store' feature in the CPUID.EAX[1]:EDX[21] */
     if ( cpu_has(c, X86_FEATURE_DS) )
     {
-#ifdef __x86_64__
         if ( !cpu_has(c, X86_FEATURE_DTES64) )
         {
             printk(XENLOG_G_WARNING "CPU doesn't support 64-bit DS Area"
@@ -670,7 +669,6 @@ static int core2_vpmu_initialise(struct 
                    v->domain->domain_id, v->vcpu_id);
             goto func_out;
         }
-#endif
         vpmu_set(vpmu, VPMU_CPU_HAS_DS);
         rdmsrl(MSR_IA32_MISC_ENABLE, msr_content);
         if ( msr_content & MSR_IA32_MISC_ENABLE_BTS_UNAVAIL )
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/hvm/vmx/vvmx.c
--- a/xen/arch/x86/hvm/vmx/vvmx.c       Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/hvm/vmx/vvmx.c       Wed Sep 12 13:29:30 2012 +0100
@@ -234,7 +234,6 @@ static unsigned long reg_read(struct cpu
     CASE_GET_REG(RSI, esi);
     CASE_GET_REG(RDI, edi);
     CASE_GET_REG(RSP, esp);
-#ifdef CONFIG_X86_64
     CASE_GET_REG(R8, r8);
     CASE_GET_REG(R9, r9);
     CASE_GET_REG(R10, r10);
@@ -243,7 +242,6 @@ static unsigned long reg_read(struct cpu
     CASE_GET_REG(R13, r13);
     CASE_GET_REG(R14, r14);
     CASE_GET_REG(R15, r15);
-#endif
     default:
         break;
     }
@@ -264,7 +262,6 @@ static void reg_write(struct cpu_user_re
     CASE_SET_REG(RSI, esi);
     CASE_SET_REG(RDI, edi);
     CASE_SET_REG(RSP, esp);
-#ifdef CONFIG_X86_64
     CASE_SET_REG(R8, r8);
     CASE_SET_REG(R9, r9);
     CASE_SET_REG(R10, r10);
@@ -273,7 +270,6 @@ static void reg_write(struct cpu_user_re
     CASE_SET_REG(R13, r13);
     CASE_SET_REG(R14, r14);
     CASE_SET_REG(R15, r15);
-#endif
     default:
         break;
     }
@@ -646,10 +642,6 @@ static unsigned long vmcs_gstate_field[]
     /* 64 BITS */
     VMCS_LINK_POINTER,
     GUEST_IA32_DEBUGCTL,
-#ifndef CONFIG_X86_64
-    VMCS_LINK_POINTER_HIGH,
-    GUEST_IA32_DEBUGCTL_HIGH,
-#endif
     /* 32 BITS */
     GUEST_ES_LIMIT,
     GUEST_CS_LIMIT,
@@ -799,9 +791,7 @@ static void virtual_vmentry(struct cpu_u
     struct vcpu *v = current;
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
     void *vvmcs = nvcpu->nv_vvmcx;
-#ifdef __x86_64__
     unsigned long lm_l1, lm_l2;
-#endif
 
     vmx_vmcs_switch(v->arch.hvm_vmx.vmcs, nvcpu->nv_n2vmcx);
 
@@ -809,7 +799,6 @@ static void virtual_vmentry(struct cpu_u
     nvcpu->nv_vmentry_pending = 0;
     nvcpu->nv_vmswitch_in_progress = 1;
 
-#ifdef __x86_64__
     /*
      * EFER handling:
      * hvm_set_efer won't work if CR0.PG = 1, so we change the value
@@ -827,15 +816,12 @@ static void virtual_vmentry(struct cpu_u
         v->arch.hvm_vcpu.guest_efer |= EFER_LMA | EFER_LME;
     else
         v->arch.hvm_vcpu.guest_efer &= ~(EFER_LMA | EFER_LME);
-#endif
 
     load_shadow_control(v);
     load_shadow_guest_state(v);
 
-#ifdef __x86_64__
     if ( lm_l1 != lm_l2 )
         paging_update_paging_modes(v);
-#endif
 
     regs->eip = __get_vvmcs(vvmcs, GUEST_RIP);
     regs->esp = __get_vvmcs(vvmcs, GUEST_RSP);
@@ -954,9 +940,7 @@ static void virtual_vmexit(struct cpu_us
 {
     struct vcpu *v = current;
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
-#ifdef __x86_64__
     unsigned long lm_l1, lm_l2;
-#endif
 
     sync_vvmcs_ro(v);
     sync_vvmcs_guest_state(v, regs);
@@ -967,7 +951,6 @@ static void virtual_vmexit(struct cpu_us
     nestedhvm_vcpu_exit_guestmode(v);
     nvcpu->nv_vmexit_pending = 0;
 
-#ifdef __x86_64__
     lm_l2 = !!hvm_long_mode_enabled(v);
     lm_l1 = !!(__get_vvmcs(nvcpu->nv_vvmcx, VM_EXIT_CONTROLS) &
                            VM_EXIT_IA32E_MODE);
@@ -976,17 +959,14 @@ static void virtual_vmexit(struct cpu_us
         v->arch.hvm_vcpu.guest_efer |= EFER_LMA | EFER_LME;
     else
         v->arch.hvm_vcpu.guest_efer &= ~(EFER_LMA | EFER_LME);
-#endif
 
     vmx_update_cpu_exec_control(v);
     vmx_update_exception_bitmap(v);
 
     load_vvmcs_host_state(v);
 
-#ifdef __x86_64__
     if ( lm_l1 != lm_l2 )
         paging_update_paging_modes(v);
-#endif
 
     regs->eip = __get_vvmcs(nvcpu->nv_vvmcx, HOST_RIP);
     regs->esp = __get_vvmcs(nvcpu->nv_vvmcx, HOST_RSP);
@@ -1341,9 +1321,7 @@ int nvmx_msr_read_intercept(unsigned int
         /* bit 0-8, 10,11,13,14,16,17 must be 1 (refer G4 of SDM) */
         tmp = 0x36dff;
         data = VM_EXIT_ACK_INTR_ON_EXIT;
-#ifdef __x86_64__
         data |= VM_EXIT_IA32E_MODE;
-#endif
        /* 0-settings */
         data = ((data | tmp) << 32) | tmp;
         break;
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/i387.c
--- a/xen/arch/x86/i387.c       Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/i387.c       Wed Sep 12 13:29:30 2012 +0100
@@ -56,12 +56,8 @@ static inline void fpu_fxrstor(struct vc
      * tools, by silently clearing the block.
      */
     asm volatile (
-#ifdef __i386__
-        "1: fxrstor %0            \n"
-#else /* __x86_64__ */
         /* See above for why the operands/constraints are this way. */
         "1: " REX64_PREFIX "fxrstor (%2)\n"
-#endif
         ".section .fixup,\"ax\"   \n"
         "2: push %%"__OP"ax       \n"
         "   push %%"__OP"cx       \n"
@@ -79,9 +75,7 @@ static inline void fpu_fxrstor(struct vc
         : 
         : "m" (*fpu_ctxt),
           "i" (sizeof(v->arch.xsave_area->fpu_sse)/4)
-#ifdef __x86_64__
           ,"cdaSDb" (fpu_ctxt)
-#endif
         );
 }
 
@@ -112,11 +106,6 @@ static inline void fpu_fxsave(struct vcp
 {
     char *fpu_ctxt = v->arch.fpu_ctxt;
 
-#ifdef __i386__
-    asm volatile (
-        "fxsave %0"
-        : "=m" (*fpu_ctxt) );
-#else /* __x86_64__ */
     /*
      * The only way to force fxsaveq on a wide range of gas versions. On 
      * older versions the rex64 prefix works only if we force an
@@ -125,7 +114,6 @@ static inline void fpu_fxsave(struct vcp
     asm volatile (
         REX64_PREFIX "fxsave (%1)"
         : "=m" (*fpu_ctxt) : "cdaSDb" (fpu_ctxt) );
-#endif
     
     /* Clear exception flags if FSW.ES is set. */
     if ( unlikely(fpu_ctxt[2] & 0x80) )
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/machine_kexec.c
--- a/xen/arch/x86/machine_kexec.c      Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/machine_kexec.c      Wed Sep 12 13:29:30 2012 +0100
@@ -16,9 +16,6 @@ typedef void (*relocate_new_kernel_t)(
                 unsigned long indirection_page,
                 unsigned long *page_list,
                 unsigned long start_address,
-#ifdef __i386__
-                unsigned int cpu_has_pae,
-#endif
                 unsigned int preserve_context);
 
 int machine_kexec_load(int type, int slot, xen_kexec_image_t *image)
@@ -113,9 +110,6 @@ void machine_kexec(xen_kexec_image_t *im
         rnk = (relocate_new_kernel_t) image->page_list[1];
         (*rnk)(image->indirection_page, image->page_list,
                image->start_address,
-#ifdef __i386__
-               1 /* cpu_has_pae */,
-#endif
                0 /* preserve_context */);
     }
 }
@@ -132,15 +126,7 @@ void arch_crash_save_vmcoreinfo(void)
        VMCOREINFO_SYMBOL(dom_xen);
        VMCOREINFO_SYMBOL(dom_io);
 
-#ifdef CONFIG_X86_32
-    VMCOREINFO_SYMBOL(xenheap_phys_end);
-#endif
-#ifdef CONFIG_X86_PAE
-       VMCOREINFO_SYMBOL_ALIAS(pgd_l3, idle_pg_table);
-#endif
-#ifdef CONFIG_X86_64
        VMCOREINFO_SYMBOL_ALIAS(pgd_l4, idle_pg_table);
-#endif
 }
 
 /*
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/mm.c Wed Sep 12 13:29:30 2012 +0100
@@ -130,9 +130,8 @@ l1_pgentry_t __attribute__ ((__section__
 /*
  * PTE updates can be done with ordinary writes except:
  *  1. Debug builds get extra checking by using CMPXCHG[8B].
- *  2. PAE builds perform an atomic 8-byte store with CMPXCHG8B.
  */
-#if !defined(NDEBUG) || defined(__i386__)
+#if !defined(NDEBUG)
 #define PTE_UPDATE_WITH_CMPXCHG
 #endif
 
@@ -156,17 +155,12 @@ bool_t __read_mostly machine_to_phys_map
 bool_t __read_mostly opt_allow_superpage;
 boolean_param("allowsuperpage", opt_allow_superpage);
 
-#ifdef __i386__
-static int get_superpage(unsigned long mfn, struct domain *d);
-#endif
 static void put_superpage(unsigned long mfn);
 
 static uint32_t base_disallow_mask;
 #define L1_DISALLOW_MASK (base_disallow_mask | _PAGE_GNTTAB)
 #define L2_DISALLOW_MASK (base_disallow_mask & ~_PAGE_PSE)
 
-#if defined(__x86_64__)
-
 #define l3_disallow_mask(d) (!is_pv_32on64_domain(d) ?  \
                              base_disallow_mask :       \
                              0xFFFFF198U)
@@ -179,12 +173,6 @@ static uint32_t base_disallow_mask;
 #define L1_DISALLOW_MASK ((base_disallow_mask | _PAGE_GNTTAB) & ~_PAGE_GLOBAL)
 #endif
 
-#elif defined (__i386__)
-
-#define l3_disallow_mask(d) 0xFFFFF1FEU /* must-be-zero */
-
-#endif
-
 #define l1_disallow_mask(d)                                     \
     ((d != dom_io) &&                                           \
      (rangeset_is_empty((d)->iomem_caps) &&                     \
@@ -193,7 +181,6 @@ static uint32_t base_disallow_mask;
       !is_hvm_domain(d)) ?                                      \
      L1_DISALLOW_MASK : (L1_DISALLOW_MASK & ~PAGE_CACHE_ATTRS))
 
-#ifdef __x86_64__
 static void __init init_spagetable(void)
 {
     unsigned long s, start = SPAGETABLE_VIRT_START;
@@ -215,7 +202,6 @@ static void __init init_spagetable(void)
     }
     memset((void *)start, 0, end - start);
 }
-#endif
 
 static void __init init_frametable_chunk(void *start, void *end)
 {
@@ -253,9 +239,7 @@ void __init init_frametable(void)
     unsigned int sidx, eidx, nidx;
     unsigned int max_idx = (max_pdx + PDX_GROUP_COUNT - 1) / PDX_GROUP_COUNT;
 
-#ifdef __x86_64__
     BUILD_BUG_ON(XEN_VIRT_END > FRAMETABLE_VIRT_END);
-#endif
     BUILD_BUG_ON(FRAMETABLE_VIRT_START & ((1UL << L2_PAGETABLE_SHIFT) - 1));
 
     for ( sidx = 0; ; sidx = nidx )
@@ -278,10 +262,8 @@ void __init init_frametable(void)
                (unsigned long)pdx_to_page(max_idx * PDX_GROUP_COUNT) -
                (unsigned long)pdx_to_page(max_pdx));
     }
-#ifdef __x86_64__
     if (opt_allow_superpage)
         init_spagetable();
-#endif
 }
 
 void __init arch_init_memory(void)
@@ -356,12 +338,7 @@ void __init arch_init_memory(void)
          * the statically-initialised 1-16MB mapping area.
          */
         iostart_pfn = max_t(unsigned long, pfn, 1UL << (20 - PAGE_SHIFT));
-#if defined(CONFIG_X86_32)
-        ioend_pfn = min_t(unsigned long, rstart_pfn,
-                          DIRECTMAP_MBYTES << (20 - PAGE_SHIFT));
-#else
         ioend_pfn = min(rstart_pfn, 16UL << (20 - PAGE_SHIFT));
-#endif
         if ( iostart_pfn < ioend_pfn )            
             destroy_xen_mappings((unsigned long)mfn_to_virt(iostart_pfn),
                                  (unsigned long)mfn_to_virt(ioend_pfn));
@@ -470,91 +447,11 @@ void share_xen_page_with_privileged_gues
     share_xen_page_with_guest(page, dom_xen, readonly);
 }
 
-#if defined(__i386__)
-
-#ifdef NDEBUG
-/* Only PDPTs above 4GB boundary need to be shadowed in low memory. */
-#define l3tab_needs_shadow(mfn) ((mfn) >= 0x100000)
-#else
-/*
- * In debug builds we shadow a selection of <4GB PDPTs to exercise code paths.
- * We cannot safely shadow the idle page table, nor shadow page tables
- * (detected by zero reference count). As required for correctness, we
- * always shadow PDPTs above 4GB.
- */
-#define l3tab_needs_shadow(mfn)                          \
-    (((((mfn) << PAGE_SHIFT) != __pa(idle_pg_table)) &&  \
-      (mfn_to_page(mfn)->count_info & PGC_count_mask) && \
-      ((mfn) & 1)) || /* odd MFNs are shadowed */        \
-     ((mfn) >= 0x100000))
-#endif
-
-static l1_pgentry_t *fix_pae_highmem_pl1e;
-
-/* Cache the address of PAE high-memory fixmap page tables. */
-static int __init cache_pae_fixmap_address(void)
-{
-    unsigned long fixmap_base = fix_to_virt(FIX_PAE_HIGHMEM_0);
-    l2_pgentry_t *pl2e = virt_to_xen_l2e(fixmap_base);
-    fix_pae_highmem_pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(fixmap_base);
-    return 0;
-}
-__initcall(cache_pae_fixmap_address);
-
-static DEFINE_PER_CPU(u32, make_cr3_timestamp);
-
-void make_cr3(struct vcpu *v, unsigned long mfn)
-/* Takes the MFN of a PAE l3 table, copies the contents to below 4GB if
- * necessary, and sets v->arch.cr3 to the value to load in CR3. */
-{
-    l3_pgentry_t *highmem_l3tab, *lowmem_l3tab;
-    struct pae_l3_cache *cache = &v->arch.pae_l3_cache;
-    unsigned int cpu = smp_processor_id();
-
-    /* Fast path: does this mfn need a shadow at all? */
-    if ( !l3tab_needs_shadow(mfn) )
-    {
-        v->arch.cr3 = mfn << PAGE_SHIFT;
-        /* Cache is no longer in use or valid */
-        cache->high_mfn = 0;
-        return;
-    }
-
-    /* Caching logic is not interrupt safe. */
-    ASSERT(!in_irq());
-
-    /* Protects against pae_flush_pgd(). */
-    spin_lock(&cache->lock);
-
-    cache->inuse_idx ^= 1;
-    cache->high_mfn   = mfn;
-
-    /* Map the guest L3 table and copy to the chosen low-memory cache. */
-    l1e_write(fix_pae_highmem_pl1e-cpu, l1e_from_pfn(mfn, __PAGE_HYPERVISOR));
-    /* First check the previous high mapping can't be in the TLB. 
-     * (i.e. have we loaded CR3 since we last did this?) */
-    if ( unlikely(this_cpu(make_cr3_timestamp) == this_cpu(tlbflush_time)) )
-        flush_tlb_one_local(fix_to_virt(FIX_PAE_HIGHMEM_0 + cpu));
-    highmem_l3tab = (l3_pgentry_t *)fix_to_virt(FIX_PAE_HIGHMEM_0 + cpu);
-    lowmem_l3tab  = cache->table[cache->inuse_idx];
-    memcpy(lowmem_l3tab, highmem_l3tab, sizeof(cache->table[0]));
-    l1e_write(fix_pae_highmem_pl1e-cpu, l1e_empty());
-    this_cpu(make_cr3_timestamp) = this_cpu(tlbflush_time);
-
-    v->arch.cr3 = __pa(lowmem_l3tab);
-
-    spin_unlock(&cache->lock);
-}
-
-#else /* !defined(__i386__) */
-
 void make_cr3(struct vcpu *v, unsigned long mfn)
 {
     v->arch.cr3 = mfn << PAGE_SHIFT;
 }
 
-#endif /* !defined(__i386__) */
-
 void write_ptbase(struct vcpu *v)
 {
     write_cr3(v->arch.cr3);
@@ -721,7 +618,6 @@ static int get_page_and_type_from_pagenr
     return rc;
 }
 
-#ifdef __x86_64__
 static void put_data_page(
     struct page_info *page, int writeable)
 {
@@ -730,7 +626,6 @@ static void put_data_page(
     else
         put_page(page);
 }
-#endif
 
 /*
  * We allow root tables to map each other (a.k.a. linear page tables). It
@@ -805,7 +700,6 @@ int is_iomem_page(unsigned long mfn)
 static int update_xen_mappings(unsigned long mfn, unsigned long cacheattr)
 {
     int err = 0;
-#ifdef __x86_64__
     bool_t alias = mfn >= PFN_DOWN(xen_phys_start) &&
          mfn < PFN_UP(xen_phys_start + xen_virt_end - XEN_VIRT_START);
     unsigned long xen_va =
@@ -818,7 +712,6 @@ static int update_xen_mappings(unsigned 
                      PAGE_HYPERVISOR | cacheattr_to_pte_flags(cacheattr));
     if ( unlikely(alias) && !cacheattr && !err )
         err = map_pages_to_xen(xen_va, mfn, 1, PAGE_HYPERVISOR);
-#endif
     return err;
 }
 
@@ -1058,8 +951,6 @@ get_page_from_l4e(
 }
 #endif /* 4 level */
 
-#ifdef __x86_64__
-
 #ifdef USER_MAPPINGS_ARE_GLOBAL
 #define adjust_guest_l1e(pl1e, d)                                            \
     do {                                                                     \
@@ -1108,24 +999,12 @@ get_page_from_l4e(
             l4e_add_flags((pl4e), _PAGE_USER);                  \
     } while ( 0 )
 
-#else /* !defined(__x86_64__) */
-
-#define adjust_guest_l1e(_p, _d) ((void)(_d))
-#define adjust_guest_l2e(_p, _d) ((void)(_d))
-#define adjust_guest_l3e(_p, _d) ((void)(_d))
-
-#endif
-
-#ifdef __x86_64__
 #define unadjust_guest_l3e(pl3e, d)                                         \
     do {                                                                    \
         if ( unlikely(is_pv_32on64_domain(d)) &&                            \
              likely(l3e_get_flags((pl3e)) & _PAGE_PRESENT) )                \
             l3e_remove_flags((pl3e), _PAGE_USER|_PAGE_RW|_PAGE_ACCESSED);   \
     } while ( 0 )
-#else
-#define unadjust_guest_l3e(_p, _d) ((void)(_d))
-#endif
 
 void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner)
 {
@@ -1209,7 +1088,6 @@ static int put_page_from_l3e(l3_pgentry_
     if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) || (l3e_get_pfn(l3e) == pfn) )
         return 1;
 
-#ifdef __x86_64__
     if ( unlikely(l3e_get_flags(l3e) & _PAGE_PSE) )
     {
         unsigned long mfn = l3e_get_pfn(l3e);
@@ -1222,7 +1100,6 @@ static int put_page_from_l3e(l3_pgentry_
 
         return 0;
     }
-#endif
 
     if ( unlikely(partial > 0) )
         return __put_page_type(l3e_get_page(l3e), preemptible);
@@ -1289,10 +1166,6 @@ static int create_pae_xen_mappings(struc
 {
     struct page_info *page;
     l3_pgentry_t     l3e3;
-#ifdef __i386__
-    l2_pgentry_t     *pl2e, l2e;
-    int              i;
-#endif
 
     if ( !is_pv_32bit_domain(d) )
         return 1;
@@ -1326,76 +1199,9 @@ static int create_pae_xen_mappings(struc
         return 0;
     }
 
-#ifdef __i386__
-    /* Xen linear pagetable mappings. */
-    pl2e = map_domain_page(l3e_get_pfn(l3e3));
-    for ( i = 0; i < (LINEARPT_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
-    {
-        l2e = l2e_empty();
-        if ( l3e_get_flags(pl3e[i]) & _PAGE_PRESENT )
-            l2e = l2e_from_pfn(l3e_get_pfn(pl3e[i]), __PAGE_HYPERVISOR);
-        l2e_write(&pl2e[l2_table_offset(LINEAR_PT_VIRT_START) + i], l2e);
-    }
-    unmap_domain_page(pl2e);
-#endif
-
     return 1;
 }
 
-#ifdef __i386__
-/* Flush a pgdir update into low-memory caches. */
-static void pae_flush_pgd(
-    unsigned long mfn, unsigned int idx, l3_pgentry_t nl3e)
-{
-    struct domain *d = page_get_owner(mfn_to_page(mfn));
-    struct vcpu   *v;
-    intpte_t       _ol3e, _nl3e, _pl3e;
-    l3_pgentry_t  *l3tab_ptr;
-    struct pae_l3_cache *cache;
-
-    if ( unlikely(shadow_mode_enabled(d)) )
-    {
-        cpumask_t m;
-
-        /* Re-shadow this l3 table on any vcpus that are using it */
-        cpumask_clear(&m);
-        for_each_vcpu ( d, v )
-            if ( pagetable_get_pfn(v->arch.guest_table) == mfn )
-            {
-                paging_update_cr3(v);
-                cpumask_or(&m, &m, v->vcpu_dirty_cpumask);
-            }
-        flush_tlb_mask(&m);
-    }
-
-    /* If below 4GB then the pgdir is not shadowed in low memory. */
-    if ( !l3tab_needs_shadow(mfn) )
-        return;
-
-    for_each_vcpu ( d, v )
-    {
-        cache = &v->arch.pae_l3_cache;
-
-        spin_lock(&cache->lock);
-
-        if ( cache->high_mfn == mfn )
-        {
-            l3tab_ptr = &cache->table[cache->inuse_idx][idx];
-            _ol3e = l3e_get_intpte(*l3tab_ptr);
-            _nl3e = l3e_get_intpte(nl3e);
-            _pl3e = cmpxchg(&l3e_get_intpte(*l3tab_ptr), _ol3e, _nl3e);
-            BUG_ON(_pl3e != _ol3e);
-        }
-
-        spin_unlock(&cache->lock);
-    }
-
-    flush_tlb_mask(d->domain_dirty_cpumask);
-}
-#else
-# define pae_flush_pgd(mfn, idx, nl3e) ((void)0)
-#endif
-
 static int alloc_l2_table(struct page_info *page, unsigned long type,
                           int preemptible)
 {
@@ -1435,22 +1241,10 @@ static int alloc_l2_table(struct page_in
     if ( rc >= 0 && (type & PGT_pae_xen_l2) )
     {
         /* Xen private mappings. */
-#if defined(__i386__)
-        memcpy(&pl2e[L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1)],
-               &idle_pg_table_l2[L2_PAGETABLE_FIRST_XEN_SLOT],
-               L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));
-        for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
-            l2e_write(&pl2e[l2_table_offset(PERDOMAIN_VIRT_START) + i],
-                      l2e_from_page(perdomain_pt_page(d, i),
-                                    __PAGE_HYPERVISOR));
-        pl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
-            l2e_from_pfn(pfn, __PAGE_HYPERVISOR);
-#else
         memcpy(&pl2e[COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(d)],
                &compat_idle_pg_table_l2[
                    l2_table_offset(HIRO_COMPAT_MPT_VIRT_START)],
                COMPAT_L2_PAGETABLE_XEN_SLOTS(d) * sizeof(*pl2e));
-#endif
     }
 
     unmap_domain_page(pl2e);
@@ -1626,9 +1420,7 @@ static void free_l1_table(struct page_in
 
 static int free_l2_table(struct page_info *page, int preemptible)
 {
-#ifdef __x86_64__
     struct domain *d = page_get_owner(page);
-#endif
     unsigned long pfn = page_to_mfn(page);
     l2_pgentry_t *pl2e;
     unsigned int  i = page->nr_validated_ptes - 1;
@@ -2024,13 +1816,9 @@ static int mod_l3_entry(l3_pgentry_t *pl
     }
 
     if ( likely(rc == 0) )
-    {
         if ( !create_pae_xen_mappings(d, pl3e) )
             BUG();
 
-        pae_flush_pgd(pfn, pgentry_ptr_to_slot(pl3e), nl3e);
-    }
-
     put_page_from_l3e(ol3e, pfn, 0, 0);
     return rc;
 }
@@ -2612,8 +2400,6 @@ static void put_spage_pages(struct page_
     return;
 }
 
-#ifdef __x86_64__
-
 static int mark_superpage(struct spage_info *spage, struct domain *d)
 {
     unsigned long x, nx, y = spage->type_info;
@@ -2774,25 +2560,6 @@ static void put_superpage(unsigned long 
     return;
 }
 
-#else /* __i386__ */
-
-void clear_superpage_mark(struct page_info *page)
-{
-}
-
-static int get_superpage(unsigned long mfn, struct domain *d)
-{
-    return get_spage_pages(mfn_to_page(mfn), d);
-}
-
-static void put_superpage(unsigned long mfn)
-{
-    put_spage_pages(mfn_to_page(mfn));
-}
-
-#endif
-
-
 int new_guest_cr3(unsigned long mfn)
 {
     struct vcpu *curr = current;
@@ -2800,7 +2567,6 @@ int new_guest_cr3(unsigned long mfn)
     int okay;
     unsigned long old_base_mfn;
 
-#ifdef __x86_64__
     if ( is_pv_32on64_domain(d) )
     {
         okay = paging_mode_refcounts(d)
@@ -2822,7 +2588,7 @@ int new_guest_cr3(unsigned long mfn)
 
         return 1;
     }
-#endif
+
     okay = paging_mode_refcounts(d)
         ? get_page_from_pagenr(mfn, d)
         : !get_page_and_type_from_pagenr(mfn, PGT_root_page_table, d, 0, 0);
@@ -2948,28 +2714,8 @@ static inline int vcpumask_to_pcpumask(
     }
 }
 
-#ifdef __i386__
-static inline void *fixmap_domain_page(unsigned long mfn)
-{
-    unsigned int cpu = smp_processor_id();
-    void *ptr = (void *)fix_to_virt(FIX_PAE_HIGHMEM_0 + cpu);
-
-    l1e_write(fix_pae_highmem_pl1e - cpu,
-              l1e_from_pfn(mfn, __PAGE_HYPERVISOR));
-    flush_tlb_one_local(ptr);
-    return ptr;
-}
-static inline void fixunmap_domain_page(const void *ptr)
-{
-    unsigned int cpu = virt_to_fix((unsigned long)ptr) - FIX_PAE_HIGHMEM_0;
-
-    l1e_write(fix_pae_highmem_pl1e - cpu, l1e_empty());
-    this_cpu(make_cr3_timestamp) = this_cpu(tlbflush_time);
-}
-#else
 #define fixmap_domain_page(mfn) mfn_to_virt(mfn)
 #define fixunmap_domain_page(ptr) ((void)(ptr))
-#endif
 
 long do_mmuext_op(
     XEN_GUEST_HANDLE(mmuext_op_t) uops,
@@ -3141,8 +2887,6 @@ long do_mmuext_op(
                     && new_guest_cr3(op.arg1.mfn));
             break;
 
-        
-#ifdef __x86_64__
         case MMUEXT_NEW_USER_BASEPTR: {
             unsigned long old_mfn;
 
@@ -3179,7 +2923,6 @@ long do_mmuext_op(
 
             break;
         }
-#endif
         
         case MMUEXT_TLB_FLUSH_LOCAL:
             flush_tlb_local();
@@ -3345,7 +3088,6 @@ long do_mmuext_op(
             break;
         }
 
-#ifdef __x86_64__
         case MMUEXT_MARK_SUPER:
         {
             unsigned long mfn;
@@ -3397,7 +3139,6 @@ long do_mmuext_op(
             okay = (unmark_superpage(spage) >= 0);
             break;
         }
-#endif
 
         default:
             MEM_LOG("Invalid extended pt command 0x%x", op.cmd);
@@ -5195,7 +4936,6 @@ int ptwr_do_page_fault(struct vcpu *v, u
     return 0;
 }
 
-#ifdef __x86_64__
 /*************************
  * fault handling for read-only MMIO pages
  */
@@ -5284,7 +5024,6 @@ int mmio_ro_do_page_fault(struct vcpu *v
 
     return rc != X86EMUL_UNHANDLEABLE ? EXCRET_fault_fixed : 0;
 }
-#endif /* __x86_64__ */
 
 void free_xen_pagetable(void *v)
 {
@@ -5325,7 +5064,6 @@ int map_pages_to_xen(
 
     while ( nr_mfns != 0 )
     {
-#ifdef __x86_64__
         l3_pgentry_t ol3e, *pl3e = virt_to_xen_l3e(virt);
 
         if ( !pl3e )
@@ -5447,7 +5185,6 @@ int map_pages_to_xen(
                                                 __PAGE_HYPERVISOR));
             flush_area(virt, flush_flags);
         }
-#endif
 
         pl2e = virt_to_xen_l2e(virt);
         if ( !pl2e )
@@ -5588,8 +5325,7 @@ int map_pages_to_xen(
             }
         }
 
- check_l3: ;
-#ifdef __x86_64__
+ check_l3:
         if ( cpu_has_page1gb &&
              (flags == PAGE_HYPERVISOR) &&
              ((nr_mfns == 0) ||
@@ -5617,7 +5353,6 @@ int map_pages_to_xen(
                 free_xen_pagetable(l3e_to_l2e(ol3e));
             }
         }
-#endif
     }
 
     return 0;
@@ -5635,7 +5370,6 @@ void destroy_xen_mappings(unsigned long 
 
     while ( v < e )
     {
-#ifdef __x86_64__
         l3_pgentry_t *pl3e = virt_to_xen_l3e(v);
 
         if ( !(l3e_get_flags(*pl3e) & _PAGE_PRESENT) )
@@ -5667,7 +5401,6 @@ void destroy_xen_mappings(unsigned long 
             l3e_write_atomic(pl3e, l3e_from_pfn(virt_to_mfn(pl2e),
                                                 __PAGE_HYPERVISOR));
         }
-#endif
 
         pl2e = virt_to_xen_l2e(v);
 
@@ -5722,7 +5455,6 @@ void destroy_xen_mappings(unsigned long 
             }
         }
 
-#ifdef __x86_64__
         /* If we are done with the L3E, check if it is now empty. */
         if ( (v != e) && (l2_table_offset(v) + l1_table_offset(v) != 0) )
             continue;
@@ -5737,7 +5469,6 @@ void destroy_xen_mappings(unsigned long 
             flush_area(NULL, FLUSH_TLB_GLOBAL); /* flush before free */
             free_xen_pagetable(pl2e);
         }
-#endif
     }
 
     flush_area(NULL, FLUSH_TLB_GLOBAL);
@@ -5755,13 +5486,6 @@ void __set_fixmap(
 void memguard_init(void)
 {
     unsigned long start = max_t(unsigned long, xen_phys_start, 1UL << 20);
-#ifdef __i386__
-    map_pages_to_xen(
-        (unsigned long)__va(start),
-        start >> PAGE_SHIFT,
-        (xenheap_phys_end - start) >> PAGE_SHIFT,
-        __PAGE_HYPERVISOR|MAP_SMALL_PAGES);
-#else
     map_pages_to_xen(
         (unsigned long)__va(start),
         start >> PAGE_SHIFT,
@@ -5773,7 +5497,6 @@ void memguard_init(void)
         start >> PAGE_SHIFT,
         (__pa(&_end) + PAGE_SIZE - 1 - start) >> PAGE_SHIFT,
         __PAGE_HYPERVISOR|MAP_SMALL_PAGES);
-#endif
 }
 
 static void __memguard_change_range(void *p, unsigned long l, int guard)
@@ -5820,18 +5543,12 @@ void memguard_unguard_stack(void *p)
     memguard_unguard_range(p, PAGE_SIZE);
 }
 
-#if defined(__x86_64__)
 void arch_dump_shared_mem_info(void)
 {
     printk("Shared frames %u -- Saved frames %u\n",
             mem_sharing_get_nr_shared_mfns(),
             mem_sharing_get_nr_saved_mfns());
 }
-#else
-void arch_dump_shared_mem_info(void)
-{
-}
-#endif
 
 /*
  * Local variables:
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/mm/p2m-pt.c
--- a/xen/arch/x86/mm/p2m-pt.c  Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/mm/p2m-pt.c  Wed Sep 12 13:29:30 2012 +0100
@@ -63,21 +63,12 @@
 static unsigned long p2m_type_to_flags(p2m_type_t t, mfn_t mfn)
 {
     unsigned long flags;
-#ifdef __x86_64__
     /*
      * AMD IOMMU: When we share p2m table with iommu, bit 9 - bit 11 will be
      * used for iommu hardware to encode next io page level. Bit 59 - bit 62
      * are used for iommu flags, We could not use these bits to store p2m types.
      */
     flags = (unsigned long)(t & 0x7f) << 12;
-#else
-    flags = (t & 0x7UL) << 9;
-#endif
-
-#ifndef __x86_64__
-    /* 32-bit builds don't support a lot of the p2m types */
-    BUG_ON(t > p2m_populate_on_demand);
-#endif
 
     switch(t)
     {
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/mm/p2m.c     Wed Sep 12 13:29:30 2012 +0100
@@ -170,7 +170,6 @@ mfn_t __get_gfn_type_access(struct p2m_d
 
     mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order);
 
-#ifdef __x86_64__
     if ( (q & P2M_UNSHARE) && p2m_is_shared(*t) )
     {
         ASSERT(!p2m_is_nestedp2m(p2m));
@@ -180,9 +179,7 @@ mfn_t __get_gfn_type_access(struct p2m_d
             (void)mem_sharing_notify_enomem(p2m->domain, gfn, 0);
         mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order);
     }
-#endif
 
-#ifdef __x86_64__
     if (unlikely((p2m_is_broken(*t))))
     {
         /* Return invalid_mfn to avoid caller's access */
@@ -190,7 +187,6 @@ mfn_t __get_gfn_type_access(struct p2m_d
         if ( q & P2M_ALLOC )
             domain_crash(p2m->domain);
     }
-#endif
 
     return mfn;
 }
@@ -412,18 +408,15 @@ void p2m_teardown(struct p2m_domain *p2m
 {
     struct page_info *pg;
     struct domain *d = p2m->domain;
-#ifdef __x86_64__
     unsigned long gfn;
     p2m_type_t t;
     mfn_t mfn;
-#endif
 
     if (p2m == NULL)
         return;
 
     p2m_lock(p2m);
 
-#ifdef __x86_64__
     /* Try to unshare any remaining shared p2m entries. Safeguard
      * Since relinquish_shared_pages should have done the work. */ 
     for ( gfn=0; gfn < p2m->max_mapped_pfn; gfn++ )
@@ -439,7 +432,6 @@ void p2m_teardown(struct p2m_domain *p2m
             BUG_ON(mem_sharing_unshare_page(d, gfn, MEM_SHARING_DESTROY_GFN));
         }
     }
-#endif
 
     p2m->phys_table = pagetable_null();
 
@@ -565,7 +557,6 @@ guest_physmap_add_entry(struct domain *d
     for ( i = 0; i < (1UL << page_order); i++ )
     {
         omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, 0, NULL);
-#ifdef __x86_64__
         if ( p2m_is_shared(ot) )
         {
             /* Do an unshare to cleanly take care of all corner 
@@ -592,7 +583,6 @@ guest_physmap_add_entry(struct domain *d
             omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, 0, NULL);
             ASSERT(!p2m_is_shared(ot));
         }
-#endif /* __x86_64__ */
         if ( p2m_is_grant(ot) )
         {
             /* Really shouldn't be unmapping grant maps this way */
@@ -840,7 +830,6 @@ set_shared_p2m_entry(struct domain *d, u
     return rc;
 }
 
-#ifdef __x86_64__
 /**
  * p2m_mem_paging_nominate - Mark a guest page as to-be-paged-out
  * @d: guest domain
@@ -1430,9 +1419,6 @@ int p2m_get_mem_access(struct domain *d,
     return 0;
 }
 
-
-#endif /* __x86_64__ */
-
 static struct p2m_domain *
 p2m_getlru_nestedp2m(struct domain *d, struct p2m_domain *p2m)
 {
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/mm/shadow/Makefile
--- a/xen/arch/x86/mm/shadow/Makefile   Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/mm/shadow/Makefile   Wed Sep 12 13:29:30 2012 +0100
@@ -1,4 +1,3 @@
-obj-$(x86_32) += common.o guest_2.o guest_3.o
 obj-$(x86_64) += common.o guest_2.o guest_3.o guest_4.o
 
 guest_%.o: multi.c Makefile
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/mm/shadow/common.c   Wed Sep 12 13:29:30 2012 +0100
@@ -276,12 +276,6 @@ hvm_emulate_cmpxchg(enum x86_segment seg
         return v->arch.paging.mode->shadow.x86_emulate_cmpxchg(
             v, addr, old[0], new[0], bytes, sh_ctxt);
 
-#ifdef __i386__
-    if ( bytes == 8 )
-        return v->arch.paging.mode->shadow.x86_emulate_cmpxchg8b(
-            v, addr, old[0], old[1], new[0], new[1], sh_ctxt);
-#endif
-
     return X86EMUL_UNHANDLEABLE;
 }
 
@@ -353,12 +347,6 @@ pv_emulate_cmpxchg(enum x86_segment seg,
         return v->arch.paging.mode->shadow.x86_emulate_cmpxchg(
             v, offset, old[0], new[0], bytes, sh_ctxt);
 
-#ifdef __i386__
-    if ( bytes == 8 )
-        return v->arch.paging.mode->shadow.x86_emulate_cmpxchg8b(
-            v, offset, old[0], old[1], new[0], new[1], sh_ctxt);
-#endif
-
     return X86EMUL_UNHANDLEABLE;
 }
 
@@ -2879,29 +2867,23 @@ static void sh_update_paging_modes(struc
             v->arch.guest_table = d->arch.paging.shadow.unpaged_pagetable;
             v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 2);
         }
+        else if ( hvm_long_mode_enabled(v) )
+        {
+            // long mode guest...
+            v->arch.paging.mode =
+                &SHADOW_INTERNAL_NAME(sh_paging_mode, 4);
+        }
+        else if ( hvm_pae_enabled(v) )
+        {
+            // 32-bit PAE mode guest...
+            v->arch.paging.mode =
+                &SHADOW_INTERNAL_NAME(sh_paging_mode, 3);
+        }
         else
         {
-#ifdef __x86_64__
-            if ( hvm_long_mode_enabled(v) )
-            {
-                // long mode guest...
-                v->arch.paging.mode =
-                    &SHADOW_INTERNAL_NAME(sh_paging_mode, 4);
-            }
-            else
-#endif
-                if ( hvm_pae_enabled(v) )
-                {
-                    // 32-bit PAE mode guest...
-                    v->arch.paging.mode =
-                        &SHADOW_INTERNAL_NAME(sh_paging_mode, 3);
-                }
-                else
-                {
-                    // 32-bit 2 level guest...
-                    v->arch.paging.mode =
-                        &SHADOW_INTERNAL_NAME(sh_paging_mode, 2);
-                }
+            // 32-bit 2 level guest...
+            v->arch.paging.mode =
+                &SHADOW_INTERNAL_NAME(sh_paging_mode, 2);
         }
 
         if ( pagetable_is_null(v->arch.monitor_table) )
@@ -3664,11 +3646,6 @@ int shadow_track_dirty_vram(struct domai
     }
     else
     {
-#ifdef __i386__
-        unsigned long map_mfn = INVALID_MFN;
-        void *map_sl1p = NULL;
-#endif
-
         /* Iterate over VRAM to track dirty bits. */
         for ( i = 0; i < nr; i++ ) {
             mfn_t mfn = get_gfn_query_unlocked(d, begin_pfn + i, &t);
@@ -3702,21 +3679,7 @@ int shadow_track_dirty_vram(struct domai
                     {
                         /* Hopefully the most common case: only one mapping,
                          * whose dirty bit we can use. */
-                        l1_pgentry_t *sl1e;
-#ifdef __i386__
-                        void *sl1p = map_sl1p;
-                        unsigned long sl1mfn = paddr_to_pfn(sl1ma);
-
-                        if ( sl1mfn != map_mfn ) {
-                            if ( map_sl1p )
-                                sh_unmap_domain_page(map_sl1p);
-                            map_sl1p = sl1p = sh_map_domain_page(_mfn(sl1mfn));
-                            map_mfn = sl1mfn;
-                        }
-                        sl1e = sl1p + (sl1ma & ~PAGE_MASK);
-#else
-                        sl1e = maddr_to_virt(sl1ma);
-#endif
+                        l1_pgentry_t *sl1e = maddr_to_virt(sl1ma);
 
                         if ( l1e_get_flags(*sl1e) & _PAGE_DIRTY )
                         {
@@ -3743,11 +3706,6 @@ int shadow_track_dirty_vram(struct domai
             }
         }
 
-#ifdef __i386__
-        if ( map_sl1p )
-            sh_unmap_domain_page(map_sl1p);
-#endif
-
         rc = -EFAULT;
         if ( copy_to_guest(dirty_bitmap, dirty_vram->dirty_bitmap, dirty_size) == 0 ) {
             memset(dirty_vram->dirty_bitmap, 0, dirty_size);
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/mm/shadow/multi.c    Wed Sep 12 13:29:30 2012 +0100
@@ -5096,41 +5096,6 @@ sh_x86_emulate_cmpxchg(struct vcpu *v, u
     return rv;
 }
 
-#ifdef __i386__
-static int
-sh_x86_emulate_cmpxchg8b(struct vcpu *v, unsigned long vaddr, 
-                          unsigned long old_lo, unsigned long old_hi,
-                          unsigned long new_lo, unsigned long new_hi,
-                          struct sh_emulate_ctxt *sh_ctxt)
-{
-    void *addr;
-    u64 old, new, prev;
-    int rv = X86EMUL_OKAY;
-
-    /* Unaligned writes are only acceptable on HVM */
-    if ( (vaddr & 7) && !is_hvm_vcpu(v) )
-        return X86EMUL_UNHANDLEABLE;
-
-    addr = emulate_map_dest(v, vaddr, 8, sh_ctxt);
-    if ( emulate_map_dest_failed(addr) )
-        return (long)addr;
-
-    old = (((u64) old_hi) << 32) | (u64) old_lo;
-    new = (((u64) new_hi) << 32) | (u64) new_lo;
-
-    paging_lock(v->domain);
-    prev = cmpxchg(((u64 *)addr), old, new);
-
-    if ( prev != old )
-        rv = X86EMUL_CMPXCHG_FAILED;
-
-    emulate_unmap_dest(v, addr, 8, sh_ctxt);
-    shadow_audit_tables(v);
-    paging_unlock(v->domain);
-    return rv;
-}
-#endif
-
 /**************************************************************************/
 /* Audit tools */
 
@@ -5455,9 +5420,6 @@ const struct paging_mode sh_paging_mode 
     .shadow.detach_old_tables      = sh_detach_old_tables,
     .shadow.x86_emulate_write      = sh_x86_emulate_write,
     .shadow.x86_emulate_cmpxchg    = sh_x86_emulate_cmpxchg,
-#ifdef __i386__
-    .shadow.x86_emulate_cmpxchg8b  = sh_x86_emulate_cmpxchg8b,
-#endif
     .shadow.make_monitor_table     = sh_make_monitor_table,
     .shadow.destroy_monitor_table  = sh_destroy_monitor_table,
 #if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/numa.c
--- a/xen/arch/x86/numa.c       Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/numa.c       Wed Sep 12 13:29:30 2012 +0100
@@ -96,7 +96,6 @@ static int __init populate_memnodemap(co
 
 static int __init allocate_cachealigned_memnodemap(void)
 {
-#ifndef __i386__
        unsigned long size = PFN_UP(memnodemapsize * sizeof(*memnodemap));
        unsigned long mfn = alloc_boot_pages(size, 1);
 
@@ -115,13 +114,6 @@ static int __init allocate_cachealigned_
        memnodemapsize = size / sizeof(*memnodemap);
 
        return 0;
-#else
-       printk(KERN_ERR
-              "Memory to Node hash needs %lu entries, got only %zu\n",
-              memnodemapsize, ARRAY_SIZE(_memnodemap));
-       memnodemapsize = 0;
-       return -1;
-#endif
 }
 
 /*
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/oprofile/backtrace.c
--- a/xen/arch/x86/oprofile/backtrace.c Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/oprofile/backtrace.c Wed Sep 12 13:29:30 2012 +0100
@@ -10,10 +10,10 @@
  *
  */
 
-#include<xen/types.h>
-#include<asm/page.h>
-#include<xen/xenoprof.h>
-#include<xen/guest_access.h>
+#include <xen/types.h>
+#include <asm/page.h>
+#include <xen/xenoprof.h>
+#include <xen/guest_access.h>
 
 struct frame_head {
     struct frame_head * ebp;
@@ -22,14 +22,12 @@ struct frame_head {
 typedef struct frame_head frame_head_t;
 DEFINE_XEN_GUEST_HANDLE(frame_head_t);
 
-#ifdef CONFIG_X86_64
 struct frame_head_32bit {
     uint32_t ebp;
     uint32_t ret;
 } __attribute__((packed));
 typedef struct frame_head_32bit frame_head32_t;
 DEFINE_COMPAT_HANDLE(frame_head32_t);
-#endif
 
 static struct frame_head *
 dump_hypervisor_backtrace(struct vcpu *vcpu, const struct frame_head *head,
@@ -46,7 +44,6 @@ dump_hypervisor_backtrace(struct vcpu *v
     return head->ebp;
 }
 
-#ifdef CONFIG_X86_64
 static inline int is_32bit_vcpu(struct vcpu *vcpu)
 {
     if (is_hvm_vcpu(vcpu))
@@ -54,7 +51,6 @@ static inline int is_32bit_vcpu(struct v
     else
         return is_pv_32bit_vcpu(vcpu);
 }
-#endif
 
 static struct frame_head *
 dump_guest_backtrace(struct vcpu *vcpu, const struct frame_head *head,
@@ -62,7 +58,6 @@ dump_guest_backtrace(struct vcpu *vcpu, 
 {
     frame_head_t bufhead;
 
-#ifdef CONFIG_X86_64
     if ( is_32bit_vcpu(vcpu) )
     {
         __compat_handle_const_frame_head32_t guest_head =
@@ -78,7 +73,6 @@ dump_guest_backtrace(struct vcpu *vcpu, 
         bufhead.ret = bufhead32.ret;
     }
     else
-#endif
     {
         XEN_GUEST_HANDLE(const_frame_head_t) guest_head =
             const_guest_handle_from_ptr(head, frame_head_t);
@@ -136,11 +130,7 @@ static int valid_hypervisor_stack(const 
                                  const struct cpu_user_regs *regs)
 {
     unsigned long headaddr = (unsigned long)head;
-#ifdef CONFIG_X86_64
     unsigned long stack = (unsigned long)regs->rsp;
-#else
-    unsigned long stack = (unsigned long)regs;
-#endif
     unsigned long stack_base = (stack & ~(STACK_SIZE - 1)) + STACK_SIZE;
 
     return headaddr > stack && headaddr < stack_base;
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/oprofile/op_model_athlon.c
--- a/xen/arch/x86/oprofile/op_model_athlon.c   Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/oprofile/op_model_athlon.c   Wed Sep 12 13:29:30 2012 +0100
@@ -53,12 +53,8 @@ static unsigned long reset_value[MAX_COU
 
 extern char svm_stgi_label[];
 
-#ifdef CONFIG_X86_64
 u32 ibs_caps = 0;
 static u64 ibs_op_ctl;
-#else
-#define ibs_op_ctl 0
-#endif
 
 /* IBS cpuid feature detection */
 #define IBS_CPUID_FEATURES              0x8000001b
@@ -352,7 +348,6 @@ static int athlon_check_ctrs(unsigned in
 
 static inline void start_ibs(void)
 {
-#ifdef CONFIG_X86_64
        u64 val = 0;
 
        if (!ibs_caps)
@@ -390,7 +385,6 @@ static inline void start_ibs(void)
                val = op_amd_randomize_ibs_op(ibs_op_ctl);
                wrmsrl(MSR_AMD64_IBSOPCTL, val);
        }
-#endif
 }
  
 static void athlon_start(struct op_msrs const * const msrs)
@@ -439,8 +433,6 @@ static void athlon_stop(struct op_msrs c
        stop_ibs();
 }
 
-#ifdef CONFIG_X86_64
-
 #define IBSCTL_LVTOFFSETVAL             (1 << 8)
 #define APIC_EILVT_MSG_NMI              0x4
 #define APIC_EILVT_LVTOFF_IBS           1
@@ -535,8 +527,6 @@ void __init ibs_init(void)
                (unsigned)ibs_caps);
 }
 
-#endif /* CONFIG_X86_64 */
-
 struct op_x86_model_spec const op_athlon_spec = {
        .num_counters = K7_NUM_COUNTERS,
        .num_controls = K7_NUM_CONTROLS,
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/physdev.c
--- a/xen/arch/x86/physdev.c    Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/physdev.c    Wed Sep 12 13:29:30 2012 +0100
@@ -23,9 +23,7 @@ int physdev_map_pirq(domid_t, int type, 
                      struct msi_info *);
 int physdev_unmap_pirq(domid_t, int pirq);
 
-#ifdef CONFIG_X86_64
 #include "x86_64/mmconfig.h"
-#endif
 
 #ifndef COMPAT
 typedef long ret_t;
@@ -609,7 +607,6 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
         break;
     }
 
-#ifdef __x86_64__
     case PHYSDEVOP_pci_mmcfg_reserved: {
         struct physdev_pci_mmcfg_reserved info;
 
@@ -629,7 +626,6 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
                                  info.start_bus, info.end_bus, info.flags);
         break;
     }
-#endif
 
     case PHYSDEVOP_restore_msi: {
         struct physdev_restore_msi restore_msi;
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c      Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/setup.c      Wed Sep 12 13:29:30 2012 +0100
@@ -86,13 +86,7 @@ cpumask_t __read_mostly cpu_present_map;
 
 unsigned long __read_mostly xen_phys_start;
 
-#ifdef CONFIG_X86_32
-/* Limits of Xen heap, used to initialise the allocator. */
-unsigned long __initdata xenheap_initial_phys_start;
-unsigned long __read_mostly xenheap_phys_end;
-#else
 unsigned long __read_mostly xen_virt_end;
-#endif
 
 DEFINE_PER_CPU(struct tss_struct, init_tss);
 
@@ -174,10 +168,8 @@ static void free_xen_data(char *s, char 
     init_xenheap_pages(__pa(s), __pa(e));
 #endif
     memguard_guard_range(s, e-s);
-#if defined(CONFIG_X86_64)
     /* Also zap the mapping in the 1:1 area. */
     memguard_guard_range(__va(__pa(s)), e-s);
-#endif
 }
 
 extern char __init_begin[], __init_end[], __bss_start[];
@@ -271,10 +263,8 @@ static void *__init bootstrap_map(const 
     uint64_t start, end, mask = (1L << L2_PAGETABLE_SHIFT) - 1;
     void *ret;
 
-#ifdef __x86_64__
     if ( system_state != SYS_STATE_early_boot )
         return mod ? mfn_to_virt(mod->mod_start) : NULL;
-#endif
 
     if ( !mod )
     {
@@ -384,7 +374,6 @@ static uint64_t __init consider_modules(
 
 static void __init setup_max_pdx(void)
 {
-#ifdef __x86_64__
     max_pdx = pfn_to_pdx(max_page - 1) + 1;
 
     if ( max_pdx > (DIRECTMAP_SIZE >> PAGE_SHIFT) )
@@ -394,7 +383,6 @@ static void __init setup_max_pdx(void)
         max_pdx = FRAMETABLE_SIZE / sizeof(*frame_table);
 
     max_page = pdx_to_pfn(max_pdx - 1) + 1;
-#endif
 }
 
 void set_pdx_range(unsigned long smfn, unsigned long emfn)
@@ -680,11 +668,9 @@ void __init __start_xen(unsigned long mb
         destroy_xen_mappings(xen_phys_start,
                              xen_phys_start + BOOTSTRAP_MAP_BASE);
 
-#ifdef CONFIG_X86_64
         /* Make boot page tables match non-EFI boot. */
         l3_bootmap[l3_table_offset(BOOTSTRAP_MAP_BASE)] =
             l3e_from_paddr(__pa(l2_bootmap), __PAGE_HYPERVISOR);
-#endif
 
         memmap_type = loader;
     }
@@ -814,13 +800,10 @@ void __init __start_xen(unsigned long mb
         {
             end = min(e, limit);
             set_pdx_range(s >> PAGE_SHIFT, end >> PAGE_SHIFT);
-#ifdef CONFIG_X86_64
             map_pages_to_xen((unsigned long)__va(s), s >> PAGE_SHIFT,
                              (end - s) >> PAGE_SHIFT, PAGE_HYPERVISOR);
-#endif
         }
 
-#if defined(CONFIG_X86_64)
         e = min_t(uint64_t, e, 1ULL << (PAGE_SHIFT + 32));
 #define reloc_size ((__pa(&_end) + mask) & ~mask)
         /* Is the region suitable for relocating Xen? */
@@ -916,7 +899,6 @@ void __init __start_xen(unsigned long mb
 
             bootstrap_map(NULL);
         }
-#endif
 
         /* Is the region suitable for relocating the multiboot modules? */
         for ( j = mbi->mods_count - 1; j >= 0; j-- )
@@ -943,10 +925,6 @@ void __init __start_xen(unsigned long mb
             }
         }
 
-#ifdef CONFIG_X86_32
-        /* Confine the kexec area to below 4Gb. */
-        e = min_t(uint64_t, e, 1ULL << 32);
-#endif
         /* Don't overlap with modules. */
         e = consider_modules(s, e, PAGE_ALIGN(kexec_crash_area.size),
                              mod, mbi->mods_count, -1);
@@ -966,17 +944,10 @@ void __init __start_xen(unsigned long mb
         reserve_e820_ram(&boot_e820, s, s + PAGE_ALIGN(mod[i].mod_end));
     }
 
-#if defined(CONFIG_X86_32)
-    xenheap_initial_phys_start = (PFN_UP(__pa(&_end)) + 1) << PAGE_SHIFT;
-    /* Must pass a single mapped page for populating bootmem_region_list. */
-    init_boot_pages(__pa(&_end), xenheap_initial_phys_start);
-    xenheap_phys_end = DIRECTMAP_MBYTES << 20;
-#else
     if ( !xen_phys_start )
         EARLY_FAIL("Not enough memory to relocate Xen.\n");
     reserve_e820_ram(&boot_e820, efi_enabled ? mbi->mem_upper : __pa(&_start),
                      __pa(&_end));
-#endif
 
     /* Late kexec reservation (dynamic start address). */
     kexec_reserve_area(&boot_e820);
@@ -990,22 +961,15 @@ void __init __start_xen(unsigned long mb
     for ( i = 0; i < boot_e820.nr_map; i++ )
     {
         uint64_t s, e, mask = PAGE_SIZE - 1;
-#ifdef CONFIG_X86_64
         uint64_t map_s, map_e;
-#endif
 
         /* Only page alignment required now. */
         s = (boot_e820.map[i].addr + mask) & ~mask;
         e = (boot_e820.map[i].addr + boot_e820.map[i].size) & ~mask;
-#if defined(CONFIG_X86_32)
-        s = max_t(uint64_t, s, xenheap_phys_end);
-#else
         s = max_t(uint64_t, s, 1<<20);
-#endif
         if ( (boot_e820.map[i].type != E820_RAM) || (s >= e) )
             continue;
 
-#ifdef __x86_64__
         if ( !acpi_boot_table_init_done &&
              s >= (1ULL << 32) &&
              !acpi_boot_table_init() )
@@ -1042,11 +1006,9 @@ void __init __start_xen(unsigned long mb
                                   " %013"PRIx64"-%013"PRIx64"\n",
                    e, map_e);
         }
-#endif
 
         set_pdx_range(s >> PAGE_SHIFT, e >> PAGE_SHIFT);
 
-#ifdef CONFIG_X86_64
         /* Need to create mappings above BOOTSTRAP_MAP_BASE. */
         map_s = max_t(uint64_t, s, BOOTSTRAP_MAP_BASE);
         map_e = min_t(uint64_t, e,
@@ -1080,29 +1042,22 @@ void __init __start_xen(unsigned long mb
                              (map_s - s) >> PAGE_SHIFT, PAGE_HYPERVISOR);
             init_boot_pages(s, map_s);
         }
-#else
-        init_boot_pages(s, e);
-#endif
     }
 
     for ( i = 0; i < mbi->mods_count; ++i )
     {
         set_pdx_range(mod[i].mod_start,
                       mod[i].mod_start + PFN_UP(mod[i].mod_end));
-#ifdef CONFIG_X86_64
         map_pages_to_xen((unsigned long)mfn_to_virt(mod[i].mod_start),
                          mod[i].mod_start,
                          PFN_UP(mod[i].mod_end), PAGE_HYPERVISOR);
-#endif
     }
-#ifdef CONFIG_X86_64
     map_pages_to_xen((unsigned long)__va(kexec_crash_area.start),
                      kexec_crash_area.start >> PAGE_SHIFT,
                      PFN_UP(kexec_crash_area.size), PAGE_HYPERVISOR);
     xen_virt_end = ((unsigned long)_end + (1UL << L2_PAGETABLE_SHIFT) - 1) &
                    ~((1UL << L2_PAGETABLE_SHIFT) - 1);
     destroy_xen_mappings(xen_virt_end, XEN_VIRT_START + BOOTSTRAP_MAP_BASE);
-#endif
 
     memguard_init();
 
@@ -1151,30 +1106,10 @@ void __init __start_xen(unsigned long mb
 
     numa_initmem_init(0, max_page);
 
-#if defined(CONFIG_X86_32)
-    /* Initialise the Xen heap. */
-    for ( nr_pages = i = 0; i < boot_e820.nr_map; i++ )
-    {
-        uint64_t s = boot_e820.map[i].addr;
-        uint64_t e = s + boot_e820.map[i].size;
-        s = max_t(uint64_t, s, xenheap_initial_phys_start);
-        e = min_t(uint64_t, e, xenheap_phys_end);
-        if ( (boot_e820.map[i].type != E820_RAM) || (s >= e) )
-            continue;
-        init_xenheap_pages(s, e);
-        nr_pages += (e - s) >> PAGE_SHIFT;
-    }
-    printk("Xen heap: %luMB (%lukB)\n", 
-           nr_pages >> (20 - PAGE_SHIFT),
-           nr_pages << (PAGE_SHIFT - 10));
-#endif
-
     end_boot_allocator();
     system_state = SYS_STATE_boot;
 
-#if defined(CONFIG_X86_64)
     vesa_init();
-#endif
 
     softirq_init();
     tasklet_subsys_init();
@@ -1217,10 +1152,8 @@ void __init __start_xen(unsigned long mb
         max_cpus = nr_cpu_ids;
     }
 
-#ifdef CONFIG_X86_64
     /* Low mappings were only needed for some BIOS table parsing. */
     zap_low_mappings();
-#endif
 
     init_apic_mappings();
 
@@ -1268,11 +1201,9 @@ void __init __start_xen(unsigned long mb
 
     pt_pci_init();
 
-#ifdef CONFIG_X86_64
     vesa_mtrr_init();
 
     acpi_mmcfg_init();
-#endif
 
     iommu_setup();    /* setup iommu if available */
 
@@ -1406,10 +1337,8 @@ void arch_get_xen_caps(xen_capabilities_
 
     (*info)[0] = '\0';
 
-#ifdef CONFIG_X86_64
     snprintf(s, sizeof(s), "xen-%d.%d-x86_64 ", major, minor);
     safe_strcat(*info, s);
-#endif
     snprintf(s, sizeof(s), "xen-%d.%d-x86_32p ", major, minor);
     safe_strcat(*info, s);
     if ( hvm_enabled )
@@ -1418,10 +1347,8 @@ void arch_get_xen_caps(xen_capabilities_
         safe_strcat(*info, s);
         snprintf(s, sizeof(s), "hvm-%d.%d-x86_32p ", major, minor);
         safe_strcat(*info, s);
-#ifdef CONFIG_X86_64
         snprintf(s, sizeof(s), "hvm-%d.%d-x86_64 ", major, minor);
         safe_strcat(*info, s);
-#endif
     }
 }
 
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/shutdown.c
--- a/xen/arch/x86/shutdown.c   Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/shutdown.c   Wed Sep 12 13:29:30 2012 +0100
@@ -100,202 +100,6 @@ void machine_halt(void)
     __machine_halt(NULL);
 }
 
-#ifdef __i386__
-
-/* The following code and data reboots the machine by switching to real
-   mode and jumping to the BIOS reset entry point, as if the CPU has
-   really been reset.  The previous version asked the keyboard
-   controller to pulse the CPU reset line, which is more thorough, but
-   doesn't work with at least one type of 486 motherboard.  It is easy
-   to stop this code working; hence the copious comments. */
-
-static unsigned long long
-real_mode_gdt_entries [3] =
-{
-    0x0000000000000000ULL,      /* Null descriptor */
-    0x00009a000000ffffULL,      /* 16-bit real-mode 64k code at 0x00000000 */
-    0x000092000100ffffULL       /* 16-bit real-mode 64k data at 0x00000100 */
-};
-
-static const struct
-{
-    unsigned short       size __attribute__ ((packed));
-    unsigned long long * base __attribute__ ((packed));
-}
-real_mode_gdt = { sizeof (real_mode_gdt_entries) - 1, real_mode_gdt_entries },
-real_mode_idt = { 0x3ff, NULL };
-
-
-/* This is 16-bit protected mode code to disable paging and the cache,
-   switch to real mode and jump to the BIOS reset code.
-
-   The instruction that switches to real mode by writing to CR0 must be
-   followed immediately by a far jump instruction, which set CS to a
-   valid value for real mode, and flushes the prefetch queue to avoid
-   running instructions that have already been decoded in protected
-   mode.
-
-   Clears all the flags except ET, especially PG (paging), PE
-   (protected-mode enable) and TS (task switch for coprocessor state
-   save).  Flushes the TLB after paging has been disabled.  Sets CD and
-   NW, to disable the cache on a 486, and invalidates the cache.  This
-   is more like the state of a 486 after reset.  I don't know if
-   something else should be done for other chips.
-
-   More could be done here to set up the registers as if a CPU reset had
-   occurred; hopefully real BIOSs don't assume much. */
-
-static const unsigned char real_mode_switch [] =
-{
-    0x0f, 0x20, 0xc0,                           /*    movl  %cr0,%eax        */
-    0x66, 0x83, 0xe0, 0x11,                     /*    andl  $0x00000011,%eax */
-    0x66, 0x0d, 0x00, 0x00, 0x00, 0x60,         /*    orl   $0x60000000,%eax */
-    0x0f, 0x22, 0xc0,                           /*    movl  %eax,%cr0        */
-    0x0f, 0x22, 0xd8,                           /*    movl  %eax,%cr3        */
-    0x0f, 0x20, 0xc2,                           /*    movl  %cr0,%edx        */
-    0x66, 0x81, 0xe2, 0x00, 0x00, 0x00, 0x60,   /*    andl  $0x60000000,%edx */
-    0x74, 0x02,                                 /*    jz    f                */
-    0x0f, 0x09,                                 /*    wbinvd                 */
-    0x24, 0x10,                                 /* f: andb  $0x10,al         */
-    0x0f, 0x22, 0xc0                            /*    movl  %eax,%cr0        */
-};
-#define MAX_LENGTH 0x40
-static const unsigned char jump_to_bios [] =
-{
-    0xea, 0xf0, 0xff, 0x00, 0xf0                /*    ljmp  $0xf000,$0xfff0  */
-};
-
-/*
- * Switch to real mode and then execute the code
- * specified by the code and length parameters.
- * We assume that length will aways be less that MAX_LENGTH!
- */
-static void machine_real_restart(const unsigned char *code, unsigned length)
-{
-    local_irq_disable();
-
-    /* Write zero to CMOS register number 0x0f, which the BIOS POST
-       routine will recognize as telling it to do a proper reboot.  (Well
-       that's what this book in front of me says -- it may only apply to
-       the Phoenix BIOS though, it's not clear).  At the same time,
-       disable NMIs by setting the top bit in the CMOS address register,
-       as we're about to do peculiar things to the CPU. */
-
-    spin_lock(&rtc_lock);
-    CMOS_WRITE(0x00, 0x8f);
-    spin_unlock(&rtc_lock);
-
-    /* Identity-map virtual address zero. */
-
-    map_pages_to_xen(0, 0, 1, __PAGE_HYPERVISOR|MAP_SMALL_PAGES);
-    set_current(idle_vcpu[0]);
-    write_ptbase(idle_vcpu[0]);
-
-    /* For the switch to real mode, copy some code to low memory.  It has
-       to be in the first 64k because it is running in 16-bit mode, and it
-       has to have the same physical and virtual address, because it turns
-       off paging.  Copy it near the end of the first page, out of the way
-       of BIOS variables. */
-
-    memcpy((void *)(PAGE_SIZE - sizeof(real_mode_switch) - MAX_LENGTH),
-           real_mode_switch, sizeof(real_mode_switch));
-    memcpy((void *)(PAGE_SIZE - MAX_LENGTH), code, length);
-
-    /* Set up the IDT for real mode. */
-
-    __asm__ __volatile__("lidt %0": : "m" (real_mode_idt));
-
-    /* Set up a GDT from which we can load segment descriptors for real
-       mode.  The GDT is not used in real mode; it is just needed here to
-       prepare the descriptors. */
-
-    __asm__ __volatile__("lgdt %0": : "m" (real_mode_gdt));
-
-    /* Load the data segment registers, and thus the descriptors ready for
-       real mode.  The base address of each segment is 0x100, 16 times the
-       selector value being loaded here.  This is so that the segment
-       registers don't have to be reloaded after switching to real mode:
-       the values are consistent for real mode operation already. */
-
-    __asm__ __volatile__ ("\tmov %0,%%ds\n"
-                          "\tmov %0,%%es\n"
-                          "\tmov %0,%%fs\n"
-                          "\tmov %0,%%gs\n"
-                          "\tmov %0,%%ss"
-                          :
-                          : "r" (0x0010));
-
-    /* Jump to the 16-bit code that we copied earlier.  It disables paging
-       and the cache, switches to real mode, and jumps to the BIOS reset
-       entry point. */
-
-    __asm__ __volatile__ ("ljmp $0x0008,%0"
-                          :
-                          : "i" ((void *)(PAGE_SIZE -
-                                          sizeof(real_mode_switch) -
-                                          MAX_LENGTH)));
-}
-
-static int __init set_bios_reboot(struct dmi_system_id *d)
-{
-    if ( reboot_type != BOOT_BIOS )
-    {
-        reboot_type = BOOT_BIOS;
-        printk("%s series board detected. "
-               "Selecting BIOS-method for reboots.\n", d->ident);
-    }
-    return 0;
-}
-
-static struct dmi_system_id __initdata reboot_dmi_table[] = {
-    {    /* Handle problems with rebooting on Dell 1300's */
-        .callback = set_bios_reboot,
-        .ident = "Dell PowerEdge 1300",
-        .matches = {
-            DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
-            DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1300/"),
-        },
-    },
-    {    /* Handle problems with rebooting on Dell 300's */
-        .callback = set_bios_reboot,
-        .ident = "Dell PowerEdge 300",
-        .matches = {
-            DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
-            DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 300/"),
-        },
-    },
-    {    /* Handle problems with rebooting on Dell 2400's */
-        .callback = set_bios_reboot,
-        .ident = "Dell PowerEdge 2400",
-        .matches = {
-            DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
-            DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"),
-        },
-    },
-    {    /* Handle problems with rebooting on HP laptops */
-        .callback = set_bios_reboot,
-        .ident = "HP Compaq Laptop",
-        .matches = {
-            DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
-            DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"),
-        },
-    },
-    { }
-};
-
-static int __init reboot_init(void)
-{
-    dmi_check_system(reboot_dmi_table);
-    return 0;
-}
-__initcall(reboot_init);
-
-#else /* __x86_64__ */
-
-#define machine_real_restart(x, y)
-
-#endif
-
 static void __machine_restart(void *pdelay)
 {
     machine_restart(*(unsigned int *)pdelay);
@@ -371,7 +175,7 @@ void machine_restart(unsigned int delay_
             reboot_type = BOOT_KBD;
             break;
         case BOOT_BIOS:
-            machine_real_restart(jump_to_bios, sizeof(jump_to_bios));
+            /* unsupported on x86_64 */
             reboot_type = BOOT_KBD;
             break;
         case BOOT_ACPI:
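
    (For context, not part of the changeset: the 16-bit real-mode BIOS reboot
    path removed above cannot be entered from a 64-bit hypervisor, so BOOT_BIOS
    now simply degrades to the keyboard-controller method on the next loop
    iteration. A rough sketch of that classic 8042 reset -- illustrative only,
    not the exact Xen code; inb/outb/udelay are the usual Xen I/O helpers:

        static void kbd_reset_sketch(void)
        {
            unsigned int i;

            /* Wait (bounded) for the 8042 input buffer to drain. */
            for ( i = 0; i < 100; i++ )
            {
                if ( !(inb(0x64) & 0x02) )
                    break;
                udelay(50);
            }
            /* Command 0xFE pulses the CPU reset line. */
            outb(0xfe, 0x64);
        }
    )
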
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c    Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/smpboot.c    Wed Sep 12 13:29:30 2012 +0100
@@ -660,9 +660,7 @@ static void cpu_smpboot_free(unsigned in
     order = get_order_from_pages(NR_RESERVED_GDT_PAGES);
     free_xenheap_pages(per_cpu(gdt_table, cpu), order);
 
-#ifdef __x86_64__
     free_xenheap_pages(per_cpu(compat_gdt_table, cpu), order);
-#endif
 
     order = get_order_from_bytes(IDT_ENTRIES * sizeof(idt_entry_t));
     free_xenheap_pages(idt_tables[cpu], order);
@@ -695,14 +693,12 @@ static int cpu_smpboot_alloc(unsigned in
     BUILD_BUG_ON(NR_CPUS > 0x10000);
     gdt[PER_CPU_GDT_ENTRY - FIRST_RESERVED_GDT_ENTRY].a = cpu;
 
-#ifdef __x86_64__
     per_cpu(compat_gdt_table, cpu) = gdt =
         alloc_xenheap_pages(order, MEMF_node(cpu_to_node(cpu)));
     if ( gdt == NULL )
         goto oom;
     memcpy(gdt, boot_cpu_compat_gdt_table, NR_RESERVED_GDT_PAGES * PAGE_SIZE);
     gdt[PER_CPU_GDT_ENTRY - FIRST_RESERVED_GDT_ENTRY].a = cpu;
-#endif
 
     order = get_order_from_bytes(IDT_ENTRIES * sizeof(idt_entry_t));
     idt_tables[cpu] = alloc_xenheap_pages(order, MEMF_node(cpu_to_node(cpu)));
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/srat.c
--- a/xen/arch/x86/srat.c       Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/srat.c       Wed Sep 12 13:29:30 2012 +0100
@@ -115,7 +115,6 @@ static __init void bad_srat(void)
                pxm2node[i] = NUMA_NO_NODE;
 }
 
-#ifdef CONFIG_X86_64
 /*
  * A lot of BIOS fill in 10 (= no distance) everywhere. This messes
  * up the NUMA heuristics which wants the local node to have a smaller
@@ -157,11 +156,6 @@ void __init acpi_numa_slit_init(struct a
        acpi_slit = mfn_to_virt(mfn);
        memcpy(acpi_slit, slit, slit->header.length);
 }
-#else
-void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
-{
-}
-#endif
 
 /* Callback for Proximity Domain -> x2APIC mapping */
 void __init
@@ -267,9 +261,7 @@ acpi_numa_memory_affinity_init(struct ac
        {
                printk(KERN_INFO "SRAT: hot plug zone found %"PRIx64" - 
%"PRIx64" \n",
                                start, end);
-#ifdef CONFIG_X86_64
                mem_hotplug = 1;
-#endif
        }
 
        i = conflicting_memblks(start, end);
@@ -348,8 +340,6 @@ static int nodes_cover_memory(void)
 
 void __init acpi_numa_arch_fixup(void) {}
 
-#ifdef __x86_64__
-
 static u64 __initdata srat_region_mask;
 
 static u64 __init fill_mask(u64 mask)
@@ -411,8 +401,6 @@ void __init srat_parse_regions(u64 addr)
        pfn_pdx_hole_setup(mask >> PAGE_SHIFT);
 }
 
-#endif /* __x86_64__ */
-
 /* Use the information discovered above to actually set up the nodes. */
 int __init acpi_scan_nodes(u64 start, u64 end)
 {
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/time.c
--- a/xen/arch/x86/time.c       Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/time.c       Wed Sep 12 13:29:30 2012 +0100
@@ -120,31 +120,15 @@ static inline u32 mul_frac(u32 multiplic
 static inline u64 scale_delta(u64 delta, struct time_scale *scale)
 {
     u64 product;
-#ifdef CONFIG_X86_32
-    u32 tmp1, tmp2;
-#endif
 
     if ( scale->shift < 0 )
         delta >>= -scale->shift;
     else
         delta <<= scale->shift;
 
-#ifdef CONFIG_X86_32
-    asm (
-        "mul  %5       ; "
-        "mov  %4,%%eax ; "
-        "mov  %%edx,%4 ; "
-        "mul  %5       ; "
-        "xor  %5,%5    ; "
-        "add  %4,%%eax ; "
-        "adc  %5,%%edx ; "
-        : "=A" (product), "=r" (tmp1), "=r" (tmp2)
-        : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (scale->mul_frac) );
-#else
     asm (
         "mul %%rdx ; shrd $32,%%rdx,%%rax"
         : "=a" (product) : "0" (delta), "d" ((u64)scale->mul_frac) );
-#endif
 
     return product;
 }
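
    (For reference, not part of the patch: the retained x86_64 asm forms the
    full product of the shift-adjusted delta and the 32-bit fixed-point
    multiplier, then keeps bits 32..95, i.e. (delta * mul_frac) >> 32. A
    minimal C sketch of the same arithmetic, assuming a compiler that
    provides unsigned __int128:

        static inline u64 scale_delta_sketch(u64 delta,
                                             const struct time_scale *scale)
        {
            if ( scale->shift < 0 )
                delta >>= -scale->shift;
            else
                delta <<= scale->shift;

            /* mul_frac < 2^32, so the product is below 2^96 and the
             * >>32 result always fits in 64 bits. */
            return ((unsigned __int128)delta * scale->mul_frac) >> 32;
        }
    )
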
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/trace.c
--- a/xen/arch/x86/trace.c      Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/trace.c      Wed Sep 12 13:29:30 2012 +0100
@@ -6,16 +6,10 @@
 #include <xen/sched.h>
 #include <xen/trace.h>
 
-#ifndef __x86_64__
-#undef TRC_64_FLAG
-#define TRC_64_FLAG 0
-#endif
-
 void trace_hypercall(void)
 {
     struct cpu_user_regs *regs = guest_cpu_user_regs();
 
-#ifdef __x86_64__
     if ( is_pv_32on64_vcpu(current) )
     {
         struct {
@@ -28,7 +22,6 @@ void trace_hypercall(void)
         __trace_var(TRC_PV_HYPERCALL, 1, sizeof(d), &d);
     }
     else
-#endif
     {
         struct {
             unsigned long eip;
@@ -48,7 +41,6 @@ void trace_hypercall(void)
 void __trace_pv_trap(int trapnr, unsigned long eip,
                      int use_error_code, unsigned error_code)
 {
-#ifdef __x86_64__
     if ( is_pv_32on64_vcpu(current) )
     {
         struct {
@@ -66,7 +58,6 @@ void __trace_pv_trap(int trapnr, unsigne
         __trace_var(TRC_PV_TRAP, 1, sizeof(d), &d);
     }
     else
-#endif        
     {
         struct {
             unsigned long eip;
@@ -91,7 +82,6 @@ void __trace_pv_page_fault(unsigned long
 {
     unsigned long eip = guest_cpu_user_regs()->eip;
 
-#ifdef __x86_64__
     if ( is_pv_32on64_vcpu(current) )
     {
         struct {
@@ -105,7 +95,6 @@ void __trace_pv_page_fault(unsigned long
         __trace_var(TRC_PV_PAGE_FAULT, 1, sizeof(d), &d);
     }
     else
-#endif        
     {
         struct {
             unsigned long eip, addr;
@@ -124,14 +113,12 @@ void __trace_pv_page_fault(unsigned long
 
 void __trace_trap_one_addr(unsigned event, unsigned long va)
 {
-#ifdef __x86_64__
     if ( is_pv_32on64_vcpu(current) )
     {
         u32 d = va;
         __trace_var(event, 1, sizeof(d), &d);
     }
     else
-#endif        
     {
         event |= TRC_64_FLAG;
         __trace_var(event, 1, sizeof(va), &va);
@@ -141,7 +128,6 @@ void __trace_trap_one_addr(unsigned even
 void __trace_trap_two_addr(unsigned event, unsigned long va1,
                            unsigned long va2)
 {
-#ifdef __x86_64__
     if ( is_pv_32on64_vcpu(current) )
     {
         struct {
@@ -152,7 +138,6 @@ void __trace_trap_two_addr(unsigned even
         __trace_var(event, 1, sizeof(d), &d);
     }
     else
-#endif        
     {
         struct {
             unsigned long va1, va2;
@@ -176,7 +161,6 @@ void __trace_ptwr_emulation(unsigned lon
      * cases, "unsigned long" is the size of a guest virtual address.
      */
 
-#ifdef __x86_64__
     if ( is_pv_32on64_vcpu(current) )
     {
         struct {
@@ -190,7 +174,6 @@ void __trace_ptwr_emulation(unsigned lon
         __trace_var(TRC_PV_PTWR_EMULATION_PAE, 1, sizeof(d), &d);
     }
     else
-#endif        
     {
         struct {
             l1_pgentry_t pte;
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c      Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/traps.c      Wed Sep 12 13:29:30 2012 +0100
@@ -111,13 +111,8 @@ integer_param("debug_stack_lines", debug
 static bool_t __devinitdata opt_ler;
 boolean_param("ler", opt_ler);
 
-#ifdef CONFIG_X86_32
-#define stack_words_per_line 8
-#define ESP_BEFORE_EXCEPTION(regs) ((unsigned long *)&regs->esp)
-#else
 #define stack_words_per_line 4
 #define ESP_BEFORE_EXCEPTION(regs) ((unsigned long *)regs->rsp)
-#endif
 
 static void show_guest_stack(struct vcpu *v, struct cpu_user_regs *regs)
 {
@@ -157,14 +152,7 @@ static void show_guest_stack(struct vcpu
         struct vcpu *vcpu;
 
         ASSERT(guest_kernel_mode(v, regs));
-#ifndef __x86_64__
-        addr = read_cr3();
-        for_each_vcpu( v->domain, vcpu )
-            if ( vcpu->arch.cr3 == addr )
-                break;
-#else
         vcpu = maddr_get_owner(read_cr3()) == v->domain ? v : NULL;
-#endif
         if ( !vcpu )
         {
             stack = do_page_walk(v, (unsigned long)stack);
@@ -387,7 +375,6 @@ unsigned long *get_x86_gpr(struct cpu_us
     case  5: p = &regs->ebp; break;
     case  6: p = &regs->esi; break;
     case  7: p = &regs->edi; break;
-#if defined(__x86_64__)
     case  8: p = &regs->r8;  break;
     case  9: p = &regs->r9;  break;
     case 10: p = &regs->r10; break;
@@ -396,7 +383,6 @@ unsigned long *get_x86_gpr(struct cpu_us
     case 13: p = &regs->r13; break;
     case 14: p = &regs->r14; break;
     case 15: p = &regs->r15; break;
-#endif
     default: p = NULL; break;
     }
 
@@ -823,10 +809,6 @@ static void pv_cpuid(struct cpu_user_reg
         /* Modify Feature Information. */
         if ( !cpu_has_sep )
             __clear_bit(X86_FEATURE_SEP, &d);
-#ifdef __i386__
-        if ( !supervisor_mode_kernel )
-            __clear_bit(X86_FEATURE_SEP, &d);
-#endif
         __clear_bit(X86_FEATURE_DS, &d);
         __clear_bit(X86_FEATURE_ACC, &d);
         __clear_bit(X86_FEATURE_PBE, &d);
@@ -879,10 +861,8 @@ static void pv_cpuid(struct cpu_user_reg
             __clear_bit(X86_FEATURE_LM % 32, &d);
             __clear_bit(X86_FEATURE_LAHF_LM % 32, &c);
         }
-#ifndef __i386__
         if ( is_pv_32on64_vcpu(current) &&
              boot_cpu_data.x86_vendor != X86_VENDOR_AMD )
-#endif
             __clear_bit(X86_FEATURE_SYSCALL % 32, &d);
         __clear_bit(X86_FEATURE_PAGE1GB % 32, &d);
         __clear_bit(X86_FEATURE_RDTSCP % 32, &d);
@@ -1361,11 +1341,9 @@ static int fixup_page_fault(unsigned lon
              ptwr_do_page_fault(v, addr, regs) )
             return EXCRET_fault_fixed;
 
-#ifdef __x86_64__
         if ( IS_PRIV(d) && (regs->error_code & PFEC_page_present) &&
              mmio_ro_do_page_fault(v, addr, regs) )
             return EXCRET_fault_fixed;
-#endif
     }
 
     /* For non-external shadowed guests, we fix up both their own 
@@ -1566,7 +1544,6 @@ static int read_descriptor(unsigned int 
     return 1;
 }
 
-#ifdef __x86_64__
 static int read_gate_descriptor(unsigned int gate_sel,
                                 const struct vcpu *v,
                                 unsigned int *sel,
@@ -1622,20 +1599,15 @@ static int read_gate_descriptor(unsigned
 
     return 1;
 }
-#endif
 
 /* Has the guest requested sufficient permission for this I/O access? */
 static int guest_io_okay(
     unsigned int port, unsigned int bytes,
     struct vcpu *v, struct cpu_user_regs *regs)
 {
-#if defined(__x86_64__)
     /* If in user mode, switch to kernel mode just to read I/O bitmap. */
     int user_mode = !(v->arch.flags & TF_kernel_mode);
 #define TOGGLE_MODE() if ( user_mode ) toggle_guest_mode(v)
-#elif defined(__i386__)
-#define TOGGLE_MODE() ((void)0)
-#endif
 
     if ( !vm86_mode(regs) &&
          (v->arch.pv_vcpu.iopl >= (guest_kernel_mode(v, regs) ? 1 : 3)) )
@@ -1889,11 +1861,7 @@ static inline uint64_t guest_misc_enable
     }                                                                       \
     (eip) += sizeof(_x); _x; })
 
-#if defined(CONFIG_X86_32)
-# define read_sreg(regs, sr) ((regs)->sr)
-#elif defined(CONFIG_X86_64)
-# define read_sreg(regs, sr) read_segment_register(sr)
-#endif
+#define read_sreg(regs, sr) read_segment_register(sr)
 
 static int is_cpufreq_controller(struct domain *d)
 {
@@ -1901,9 +1869,7 @@ static int is_cpufreq_controller(struct 
             (d->domain_id == 0));
 }
 
-#ifdef CONFIG_X86_64
 #include "x86_64/mmconfig.h"
-#endif
 
 static int emulate_privileged_op(struct cpu_user_regs *regs)
 {
@@ -2034,7 +2000,6 @@ static int emulate_privileged_op(struct 
                   (ar & _SEGMENT_CODE) || !(ar & _SEGMENT_WR)) )
                 goto fail;
         }
-#ifdef CONFIG_X86_64
         else
         {
             if ( lm_ovr == lm_seg_none || data_sel < 4 )
@@ -2062,7 +2027,6 @@ static int emulate_privileged_op(struct 
             data_limit = ~0UL;
             ar = _SEGMENT_WR|_SEGMENT_S|_SEGMENT_DPL|_SEGMENT_P;
         }
-#endif
 
         port = (u16)regs->edx;
 
@@ -2126,7 +2090,6 @@ static int emulate_privileged_op(struct 
      * GPR context. This is needed for some systems which (ab)use IN/OUT
      * to communicate with BIOS code in system-management mode.
      */
-#ifdef __x86_64__
     /* movq $host_to_guest_gpr_switch,%rcx */
     io_emul_stub[0] = 0x48;
     io_emul_stub[1] = 0xb9;
@@ -2134,14 +2097,6 @@ static int emulate_privileged_op(struct 
     /* callq *%rcx */
     io_emul_stub[10] = 0xff;
     io_emul_stub[11] = 0xd1;
-#else
-    /* call host_to_guest_gpr_switch */
-    io_emul_stub[0] = 0xe8;
-    *(s32 *)&io_emul_stub[1] =
-        (char *)host_to_guest_gpr_switch - &io_emul_stub[5];
-    /* 7 x nop */
-    memset(&io_emul_stub[5], 0x90, 7);
-#endif
     /* data16 or nop */
     io_emul_stub[12] = (op_bytes != 2) ? 0x90 : 0x66;
     /* <io-access opcode> */
@@ -2443,7 +2398,6 @@ static int emulate_privileged_op(struct 
         msr_content = ((uint64_t)edx << 32) | eax;
         switch ( (u32)regs->ecx )
         {
-#ifdef CONFIG_X86_64
         case MSR_FS_BASE:
             if ( is_pv_32on64_vcpu(v) )
                 goto fail;
@@ -2465,7 +2419,6 @@ static int emulate_privileged_op(struct 
                 goto fail;
             v->arch.pv_vcpu.gs_base_user = msr_content;
             break;
-#endif
         case MSR_K7_FID_VID_STATUS:
         case MSR_K7_FID_VID_CTL:
         case MSR_K8_PSTATE_LIMIT:
@@ -2509,10 +2462,8 @@ static int emulate_privileged_op(struct 
             if ( (rdmsr_safe(MSR_FAM10H_MMIO_CONF_BASE, val) != 0) )
                 goto fail;
             if (
-#ifdef CONFIG_X86_64
                  (pci_probe & PCI_PROBE_MASK) == PCI_PROBE_MMCONF ?
                  val != msr_content :
-#endif
                  ((val ^ msr_content) &
                   ~( FAM10H_MMIO_CONF_ENABLE |
                     (FAM10H_MMIO_CONF_BUSRANGE_MASK <<
@@ -2600,7 +2551,6 @@ static int emulate_privileged_op(struct 
     case 0x32: /* RDMSR */
         switch ( (u32)regs->ecx )
         {
-#ifdef CONFIG_X86_64
         case MSR_FS_BASE:
             if ( is_pv_32on64_vcpu(v) )
                 goto fail;
@@ -2619,7 +2569,6 @@ static int emulate_privileged_op(struct 
             regs->eax = v->arch.pv_vcpu.gs_base_user & 0xFFFFFFFFUL;
             regs->edx = v->arch.pv_vcpu.gs_base_user >> 32;
             break;
-#endif
         case MSR_K7_FID_VID_CTL:
         case MSR_K7_FID_VID_STATUS:
         case MSR_K8_PSTATE_LIMIT:
@@ -2714,7 +2663,6 @@ static inline int check_stack_limit(unsi
 
 static void emulate_gate_op(struct cpu_user_regs *regs)
 {
-#ifdef __x86_64__
     struct vcpu *v = current;
     unsigned int sel, ar, dpl, nparm, opnd_sel;
     unsigned int op_default, op_bytes, ad_default, ad_bytes;
@@ -3071,7 +3019,6 @@ static void emulate_gate_op(struct cpu_u
 
     regs->cs = sel;
     instruction_done(regs, off, 0);
-#endif
 }
 
 void do_general_protection(struct cpu_user_regs *regs)
@@ -3134,16 +3081,6 @@ void do_general_protection(struct cpu_us
         return;
     }
 
-#if defined(__i386__)
-    if ( VM_ASSIST(v->domain, VMASST_TYPE_4gb_segments) && 
-         (regs->error_code == 0) && 
-         gpf_emulate_4gb(regs) )
-    {
-        TRACE_1D(TRC_PV_EMULATE_4GB, regs->eip);
-        return;
-    }
-#endif
-
     /* Pass on GPF as is. */
     do_guest_trap(TRAP_gp_fault, regs, 1);
     return;
@@ -3425,7 +3362,6 @@ void do_debug(struct cpu_user_regs *regs
     {
         if ( regs->eflags & X86_EFLAGS_TF )
         {
-#ifdef __x86_64__
             /* In SYSENTER entry path we can't zap TF until EFLAGS is saved. */
             if ( (regs->rip >= (unsigned long)sysenter_entry) &&
                  (regs->rip <= (unsigned long)sysenter_eflags_saved) )
@@ -3434,7 +3370,6 @@ void do_debug(struct cpu_user_regs *regs
                     regs->eflags &= ~X86_EFLAGS_TF;
                 goto out;
             }
-#endif
             if ( !debugger_trap_fatal(TRAP_debug, regs) )
             {
                 WARN_ON(1);
@@ -3816,12 +3751,6 @@ long set_debugreg(struct vcpu *v, int re
                         return -EPERM;
                     io_enable |= value & (3 << ((i - 16) >> 1));
                 }
-#ifdef __i386__
-                if ( ((boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) ||
-                      !boot_cpu_has(X86_FEATURE_LM)) &&
-                     (((value >> i) & 0xc) == DR_LEN_8) )
-                    return -EPERM;
-#endif
             }
 
             /* Guest DR5 is a handy stash for I/O intercept information. */
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/x86_32/Makefile
--- a/xen/arch/x86/x86_32/Makefile      Wed Sep 12 13:24:28 2012 +0200
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,12 +0,0 @@
-obj-y += domain_page.o
-obj-bin-y += entry.o
-obj-bin-y += gpr_switch.o
-obj-y += mm.o
-obj-y += seg_fixup.o
-obj-y += traps.o
-obj-y += machine_kexec.o
-obj-y += pci.o
-
-obj-$(crash_debug) += gdbstub.o
-
-obj-bin-$(supervisor_mode_kernel) += supervisor_mode_kernel.o
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/x86_32/asm-offsets.c
--- a/xen/arch/x86/x86_32/asm-offsets.c Wed Sep 12 13:24:28 2012 +0200
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,140 +0,0 @@
-/*
- * Generate definitions needed by assembly language modules.
- * This code generates raw asm output which is post-processed
- * to extract and format the required data.
- */
-#define COMPILE_OFFSETS
-
-#include <xen/config.h>
-#include <xen/perfc.h>
-#include <xen/sched.h>
-#include <asm/fixmap.h>
-#include <asm/hardirq.h>
-#include <xen/multiboot.h>
-
-#define DEFINE(_sym, _val) \
-    __asm__ __volatile__ ( "\n->" #_sym " %0 " #_val : : "i" (_val) )
-#define BLANK() \
-    __asm__ __volatile__ ( "\n->" : : )
-#define OFFSET(_sym, _str, _mem) \
-    DEFINE(_sym, offsetof(_str, _mem));
-
-/* base-2 logarithm */
-#define __L2(_x)  (((_x) & 0x00000002) ?   1 : 0)
-#define __L4(_x)  (((_x) & 0x0000000c) ? ( 2 + __L2( (_x)>> 2)) : __L2( _x))
-#define __L8(_x)  (((_x) & 0x000000f0) ? ( 4 + __L4( (_x)>> 4)) : __L4( _x))
-#define __L16(_x) (((_x) & 0x0000ff00) ? ( 8 + __L8( (_x)>> 8)) : __L8( _x))
-#define LOG_2(_x) (((_x) & 0xffff0000) ? (16 + __L16((_x)>>16)) : __L16(_x))
-
-void __dummy__(void)
-{
-    OFFSET(UREGS_eax, struct cpu_user_regs, eax);
-    OFFSET(UREGS_ebx, struct cpu_user_regs, ebx);
-    OFFSET(UREGS_ecx, struct cpu_user_regs, ecx);
-    OFFSET(UREGS_edx, struct cpu_user_regs, edx);
-    OFFSET(UREGS_esi, struct cpu_user_regs, esi);
-    OFFSET(UREGS_edi, struct cpu_user_regs, edi);
-    OFFSET(UREGS_esp, struct cpu_user_regs, esp);
-    OFFSET(UREGS_ebp, struct cpu_user_regs, ebp);
-    OFFSET(UREGS_eip, struct cpu_user_regs, eip);
-    OFFSET(UREGS_cs, struct cpu_user_regs, cs);
-    OFFSET(UREGS_ds, struct cpu_user_regs, ds);
-    OFFSET(UREGS_es, struct cpu_user_regs, es);
-    OFFSET(UREGS_fs, struct cpu_user_regs, fs);
-    OFFSET(UREGS_gs, struct cpu_user_regs, gs);
-    OFFSET(UREGS_ss, struct cpu_user_regs, ss);
-    OFFSET(UREGS_eflags, struct cpu_user_regs, eflags);
-    OFFSET(UREGS_error_code, struct cpu_user_regs, error_code);
-    OFFSET(UREGS_entry_vector, struct cpu_user_regs, entry_vector);
-    OFFSET(UREGS_saved_upcall_mask, struct cpu_user_regs, saved_upcall_mask);
-    OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, esp);
-    DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs));
-    BLANK();
-
-    OFFSET(VCPU_processor, struct vcpu, processor);
-    OFFSET(VCPU_vcpu_info, struct vcpu, vcpu_info);
-    OFFSET(VCPU_trap_bounce, struct vcpu, arch.pv_vcpu.trap_bounce);
-    OFFSET(VCPU_thread_flags, struct vcpu, arch.flags);
-    OFFSET(VCPU_event_sel, struct vcpu, arch.pv_vcpu.event_callback_cs);
-    OFFSET(VCPU_event_addr, struct vcpu, arch.pv_vcpu.event_callback_eip);
-    OFFSET(VCPU_failsafe_sel, struct vcpu,
-           arch.pv_vcpu.failsafe_callback_cs);
-    OFFSET(VCPU_failsafe_addr, struct vcpu,
-           arch.pv_vcpu.failsafe_callback_eip);
-    OFFSET(VCPU_kernel_ss, struct vcpu, arch.pv_vcpu.kernel_ss);
-    OFFSET(VCPU_kernel_sp, struct vcpu, arch.pv_vcpu.kernel_sp);
-    OFFSET(VCPU_guest_context_flags, struct vcpu, arch.vgc_flags);
-    OFFSET(VCPU_nmi_pending, struct vcpu, nmi_pending);
-    OFFSET(VCPU_mce_pending, struct vcpu, mce_pending);
-    OFFSET(VCPU_nmi_old_mask, struct vcpu, nmi_state.old_mask);
-    OFFSET(VCPU_mce_old_mask, struct vcpu, mce_state.old_mask);
-    OFFSET(VCPU_async_exception_mask, struct vcpu, async_exception_mask);
-    DEFINE(VCPU_TRAP_NMI, VCPU_TRAP_NMI);
-    DEFINE(VCPU_TRAP_MCE, VCPU_TRAP_MCE);
-    DEFINE(_VGCF_failsafe_disables_events, _VGCF_failsafe_disables_events);
-    BLANK();
-
-    OFFSET(TSS_ss0, struct tss_struct, ss0);
-    OFFSET(TSS_esp0, struct tss_struct, esp0);
-    OFFSET(TSS_ss1, struct tss_struct, ss1);
-    OFFSET(TSS_esp1, struct tss_struct, esp1);
-    DEFINE(TSS_sizeof, sizeof(struct tss_struct));
-    BLANK();
-
-    OFFSET(VCPU_svm_vmcb_pa, struct vcpu, arch.hvm_svm.vmcb_pa);
-    OFFSET(VCPU_svm_vmcb, struct vcpu, arch.hvm_svm.vmcb);
-    OFFSET(VCPU_svm_vmcb_in_sync, struct vcpu, arch.hvm_svm.vmcb_in_sync);
-    BLANK();
-
-    OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
-    OFFSET(VCPU_vmx_realmode, struct vcpu, arch.hvm_vmx.vmx_realmode);
-    OFFSET(VCPU_vmx_emulate, struct vcpu, arch.hvm_vmx.vmx_emulate);
-    OFFSET(VCPU_vm86_seg_mask, struct vcpu, arch.hvm_vmx.vm86_segment_mask);
-    OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm_vcpu.guest_cr[2]);
-    BLANK();
-
-    OFFSET(VCPU_nhvm_guestmode, struct vcpu, arch.hvm_vcpu.nvcpu.nv_guestmode);
-    OFFSET(VCPU_nhvm_p2m, struct vcpu, arch.hvm_vcpu.nvcpu.nv_p2m);
-    OFFSET(VCPU_nsvm_hap_enabled, struct vcpu, arch.hvm_vcpu.nvcpu.u.nsvm.ns_hap_enabled);
-    BLANK();
-
-    OFFSET(VMCB_rax, struct vmcb_struct, rax);
-    OFFSET(VMCB_rip, struct vmcb_struct, rip);
-    OFFSET(VMCB_rsp, struct vmcb_struct, rsp);
-    OFFSET(VMCB_rflags, struct vmcb_struct, rflags);
-    BLANK();
-
-    OFFSET(VCPUINFO_upcall_pending, vcpu_info_t, evtchn_upcall_pending);
-    OFFSET(VCPUINFO_upcall_mask, vcpu_info_t, evtchn_upcall_mask);
-    BLANK();
-
-    OFFSET(CPUINFO_guest_cpu_user_regs, struct cpu_info, guest_cpu_user_regs);
-    OFFSET(CPUINFO_processor_id, struct cpu_info, processor_id);
-    OFFSET(CPUINFO_current_vcpu, struct cpu_info, current_vcpu);
-    DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
-    BLANK();
-
-    OFFSET(TRAPBOUNCE_error_code, struct trap_bounce, error_code);
-    OFFSET(TRAPBOUNCE_flags, struct trap_bounce, flags);
-    OFFSET(TRAPBOUNCE_cs, struct trap_bounce, cs);
-    OFFSET(TRAPBOUNCE_eip, struct trap_bounce, eip);
-    BLANK();
-
-#if PERF_COUNTERS
-    DEFINE(ASM_PERFC_hypercalls, PERFC_hypercalls);
-    DEFINE(ASM_PERFC_exceptions, PERFC_exceptions);
-    BLANK();
-#endif
-
-    DEFINE(FIXMAP_apic_base, fix_to_virt(FIX_APIC_BASE));
-    BLANK();
-
-    DEFINE(IRQSTAT_shift, LOG_2(sizeof(irq_cpustat_t)));
-    BLANK();
-
-    OFFSET(CPUINFO86_ext_features, struct cpuinfo_x86, x86_capability[1]);
-    BLANK();
-
-    OFFSET(MB_flags, multiboot_info_t, flags);
-    OFFSET(MB_cmdline, multiboot_info_t, cmdline);
-}
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/x86_32/domain_page.c
--- a/xen/arch/x86/x86_32/domain_page.c Wed Sep 12 13:24:28 2012 +0200
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,279 +0,0 @@
-/******************************************************************************
- * domain_page.h
- * 
- * Allow temporary mapping of domain pages.
- * 
- * Copyright (c) 2003-2006, Keir Fraser <keir@xxxxxxxxxxxxx>
- */
-
-#include <xen/config.h>
-#include <xen/sched.h>
-#include <xen/mm.h>
-#include <xen/perfc.h>
-#include <xen/domain_page.h>
-#include <asm/current.h>
-#include <asm/flushtlb.h>
-#include <asm/hardirq.h>
-#include <asm/hvm/support.h>
-#include <asm/fixmap.h>
-
-static inline struct vcpu *mapcache_current_vcpu(void)
-{
-    struct vcpu *v;
-
-    /* In the common case we use the mapcache of the running VCPU. */
-    v = current;
-
-    /*
-     * If guest_table is NULL, and we are running a paravirtualised guest,
-     * then it means we are running on the idle domain's page table and must
-     * therefore use its mapcache.
-     */
-    if ( unlikely(!pagetable_get_pfn(v->arch.guest_table)) && !is_hvm_vcpu(v) )
-    {
-        /* If we really are idling, perform lazy context switch now. */
-        if ( (v = idle_vcpu[smp_processor_id()]) == current )
-            sync_local_execstate();
-        /* We must now be running on the idle page table. */
-        ASSERT(read_cr3() == __pa(idle_pg_table));
-    }
-
-    return v;
-}
-
-void *map_domain_page(unsigned long mfn)
-{
-    unsigned long va, flags;
-    unsigned int idx, i;
-    struct vcpu *v;
-    struct mapcache_domain *dcache;
-    struct mapcache_vcpu *vcache;
-    struct vcpu_maphash_entry *hashent;
-
-    perfc_incr(map_domain_page_count);
-
-    v = mapcache_current_vcpu();
-    /* Prevent vcpu pointer being used before initialize. */
-    ASSERT((unsigned long)v != 0xfffff000);
-
-    dcache = &v->domain->arch.mapcache;
-    vcache = &v->arch.mapcache;
-
-    local_irq_save(flags);
-
-    hashent = &vcache->hash[MAPHASH_HASHFN(mfn)];
-    if ( hashent->mfn == mfn )
-    {
-        idx = hashent->idx;
-        hashent->refcnt++;
-        ASSERT(idx < MAPCACHE_ENTRIES);
-        ASSERT(hashent->refcnt != 0);
-        ASSERT(l1e_get_pfn(dcache->l1tab[idx]) == mfn);
-        goto out;
-    }
-
-    spin_lock(&dcache->lock);
-
-    /* Has some other CPU caused a wrap? We must flush if so. */
-    if ( unlikely(dcache->epoch != vcache->shadow_epoch) )
-    {
-        vcache->shadow_epoch = dcache->epoch;
-        if ( NEED_FLUSH(this_cpu(tlbflush_time), dcache->tlbflush_timestamp) )
-        {
-            perfc_incr(domain_page_tlb_flush);
-            flush_tlb_local();
-        }
-    }
-
-    idx = find_next_zero_bit(dcache->inuse, MAPCACHE_ENTRIES, dcache->cursor);
-    if ( unlikely(idx >= MAPCACHE_ENTRIES) )
-    {
-        /* /First/, clean the garbage map and update the inuse list. */
-        for ( i = 0; i < ARRAY_SIZE(dcache->garbage); i++ )
-        {
-            unsigned long x = xchg(&dcache->garbage[i], 0);
-            dcache->inuse[i] &= ~x;
-        }
-
-        /* /Second/, flush TLBs. */
-        perfc_incr(domain_page_tlb_flush);
-        flush_tlb_local();
-        vcache->shadow_epoch = ++dcache->epoch;
-        dcache->tlbflush_timestamp = tlbflush_current_time();
-
-        idx = find_first_zero_bit(dcache->inuse, MAPCACHE_ENTRIES);
-        BUG_ON(idx >= MAPCACHE_ENTRIES);
-    }
-
-    set_bit(idx, dcache->inuse);
-    dcache->cursor = idx + 1;
-
-    spin_unlock(&dcache->lock);
-
-    l1e_write(&dcache->l1tab[idx], l1e_from_pfn(mfn, __PAGE_HYPERVISOR));
-
- out:
-    local_irq_restore(flags);
-    va = MAPCACHE_VIRT_START + (idx << PAGE_SHIFT);
-    return (void *)va;
-}
-
-void unmap_domain_page(const void *va)
-{
-    unsigned int idx;
-    struct vcpu *v;
-    struct mapcache_domain *dcache;
-    unsigned long mfn, flags;
-    struct vcpu_maphash_entry *hashent;
-
-    ASSERT((void *)MAPCACHE_VIRT_START <= va);
-    ASSERT(va < (void *)MAPCACHE_VIRT_END);
-
-    v = mapcache_current_vcpu();
-
-    dcache = &v->domain->arch.mapcache;
-
-    idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
-    mfn = l1e_get_pfn(dcache->l1tab[idx]);
-    hashent = &v->arch.mapcache.hash[MAPHASH_HASHFN(mfn)];
-
-    local_irq_save(flags);
-
-    if ( hashent->idx == idx )
-    {
-        ASSERT(hashent->mfn == mfn);
-        ASSERT(hashent->refcnt != 0);
-        hashent->refcnt--;
-    }
-    else if ( hashent->refcnt == 0 )
-    {
-        if ( hashent->idx != MAPHASHENT_NOTINUSE )
-        {
-            /* /First/, zap the PTE. */
-            ASSERT(l1e_get_pfn(dcache->l1tab[hashent->idx]) == hashent->mfn);
-            l1e_write(&dcache->l1tab[hashent->idx], l1e_empty());
-            /* /Second/, mark as garbage. */
-            set_bit(hashent->idx, dcache->garbage);
-        }
-
-        /* Add newly-freed mapping to the maphash. */
-        hashent->mfn = mfn;
-        hashent->idx = idx;
-    }
-    else
-    {
-        /* /First/, zap the PTE. */
-        l1e_write(&dcache->l1tab[idx], l1e_empty());
-        /* /Second/, mark as garbage. */
-        set_bit(idx, dcache->garbage);
-    }
-
-    local_irq_restore(flags);
-}
-
-void mapcache_domain_init(struct domain *d)
-{
-    d->arch.mapcache.l1tab = d->arch.mm_perdomain_pt +
-        (GDT_LDT_MBYTES << (20 - PAGE_SHIFT));
-    spin_lock_init(&d->arch.mapcache.lock);
-}
-
-void mapcache_vcpu_init(struct vcpu *v)
-{
-    unsigned int i;
-    struct vcpu_maphash_entry *hashent;
-
-    /* Mark all maphash entries as not in use. */
-    for ( i = 0; i < MAPHASH_ENTRIES; i++ )
-    {
-        hashent = &v->arch.mapcache.hash[i];
-        hashent->mfn = ~0UL; /* never valid to map */
-        hashent->idx = MAPHASHENT_NOTINUSE;
-    }
-}
-
-#define GLOBALMAP_BITS (IOREMAP_MBYTES << (20 - PAGE_SHIFT))
-static unsigned long inuse[BITS_TO_LONGS(GLOBALMAP_BITS)];
-static unsigned long garbage[BITS_TO_LONGS(GLOBALMAP_BITS)];
-static unsigned int inuse_cursor;
-static DEFINE_SPINLOCK(globalmap_lock);
-
-void *map_domain_page_global(unsigned long mfn)
-{
-    l2_pgentry_t *pl2e;
-    l1_pgentry_t *pl1e;
-    unsigned int idx, i;
-    unsigned long va;
-
-    ASSERT(!in_irq() && local_irq_is_enabled());
-
-    /* At least half the ioremap space should be available to us. */
-    BUILD_BUG_ON(IOREMAP_VIRT_START + (IOREMAP_MBYTES << 19) >= FIXADDR_START);
-
-    spin_lock(&globalmap_lock);
-
-    idx = find_next_zero_bit(inuse, GLOBALMAP_BITS, inuse_cursor);
-    va = IOREMAP_VIRT_START + (idx << PAGE_SHIFT);
-    if ( unlikely(va >= FIXADDR_START) )
-    {
-        /* /First/, clean the garbage map and update the inuse list. */
-        for ( i = 0; i < ARRAY_SIZE(garbage); i++ )
-        {
-            unsigned long x = xchg(&garbage[i], 0);
-            inuse[i] &= ~x;
-        }
-
-        /* /Second/, flush all TLBs to get rid of stale garbage mappings. */
-        flush_tlb_all();
-
-        idx = find_first_zero_bit(inuse, GLOBALMAP_BITS);
-        va = IOREMAP_VIRT_START + (idx << PAGE_SHIFT);
-        if ( unlikely(va >= FIXADDR_START) )
-        {
-            spin_unlock(&globalmap_lock);
-            return NULL;
-        }
-    }
-
-    set_bit(idx, inuse);
-    inuse_cursor = idx + 1;
-
-    spin_unlock(&globalmap_lock);
-
-    pl2e = virt_to_xen_l2e(va);
-    pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(va);
-    l1e_write(pl1e, l1e_from_pfn(mfn, __PAGE_HYPERVISOR));
-
-    return (void *)va;
-}
-
-void unmap_domain_page_global(const void *va)
-{
-    unsigned long __va = (unsigned long)va;
-    l2_pgentry_t *pl2e;
-    l1_pgentry_t *pl1e;
-    unsigned int idx;
-
-    ASSERT(__va >= IOREMAP_VIRT_START);
-
-    /* /First/, we zap the PTE. */
-    pl2e = virt_to_xen_l2e(__va);
-    pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(__va);
-    l1e_write(pl1e, l1e_empty());
-
-    /* /Second/, we add to the garbage map. */
-    idx = (__va - IOREMAP_VIRT_START) >> PAGE_SHIFT;
-    set_bit(idx, garbage);
-}
-
-/* Translate a map-domain-page'd address to the underlying MFN */
-unsigned long domain_page_map_to_mfn(void *va)
-{
-    l1_pgentry_t *l1e;
-
-    ASSERT( (((unsigned long) va) >= MAPCACHE_VIRT_START) &&
-            (((unsigned long) va) <= MAPCACHE_VIRT_END) );
-    l1e = &__linear_l1_table[
-            l1_linear_offset((unsigned long) va)];
-    return l1e_get_pfn(*l1e);
-}
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/x86_32/entry.S
--- a/xen/arch/x86/x86_32/entry.S       Wed Sep 12 13:24:28 2012 +0200
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,757 +0,0 @@
-/*
- * Hypercall and fault low-level handling routines.
- *
- * Copyright (c) 2002-2004, K A Fraser
- * Copyright (c) 1991, 1992 Linus Torvalds
- * 
- * Calling back to a guest OS:
- * ===========================
- * 
- * First, we require that all callbacks (either via a supplied
- * interrupt-descriptor-table, or via the special event or failsafe callbacks
- * in the shared-info-structure) are to ring 1. This just makes life easier,
- * in that it means we don't have to do messy GDT/LDT lookups to find
- * out which the privilege-level of the return code-selector. That code
- * would just be a hassle to write, and would need to account for running
- * off the end of the GDT/LDT, for example. For all callbacks we check
- * that the provided return CS is not == __HYPERVISOR_{CS,DS}. Apart from that 
- * we're safe as don't allow a guest OS to install ring-0 privileges into the
- * GDT/LDT. It's up to the guest OS to ensure all returns via the IDT are to
- * ring 1. If not, we load incorrect SS/ESP values from the TSS (for ring 1
- * rather than the correct ring) and bad things are bound to ensue -- IRET is
- * likely to fault, and we may end up killing the domain (no harm can
- * come to Xen, though).
- *      
- * When doing a callback, we check if the return CS is in ring 0. If so,
- * callback is delayed until next return to ring != 0.
- * If return CS is in ring 1, then we create a callback frame
- * starting at return SS/ESP. The base of the frame does an intra-privilege
- * interrupt-return.
- * If return CS is in ring > 1, we create a callback frame starting
- * at SS/ESP taken from appropriate section of the current TSS. The base
- * of the frame does an inter-privilege interrupt-return.
- * 
- * Note that the "failsafe callback" uses a special stackframe:
- * { return_DS, return_ES, return_FS, return_GS, return_EIP,
- *   return_CS, return_EFLAGS[, return_ESP, return_SS] }
- * That is, original values for DS/ES/FS/GS are placed on stack rather than
- * in DS/ES/FS/GS themselves. Why? It saves us loading them, only to have them
- * saved/restored in guest OS. Furthermore, if we load them we may cause
- * a fault if they are invalid, which is a hassle to deal with. We avoid
- * that problem if we don't load them :-) This property allows us to use
- * the failsafe callback as a fallback: if we ever fault on loading DS/ES/FS/GS
- * on return to ring != 0, we can simply package it up as a return via
- * the failsafe callback, and let the guest OS sort it out (perhaps by
- * killing an application process). Note that we also do this for any
- * faulting IRET -- just let the guest OS handle it via the event
- * callback.
- *
- * We terminate a domain in the following cases:
- *  - creating a callback stack frame (due to bad ring-1 stack).
- *  - faulting IRET on entry to failsafe callback handler.
- * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
- * handler in good order (absolutely no faults allowed!).
- */
-
-#include <xen/config.h>
-#include <xen/errno.h>
-#include <xen/softirq.h>
-#include <asm/asm_defns.h>
-#include <asm/apicdef.h>
-#include <asm/page.h>
-#include <public/xen.h>
-
-        ALIGN
-restore_all_guest:
-        ASSERT_INTERRUPTS_DISABLED
-        testl $X86_EFLAGS_VM,UREGS_eflags(%esp)
-        popl  %ebx
-        popl  %ecx
-        popl  %edx
-        popl  %esi
-        popl  %edi
-        popl  %ebp
-        popl  %eax
-        leal  4(%esp),%esp
-        jnz   .Lrestore_iret_guest
-#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
-        testb $2,UREGS_cs-UREGS_eip(%esp)
-        jnz   .Lrestore_sregs_guest
-        call  restore_ring0_guest
-        jmp   .Lrestore_iret_guest
-#endif
-.Lrestore_sregs_guest:
-.Lft1:  mov  UREGS_ds-UREGS_eip(%esp),%ds
-.Lft2:  mov  UREGS_es-UREGS_eip(%esp),%es
-.Lft3:  mov  UREGS_fs-UREGS_eip(%esp),%fs
-.Lft4:  mov  UREGS_gs-UREGS_eip(%esp),%gs
-.Lrestore_iret_guest:
-.Lft5:  iret
-.section .fixup,"ax"
-.Lfx1:  sti
-        SAVE_ALL_GPRS
-        mov   UREGS_error_code(%esp),%esi
-        pushfl                         # EFLAGS
-        movl  $__HYPERVISOR_CS,%eax
-        pushl %eax                     # CS
-        movl  $.Ldf1,%eax
-        pushl %eax                     # EIP
-        pushl %esi                     # error_code/entry_vector
-        jmp   handle_exception
-.Ldf1:  GET_CURRENT(%ebx)
-        jmp   test_all_events
-failsafe_callback:
-        GET_CURRENT(%ebx)
-        leal  VCPU_trap_bounce(%ebx),%edx
-        movl  VCPU_failsafe_addr(%ebx),%eax
-        movl  %eax,TRAPBOUNCE_eip(%edx)
-        movl  VCPU_failsafe_sel(%ebx),%eax
-        movw  %ax,TRAPBOUNCE_cs(%edx)
-        movb  $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
-        bt    $_VGCF_failsafe_disables_events,VCPU_guest_context_flags(%ebx)
-        jnc   1f
-        orb   $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
-1:      call  create_bounce_frame
-        xorl  %eax,%eax
-        movl  %eax,UREGS_ds(%esp)
-        movl  %eax,UREGS_es(%esp)
-        movl  %eax,UREGS_fs(%esp)
-        movl  %eax,UREGS_gs(%esp)
-        jmp   test_all_events
-.previous
-        _ASM_PRE_EXTABLE(.Lft1, .Lfx1)
-        _ASM_PRE_EXTABLE(.Lft2, .Lfx1)
-        _ASM_PRE_EXTABLE(.Lft3, .Lfx1)
-        _ASM_PRE_EXTABLE(.Lft4, .Lfx1)
-        _ASM_PRE_EXTABLE(.Lft5, .Lfx1)
-        _ASM_EXTABLE(.Ldf1, failsafe_callback)
-
-        ALIGN
-restore_all_xen:
-        popl %ebx
-        popl %ecx
-        popl %edx
-        popl %esi
-        popl %edi
-        popl %ebp
-        popl %eax
-        addl $4,%esp
-        iret
-
-ENTRY(hypercall)
-        subl $4,%esp
-        FIXUP_RING0_GUEST_STACK
-        SAVE_ALL(,1f)
-1:      sti
-        GET_CURRENT(%ebx)
-        cmpl  $NR_hypercalls,%eax
-        jae   bad_hypercall
-        PERFC_INCR(hypercalls, %eax, %ebx)
-#ifndef NDEBUG
-        /* Create shadow parameters and corrupt those not used by this call. */
-        pushl %eax
-        pushl UREGS_eip+4(%esp)
-        pushl 28(%esp) # EBP
-        pushl 28(%esp) # EDI
-        pushl 28(%esp) # ESI
-        pushl 28(%esp) # EDX
-        pushl 28(%esp) # ECX
-        pushl 28(%esp) # EBX
-        movzb hypercall_args_table(,%eax,1),%ecx
-        leal  (%esp,%ecx,4),%edi
-        subl  $6,%ecx
-        negl  %ecx
-        movl  %eax,%esi
-        movl  $0xDEADBEEF,%eax
-        rep   stosl
-        movl  %esi,%eax
-#define SHADOW_BYTES 32 /* 6 shadow parameters + EIP + hypercall # */
-#else
-        /* 
-         * We need shadow parameters even on non-debug builds. We depend on the
-         * original versions not being clobbered (needed to create a hypercall
-         * continuation). But that isn't guaranteed by the function-call ABI.
-         */ 
-        pushl 20(%esp) # EBP
-        pushl 20(%esp) # EDI
-        pushl 20(%esp) # ESI
-        pushl 20(%esp) # EDX
-        pushl 20(%esp) # ECX
-        pushl 20(%esp) # EBX
-#define SHADOW_BYTES 24 /* 6 shadow parameters */
-#endif
-        cmpb  $0,tb_init_done
-UNLIKELY_START(ne, trace)
-        call  trace_hypercall
-        /* Now restore all the registers that trace_hypercall clobbered */
-        movl  UREGS_eax+SHADOW_BYTES(%esp),%eax /* Hypercall # */
-UNLIKELY_END(trace)
-        call *hypercall_table(,%eax,4)
-        movl  %eax,UREGS_eax+SHADOW_BYTES(%esp) # save the return value
-#undef SHADOW_BYTES
-        addl  $24,%esp     # Discard the shadow parameters
-#ifndef NDEBUG
-        /* Deliberately corrupt real parameter regs used by this hypercall. */
-        popl  %ecx         # Shadow EIP
-        cmpl  %ecx,UREGS_eip+4(%esp)
-        popl  %ecx         # Shadow hypercall index
-        jne   skip_clobber # If EIP has changed then don't clobber
-        movzb hypercall_args_table(,%ecx,1),%ecx
-        movl  %esp,%edi
-        movl  $0xDEADBEEF,%eax
-        rep   stosl
-skip_clobber:
-#endif
-
-test_all_events:
-        xorl %ecx,%ecx
-        notl %ecx
-        cli                             # tests must not race interrupts
-/*test_softirqs:*/  
-        movl VCPU_processor(%ebx),%eax
-        shl  $IRQSTAT_shift,%eax
-        test %ecx,irq_stat(%eax,1)
-        jnz  process_softirqs
-        testb $1,VCPU_mce_pending(%ebx)
-        jnz  process_mce
-.Ltest_guest_nmi:
-        testb $1,VCPU_nmi_pending(%ebx)
-        jnz  process_nmi
-test_guest_events:
-        movl VCPU_vcpu_info(%ebx),%eax
-        movzwl VCPUINFO_upcall_pending(%eax),%eax
-        decl %eax
-        cmpl $0xfe,%eax
-        ja   restore_all_guest
-/*process_guest_events:*/
-        sti
-        leal VCPU_trap_bounce(%ebx),%edx
-        movl VCPU_event_addr(%ebx),%eax
-        movl %eax,TRAPBOUNCE_eip(%edx)
-        movl VCPU_event_sel(%ebx),%eax
-        movw %ax,TRAPBOUNCE_cs(%edx)
-        movb $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
-        call create_bounce_frame
-        jmp  test_all_events
-
-        ALIGN
-process_softirqs:
-        sti       
-        call do_softirq
-        jmp  test_all_events
-
-        ALIGN
-/* %ebx: struct vcpu */
-process_mce:
-        testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%ebx)
-        jnz  .Ltest_guest_nmi
-        sti
-        movb $0,VCPU_mce_pending(%ebx)
-        call set_guest_machinecheck_trapbounce
-        test %eax,%eax
-        jz   test_all_events
-        movzbl VCPU_async_exception_mask(%ebx),%edx # save mask for the
-        movb %dl,VCPU_mce_old_mask(%ebx)            # iret hypercall
-        orl  $1 << VCPU_TRAP_MCE,%edx
-        movb %dl,VCPU_async_exception_mask(%ebx)
-        jmp process_trap
-
-        ALIGN
-/* %ebx: struct vcpu */
-process_nmi:
-        testb $1 << VCPU_TRAP_NMI,VCPU_async_exception_mask(%ebx)
-        jnz  test_guest_events
-        sti
-        movb $0,VCPU_nmi_pending(%ebx)
-        call set_guest_nmi_trapbounce
-        test %eax,%eax
-        jz   test_all_events
-        movzbl VCPU_async_exception_mask(%ebx),%edx # save mask for the
-        movb %dl,VCPU_nmi_old_mask(%ebx)            # iret hypercall
-        orl  $1 << VCPU_TRAP_NMI,%edx
-        movb %dl,VCPU_async_exception_mask(%ebx)
-        /* FALLTHROUGH */
-process_trap:
-        leal VCPU_trap_bounce(%ebx),%edx
-        call create_bounce_frame
-        jmp  test_all_events
-
-bad_hypercall:
-        movl $-ENOSYS,UREGS_eax(%esp)
-        jmp  test_all_events
-
-/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:            */
-/*   {EIP, CS, EFLAGS, [ESP, SS]}                                        */
-/* %edx == trap_bounce, %ebx == struct vcpu                       */
-/* %eax,%ecx are clobbered. %gs:%esi contain new UREGS_ss/UREGS_esp. */
-create_bounce_frame:
-        ASSERT_INTERRUPTS_ENABLED
-        movl UREGS_eflags+4(%esp),%ecx
-        movb UREGS_cs+4(%esp),%cl
-        testl $(2|X86_EFLAGS_VM),%ecx
-        jz   ring1 /* jump if returning to an existing ring-1 activation */
-        movl VCPU_kernel_sp(%ebx),%esi
-.Lft6:  mov  VCPU_kernel_ss(%ebx),%gs
-        testl $X86_EFLAGS_VM,%ecx
-UNLIKELY_START(nz, bounce_vm86_1)
-        subl $16,%esi       /* push ES/DS/FS/GS (VM86 stack frame) */
-        movl UREGS_es+4(%esp),%eax
-.Lft7:  movl %eax,%gs:(%esi)
-        movl UREGS_ds+4(%esp),%eax
-.Lft8:  movl %eax,%gs:4(%esi)
-        movl UREGS_fs+4(%esp),%eax
-.Lft9:  movl %eax,%gs:8(%esi)
-        movl UREGS_gs+4(%esp),%eax
-.Lft10: movl %eax,%gs:12(%esi)
-UNLIKELY_END(bounce_vm86_1)
-        subl $8,%esi        /* push SS/ESP (inter-priv iret) */
-        movl UREGS_esp+4(%esp),%eax
-.Lft11: movl %eax,%gs:(%esi)
-        movl UREGS_ss+4(%esp),%eax
-.Lft12: movl %eax,%gs:4(%esi)
-        jmp 1f
-ring1:  /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
-        movl UREGS_esp+4(%esp),%esi
-.Lft13: mov  UREGS_ss+4(%esp),%gs
-1:      /* Construct a stack frame: EFLAGS, CS/EIP */
-        movb TRAPBOUNCE_flags(%edx),%cl
-        subl $12,%esi
-        movl UREGS_eip+4(%esp),%eax
-.Lft14: movl %eax,%gs:(%esi)
-        movl VCPU_vcpu_info(%ebx),%eax
-        pushl VCPUINFO_upcall_mask(%eax)
-        testb $TBF_INTERRUPT,%cl
-        setnz %ch                        # TBF_INTERRUPT -> set upcall mask
-        orb  %ch,VCPUINFO_upcall_mask(%eax)
-        popl %eax
-        shll $16,%eax                    # Bits 16-23: saved_upcall_mask
-        movw UREGS_cs+4(%esp),%ax        # Bits  0-15: CS
-#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
-        testw $2,%ax
-        jnz  .Lft15
-        and  $~3,%ax                     # RPL 1 -> RPL 0
-#endif
-.Lft15: movl %eax,%gs:4(%esi)
-        test $0x00FF0000,%eax            # Bits 16-23: saved_upcall_mask
-        setz %ch                         # %ch == !saved_upcall_mask
-        movl UREGS_eflags+4(%esp),%eax
-        andl $~X86_EFLAGS_IF,%eax
-        shlb $1,%ch                      # Bit 9 (EFLAGS.IF)
-        orb  %ch,%ah                     # Fold EFLAGS.IF into %eax
-.Lft16: movl %eax,%gs:8(%esi)
-        test $TBF_EXCEPTION_ERRCODE,%cl
-        jz   1f
-        subl $4,%esi                    # push error_code onto guest frame
-        movl TRAPBOUNCE_error_code(%edx),%eax
-.Lft17: movl %eax,%gs:(%esi)
-1:      testb $TBF_FAILSAFE,%cl
-UNLIKELY_START(nz, bounce_failsafe)
-        subl $16,%esi                # add DS/ES/FS/GS to failsafe stack frame
-        testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
-        jnz  .Lvm86_2
-        movl UREGS_ds+4(%esp),%eax   # non-VM86: write real selector values
-.Lft22: movl %eax,%gs:(%esi)
-        movl UREGS_es+4(%esp),%eax
-.Lft23: movl %eax,%gs:4(%esi)
-        movl UREGS_fs+4(%esp),%eax
-.Lft24: movl %eax,%gs:8(%esi)
-        movl UREGS_gs+4(%esp),%eax
-.Lft25: movl %eax,%gs:12(%esi)
-        jmp  .Lnvm86_3
-.Lvm86_2:
-        xorl %eax,%eax               # VM86: we write zero selector values
-.Lft18: movl %eax,%gs:(%esi)
-.Lft19: movl %eax,%gs:4(%esi)
-.Lft20: movl %eax,%gs:8(%esi)
-.Lft21: movl %eax,%gs:12(%esi)
-UNLIKELY_END(bounce_failsafe)
-        testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
-UNLIKELY_START(nz, bounce_vm86_3)
-        xorl %eax,%eax      /* zero DS-GS, just as a real CPU would */
-        movl %eax,UREGS_ds+4(%esp)
-        movl %eax,UREGS_es+4(%esp)
-        movl %eax,UREGS_fs+4(%esp)
-        movl %eax,UREGS_gs+4(%esp)
-UNLIKELY_END(bounce_vm86_3)
-.Lnvm86_3:
-        /* Rewrite our stack frame and return to ring 1. */
-        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
-        andl $~(X86_EFLAGS_VM|X86_EFLAGS_RF|\
-                X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags+4(%esp)
-        mov  %gs,UREGS_ss+4(%esp)
-        movl %esi,UREGS_esp+4(%esp)
-        movzwl TRAPBOUNCE_cs(%edx),%eax
-        /* Null selectors (0-3) are not allowed. */
-        testl $~3,%eax
-        jz   domain_crash_synchronous
-        movl %eax,UREGS_cs+4(%esp)
-        movl TRAPBOUNCE_eip(%edx),%eax
-        movl %eax,UREGS_eip+4(%esp)
-        ret
-        _ASM_EXTABLE(.Lft6,  domain_crash_synchronous)
-        _ASM_EXTABLE(.Lft7,  domain_crash_synchronous)
-        _ASM_EXTABLE(.Lft8,  domain_crash_synchronous)
-        _ASM_EXTABLE(.Lft9,  domain_crash_synchronous)
-        _ASM_EXTABLE(.Lft10, domain_crash_synchronous)
-        _ASM_EXTABLE(.Lft11, domain_crash_synchronous)
-        _ASM_EXTABLE(.Lft12, domain_crash_synchronous)
-        _ASM_EXTABLE(.Lft13, domain_crash_synchronous)
-        _ASM_EXTABLE(.Lft14, domain_crash_synchronous)
-        _ASM_EXTABLE(.Lft15, domain_crash_synchronous)
-        _ASM_EXTABLE(.Lft16, domain_crash_synchronous)
-        _ASM_EXTABLE(.Lft17, domain_crash_synchronous)
-        _ASM_EXTABLE(.Lft18, domain_crash_synchronous)
-        _ASM_EXTABLE(.Lft19, domain_crash_synchronous)
-        _ASM_EXTABLE(.Lft20, domain_crash_synchronous)
-        _ASM_EXTABLE(.Lft21, domain_crash_synchronous)
-        _ASM_EXTABLE(.Lft22, domain_crash_synchronous)
-        _ASM_EXTABLE(.Lft23, domain_crash_synchronous)
-        _ASM_EXTABLE(.Lft24, domain_crash_synchronous)
-        _ASM_EXTABLE(.Lft25, domain_crash_synchronous)
-
-domain_crash_synchronous_string:
-        .asciz "domain_crash_sync called from entry.S (%lx)\n"
-
-domain_crash_synchronous:
-        pushl $domain_crash_synchronous_string
-        call  printk
-        jmp   __domain_crash_synchronous
-
-ENTRY(ret_from_intr)
-        GET_CURRENT(%ebx)
-        movl  UREGS_eflags(%esp),%eax
-        movb  UREGS_cs(%esp),%al
-        testl $(3|X86_EFLAGS_VM),%eax
-        jnz   test_all_events
-        jmp   restore_all_xen
-
-ENTRY(page_fault)
-        movw  $TRAP_page_fault,2(%esp)
-handle_exception:
-        FIXUP_RING0_GUEST_STACK
-        SAVE_ALL(1f,2f)
-        .text 1
-        /* Exception within Xen: make sure we have valid %ds,%es. */
-1:      mov   %ecx,%ds
-        mov   %ecx,%es
-        jmp   2f
-        .previous
-2:      testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%esp)
-        jz    exception_with_ints_disabled
-        sti                             # re-enable interrupts
-1:      xorl  %eax,%eax
-        movw  UREGS_entry_vector(%esp),%ax
-        movl  %esp,%edx
-        pushl %edx                      # push the cpu_user_regs pointer
-        GET_CURRENT(%ebx)
-        PERFC_INCR(exceptions, %eax, %ebx)
-        call  *exception_table(,%eax,4)
-        addl  $4,%esp
-        movl  UREGS_eflags(%esp),%eax
-        movb  UREGS_cs(%esp),%al
-        testl $(3|X86_EFLAGS_VM),%eax
-        jz    restore_all_xen
-        leal  VCPU_trap_bounce(%ebx),%edx
-        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%edx)
-        jz    test_all_events
-        call  create_bounce_frame
-        movb  $0,TRAPBOUNCE_flags(%edx)
-        jmp   test_all_events
-
-exception_with_ints_disabled:
-        movl  UREGS_eflags(%esp),%eax
-        movb  UREGS_cs(%esp),%al
-        testl $(3|X86_EFLAGS_VM),%eax   # interrupts disabled outside Xen?
-        jnz   FATAL_exception_with_ints_disabled
-        pushl %esp
-        call  search_pre_exception_table
-        addl  $4,%esp
-        testl %eax,%eax                 # no fixup code for faulting EIP?
-        jz    1b
-        movl  %eax,UREGS_eip(%esp)
-        movl  %esp,%esi
-        subl  $4,%esp
-        movl  %esp,%edi
-        movl  $UREGS_kernel_sizeof/4,%ecx
-        rep;  movsl                     # make room for error_code/entry_vector
-        movl  UREGS_error_code(%esp),%eax # error_code/entry_vector
-        movl  %eax,UREGS_kernel_sizeof(%esp)
-        jmp   restore_all_xen           # return to fixup code
-
-FATAL_exception_with_ints_disabled:
-        xorl  %esi,%esi
-        movw  UREGS_entry_vector(%esp),%si
-        movl  %esp,%edx
-        pushl %edx                      # push the cpu_user_regs pointer
-        pushl %esi                      # push the trapnr (entry vector)
-        call  fatal_trap
-        ud2
-                                        
-ENTRY(coprocessor_error)
-        pushl $TRAP_copro_error<<16
-        jmp   handle_exception
-
-ENTRY(simd_coprocessor_error)
-        pushl $TRAP_simd_error<<16
-        jmp   handle_exception
-
-ENTRY(device_not_available)
-        pushl $TRAP_no_device<<16
-        jmp   handle_exception
-
-ENTRY(divide_error)
-        pushl $TRAP_divide_error<<16
-        jmp   handle_exception
-
-ENTRY(debug)
-        pushl $TRAP_debug<<16
-        jmp   handle_exception
-
-ENTRY(int3)
-        pushl $TRAP_int3<<16
-        jmp   handle_exception
-
-ENTRY(overflow)
-        pushl $TRAP_overflow<<16
-        jmp   handle_exception
-
-ENTRY(bounds)
-        pushl $TRAP_bounds<<16
-        jmp   handle_exception
-
-ENTRY(invalid_op)
-        pushl $TRAP_invalid_op<<16
-        jmp   handle_exception
-
-ENTRY(coprocessor_segment_overrun)
-        pushl $TRAP_copro_seg<<16
-        jmp   handle_exception
-
-ENTRY(invalid_TSS)
-        movw  $TRAP_invalid_tss,2(%esp)
-        jmp   handle_exception
-
-ENTRY(segment_not_present)
-        movw  $TRAP_no_segment,2(%esp)
-        jmp   handle_exception
-
-ENTRY(stack_segment)
-        movw  $TRAP_stack_error,2(%esp)
-        jmp   handle_exception
-
-ENTRY(general_protection)
-        movw  $TRAP_gp_fault,2(%esp)
-        jmp   handle_exception
-
-ENTRY(alignment_check)
-        movw  $TRAP_alignment_check,2(%esp)
-        jmp   handle_exception
-
-ENTRY(spurious_interrupt_bug)
-        pushl $TRAP_spurious_int<<16
-        jmp   handle_exception
-
-        .pushsection .init.text, "ax", @progbits
-ENTRY(early_page_fault)
-        SAVE_ALL(1f,1f)
-1:      movl  %esp,%eax
-        pushl %eax
-        call  do_early_page_fault
-        addl  $4,%esp
-        jmp   restore_all_xen
-        .popsection
-
-handle_nmi_mce:
-#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
-        # NMI/MCE entry protocol is incompatible with guest kernel in ring 0.
-        addl  $4,%esp
-        iret
-#else
-        # Save state but do not trash the segment registers!
-        SAVE_ALL(.Lnmi_mce_xen,.Lnmi_mce_common)
-.Lnmi_mce_common:
-        xorl  %eax,%eax
-        movw  UREGS_entry_vector(%esp),%ax
-        movl  %esp,%edx
-        pushl %edx
-        call  *exception_table(,%eax,4)
-        addl  $4,%esp
-        /* 
-         * NB. We may return to Xen context with polluted %ds/%es. But in such
-         * cases we have put guest DS/ES on the guest stack frame, which will
-         * be detected by SAVE_ALL(), or we have rolled back restore_guest.
-         */
-        jmp   ret_from_intr
-.Lnmi_mce_xen:
-        /* Check the outer (guest) context for %ds/%es state validity. */
-        GET_CPUINFO_FIELD(CPUINFO_guest_cpu_user_regs,%ebx)
-        testl $X86_EFLAGS_VM,%ss:UREGS_eflags(%ebx)
-        mov   %ds,%eax
-        mov   %es,%edx
-        jnz   .Lnmi_mce_vm86
-        /* We may have interrupted Xen while messing with %ds/%es... */
-        cmpw  %ax,%cx
-        mov   %ecx,%ds             /* Ensure %ds is valid */
-        cmove UREGS_ds(%ebx),%eax  /* Grab guest DS if it wasn't in %ds */
-        cmpw  %dx,%cx
-        movl  %eax,UREGS_ds(%ebx)  /* Ensure guest frame contains guest DS */
-        cmove UREGS_es(%ebx),%edx  /* Grab guest ES if it wasn't in %es */
-        mov   %ecx,%es             /* Ensure %es is valid */
-        movl  $.Lrestore_sregs_guest,%ecx
-        movl  %edx,UREGS_es(%ebx)  /* Ensure guest frame contains guest ES */
-        cmpl  %ecx,UREGS_eip(%esp)
-        jbe   .Lnmi_mce_common
-        cmpl  $.Lrestore_iret_guest,UREGS_eip(%esp)
-        ja    .Lnmi_mce_common
-        /* Roll outer context restore_guest back to restoring %ds/%es. */
-        movl  %ecx,UREGS_eip(%esp)
-        jmp   .Lnmi_mce_common
-.Lnmi_mce_vm86:
-        /* vm86 is easy: the CPU saved %ds/%es so we can safely stomp them. */
-        mov   %ecx,%ds
-        mov   %ecx,%es
-        jmp   .Lnmi_mce_common
-#endif /* !CONFIG_X86_SUPERVISOR_MODE_KERNEL */
-
-ENTRY(nmi)
-        pushl $TRAP_nmi<<16
-        jmp   handle_nmi_mce
-
-ENTRY(machine_check)
-        pushl $TRAP_machine_check<<16
-        jmp   handle_nmi_mce
-
-ENTRY(setup_vm86_frame)
-        mov %ecx,%ds
-        mov %ecx,%es
-        # Copies the entire stack frame forwards by 16 bytes.
-        .macro copy_vm86_words count=18
-        .if \count
-        pushl ((\count-1)*4)(%esp)
-        popl  ((\count-1)*4)+16(%esp)
-        copy_vm86_words "(\count-1)"
-        .endif
-        .endm
-        copy_vm86_words
-        addl $16,%esp
-        ret
-
-.section .rodata, "a", @progbits
-
-ENTRY(exception_table)
-        .long do_divide_error
-        .long do_debug
-        .long do_nmi
-        .long do_int3
-        .long do_overflow
-        .long do_bounds
-        .long do_invalid_op
-        .long do_device_not_available
-        .long 0 # double fault
-        .long do_coprocessor_segment_overrun
-        .long do_invalid_TSS
-        .long do_segment_not_present
-        .long do_stack_segment
-        .long do_general_protection
-        .long do_page_fault
-        .long do_spurious_interrupt_bug
-        .long do_coprocessor_error
-        .long do_alignment_check
-        .long do_machine_check
-        .long do_simd_coprocessor_error
-
-ENTRY(hypercall_table)
-        .long do_set_trap_table     /*  0 */
-        .long do_mmu_update
-        .long do_set_gdt
-        .long do_stack_switch
-        .long do_set_callbacks
-        .long do_fpu_taskswitch     /*  5 */
-        .long do_sched_op_compat
-        .long do_platform_op
-        .long do_set_debugreg
-        .long do_get_debugreg
-        .long do_update_descriptor  /* 10 */
-        .long do_ni_hypercall
-        .long do_memory_op
-        .long do_multicall
-        .long do_update_va_mapping
-        .long do_set_timer_op       /* 15 */
-        .long do_event_channel_op_compat
-        .long do_xen_version
-        .long do_console_io
-        .long do_physdev_op_compat
-        .long do_grant_table_op     /* 20 */
-        .long do_vm_assist
-        .long do_update_va_mapping_otherdomain
-        .long do_iret
-        .long do_vcpu_op
-        .long do_ni_hypercall       /* 25 */
-        .long do_mmuext_op
-        .long do_xsm_op
-        .long do_nmi_op
-        .long do_sched_op
-        .long do_callback_op        /* 30 */
-        .long do_xenoprof_op
-        .long do_event_channel_op
-        .long do_physdev_op
-        .long do_hvm_op
-        .long do_sysctl             /* 35 */
-        .long do_domctl
-        .long do_kexec_op
-        .long do_tmem_op
-        .rept __HYPERVISOR_arch_0-((.-hypercall_table)/4)
-        .long do_ni_hypercall
-        .endr
-        .long do_mca                /* 48 */
-        .rept NR_hypercalls-((.-hypercall_table)/4)
-        .long do_ni_hypercall
-        .endr
-
-ENTRY(hypercall_args_table)
-        .byte 1 /* do_set_trap_table    */  /*  0 */
-        .byte 4 /* do_mmu_update        */
-        .byte 2 /* do_set_gdt           */
-        .byte 2 /* do_stack_switch      */
-        .byte 4 /* do_set_callbacks     */
-        .byte 1 /* do_fpu_taskswitch    */  /*  5 */
-        .byte 2 /* do_sched_op_compat   */
-        .byte 1 /* do_platform_op       */
-        .byte 2 /* do_set_debugreg      */
-        .byte 1 /* do_get_debugreg      */
-        .byte 4 /* do_update_descriptor */  /* 10 */
-        .byte 0 /* do_ni_hypercall      */
-        .byte 2 /* do_memory_op         */
-        .byte 2 /* do_multicall         */
-        .byte 4 /* do_update_va_mapping */
-        .byte 2 /* do_set_timer_op      */  /* 15 */
-        .byte 1 /* do_event_channel_op_compat */
-        .byte 2 /* do_xen_version       */
-        .byte 3 /* do_console_io        */
-        .byte 1 /* do_physdev_op_compat */
-        .byte 3 /* do_grant_table_op    */  /* 20 */
-        .byte 2 /* do_vm_assist         */
-        .byte 5 /* do_update_va_mapping_otherdomain */
-        .byte 0 /* do_iret              */
-        .byte 3 /* do_vcpu_op           */
-        .byte 0 /* do_ni_hypercall      */  /* 25 */
-        .byte 4 /* do_mmuext_op         */
-        .byte 1 /* do_xsm_op            */
-        .byte 2 /* do_nmi_op            */
-        .byte 2 /* do_sched_op          */
-        .byte 2 /* do_callback_op       */  /* 30 */
-        .byte 2 /* do_xenoprof_op       */
-        .byte 2 /* do_event_channel_op  */
-        .byte 2 /* do_physdev_op        */
-        .byte 2 /* do_hvm_op            */
-        .byte 1 /* do_sysctl            */  /* 35 */
-        .byte 1 /* do_domctl            */
-        .byte 2 /* do_kexec_op          */
-        .byte 1 /* do_tmem_op           */
-        .rept __HYPERVISOR_arch_0-(.-hypercall_args_table)
-        .byte 0 /* do_ni_hypercall      */
-        .endr
-        .byte 1 /* do_mca               */  /* 48 */
-        .rept NR_hypercalls-(.-hypercall_args_table)
-        .byte 0 /* do_ni_hypercall      */
-        .endr
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/x86_32/gdbstub.c
--- a/xen/arch/x86/x86_32/gdbstub.c     Wed Sep 12 13:24:28 2012 +0200
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,82 +0,0 @@
-/*
- * x86-specific gdb stub routines
- * based on x86 cdb(xen/arch/x86/cdb.c), but Extensively modified.
- * 
- * Copyright (C) 2006 Isaku Yamahata <yamahata at valinux co jp>
- *                    VA Linux Systems Japan. K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- * 
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- * 
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
- */
-
-#include <asm/debugger.h>
-
-void 
-gdb_arch_read_reg_array(struct cpu_user_regs *regs, struct gdb_context *ctx)
-{
-#define GDB_REG(r) gdb_write_to_packet_hex(r, sizeof(r), ctx);
-    GDB_REG(regs->eax);
-    GDB_REG(regs->ecx);
-    GDB_REG(regs->edx);
-    GDB_REG(regs->ebx);
-    GDB_REG(regs->esp);
-    GDB_REG(regs->ebp);
-    GDB_REG(regs->esi);
-    GDB_REG(regs->edi);
-    GDB_REG(regs->eip);
-    GDB_REG(regs->eflags);
-#undef GDB_REG
-#define GDB_SEG_REG(s)  gdb_write_to_packet_hex(s, sizeof(u32), ctx);
-    /* sizeof(segment) = 16bit */
-    /* but gdb requires its return value as 32bit value */
-    GDB_SEG_REG(regs->cs);
-    GDB_SEG_REG(regs->ss);
-    GDB_SEG_REG(regs->ds);
-    GDB_SEG_REG(regs->es);
-    GDB_SEG_REG(regs->fs);
-    GDB_SEG_REG(regs->gs);
-#undef GDB_SEG_REG
-    gdb_send_packet(ctx);
-}
-
-void
-gdb_arch_write_reg_array(struct cpu_user_regs *regs, const char* buf,
-                         struct gdb_context *ctx)
-{
-    /* XXX TODO */
-    gdb_send_reply("E02", ctx);
-}
-
-void
-gdb_arch_read_reg(unsigned long regnum, struct cpu_user_regs *regs,
-                  struct gdb_context *ctx)
-{
-    gdb_send_reply("", ctx);
-}
-
-void
-gdb_arch_write_reg(unsigned long regnum, unsigned long val, 
-                    struct cpu_user_regs *regs, struct gdb_context *ctx)
-{
-    gdb_send_reply("", ctx);
-}
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * End:
- */
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/x86_32/gpr_switch.S
--- a/xen/arch/x86/x86_32/gpr_switch.S  Wed Sep 12 13:24:28 2012 +0200
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,44 +0,0 @@
-/*
- * GPR context switch between host and guest.
- * Used by IO-port-access emulation stub.
- *
- * Copyright (c) 2006, Novell, Inc.
- */
-
-#include <xen/config.h>
-#include <asm/asm_defns.h>
-
-ENTRY(host_to_guest_gpr_switch)
-        movl  (%esp), %ecx
-        movl  %eax, (%esp)
-        movl  UREGS_edx(%eax), %edx
-        pushl %ebx
-        movl  UREGS_ebx(%eax), %ebx
-        pushl %ebp
-        movl  UREGS_ebp(%eax), %ebp
-        pushl %esi
-        movl  UREGS_esi(%eax), %esi
-        pushl %edi
-        movl  UREGS_edi(%eax), %edi
-        pushl $guest_to_host_gpr_switch
-        pushl %ecx
-        movl  UREGS_ecx(%eax), %ecx
-        movl  UREGS_eax(%eax), %eax
-        ret
-
-ENTRY(guest_to_host_gpr_switch)
-        pushl %edx
-        movl  5*4(%esp), %edx
-        movl  %eax, UREGS_eax(%edx)
-        popl  UREGS_edx(%edx)
-        movl  %edi, UREGS_edi(%edx)
-        popl  %edi
-        movl  %esi, UREGS_esi(%edx)
-        popl  %esi
-        movl  %ebp, UREGS_ebp(%edx)
-        popl  %ebp
-        movl  %ebx, UREGS_ebx(%edx)
-        popl  %ebx
-        movl  %ecx, UREGS_ecx(%edx)
-        popl  %ecx
-        ret
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/x86_32/machine_kexec.c
--- a/xen/arch/x86/x86_32/machine_kexec.c       Wed Sep 12 13:24:28 2012 +0200
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,30 +0,0 @@
-/******************************************************************************
- * machine_kexec.c
- *
- * Xen port written by:
- * - Simon 'Horms' Horman <horms@xxxxxxxxxxxx>
- * - Magnus Damm <magnus@xxxxxxxxxxxxx>
- */
-
-#include <xen/types.h>
-#include <xen/kernel.h>
-#include <asm/page.h>
-#include <public/kexec.h>
-
-int machine_kexec_get_xen(xen_kexec_range_t *range)
-{
-        range->start = virt_to_maddr(_start);
-        range->size = (unsigned long)xenheap_phys_end -
-                      (unsigned long)range->start;
-        return 0;
-}
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/x86_32/mm.c
--- a/xen/arch/x86/x86_32/mm.c  Wed Sep 12 13:24:28 2012 +0200
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,379 +0,0 @@
-/******************************************************************************
- * arch/x86/x86_32/mm.c
- * 
- * Modifications to Linux original are copyright (c) 2004, K A Fraser
- * 
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- * 
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- * 
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-
-#include <xen/config.h>
-#include <xen/lib.h>
-#include <xen/init.h>
-#include <xen/mm.h>
-#include <xen/sched.h>
-#include <xen/guest_access.h>
-#include <asm/current.h>
-#include <asm/page.h>
-#include <asm/flushtlb.h>
-#include <asm/fixmap.h>
-#include <asm/setup.h>
-#include <public/memory.h>
-
-unsigned int __read_mostly PAGE_HYPERVISOR         = __PAGE_HYPERVISOR;
-unsigned int __read_mostly PAGE_HYPERVISOR_NOCACHE = __PAGE_HYPERVISOR_NOCACHE;
-
-static unsigned long __read_mostly mpt_size;
-
-void *alloc_xen_pagetable(void)
-{
-    unsigned long mfn;
-
-    if ( system_state != SYS_STATE_early_boot )
-    {
-        void *v = alloc_xenheap_page();
-
-        BUG_ON(!dom0 && !v);
-        return v;
-    }
-
-    mfn = xenheap_initial_phys_start >> PAGE_SHIFT;
-    xenheap_initial_phys_start += PAGE_SIZE;
-    return mfn_to_virt(mfn);
-}
-
-l2_pgentry_t *virt_to_xen_l2e(unsigned long v)
-{
-    return &idle_pg_table_l2[l2_linear_offset(v)];
-}
-
-void *do_page_walk(struct vcpu *v, unsigned long addr)
-{
-    return NULL;
-}
-
-void __init paging_init(void)
-{
-    unsigned long v;
-    struct page_info *pg;
-    unsigned int i, n;
-
-    if ( cpu_has_pge )
-    {
-        /* Suitable Xen mapping can be GLOBAL. */
-        set_in_cr4(X86_CR4_PGE);
-        PAGE_HYPERVISOR         |= _PAGE_GLOBAL;
-        PAGE_HYPERVISOR_NOCACHE |= _PAGE_GLOBAL;
-        /* Transform early mappings (e.g., the frametable). */
-        for ( v = HYPERVISOR_VIRT_START; v; v += (1 << L2_PAGETABLE_SHIFT) )
-            if ( (l2e_get_flags(idle_pg_table_l2[l2_linear_offset(v)]) &
-                  (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT) )
-                l2e_add_flags(idle_pg_table_l2[l2_linear_offset(v)],
-                              _PAGE_GLOBAL);
-        for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
-            l1e_add_flags(l1_identmap[i], _PAGE_GLOBAL);
-    }
-
-    /*
-     * Allocate and map the machine-to-phys table and create read-only mapping 
-     * of MPT for guest-OS use.
-     */
-    mpt_size  = (max_page * BYTES_PER_LONG) + (1UL << L2_PAGETABLE_SHIFT) - 1;
-    mpt_size &= ~((1UL << L2_PAGETABLE_SHIFT) - 1UL);
-#define MFN(x) (((x) << L2_PAGETABLE_SHIFT) / sizeof(unsigned long))
-#define CNT ((sizeof(*frame_table) & -sizeof(*frame_table)) / \
-             sizeof(*machine_to_phys_mapping))
-    BUILD_BUG_ON((sizeof(*frame_table) & ~sizeof(*frame_table)) % \
-                 sizeof(*machine_to_phys_mapping));
-    for ( i = 0; i < (mpt_size >> L2_PAGETABLE_SHIFT); i++ )
-    {
-        for ( n = 0; n < CNT; ++n)
-            if ( mfn_valid(MFN(i) + n * PDX_GROUP_COUNT) )
-                break;
-        if ( n == CNT )
-            continue;
-        if ( (pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER, 0)) == NULL )
-            panic("Not enough memory to bootstrap Xen.\n");
-        l2e_write(&idle_pg_table_l2[l2_linear_offset(RDWR_MPT_VIRT_START) + i],
-                  l2e_from_page(pg, PAGE_HYPERVISOR | _PAGE_PSE));
-        /* NB. Cannot be GLOBAL as shadow_mode_translate reuses this area. */
-        l2e_write(&idle_pg_table_l2[l2_linear_offset(RO_MPT_VIRT_START) + i],
-                  l2e_from_page(
-                      pg, (__PAGE_HYPERVISOR | _PAGE_PSE) & ~_PAGE_RW));
-        /* Fill with INVALID_M2P_ENTRY. */
-        memset((void *)(RDWR_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT)), 0xFF,
-               1UL << L2_PAGETABLE_SHIFT);
-    }
-#undef CNT
-#undef MFN
-
-    machine_to_phys_mapping_valid = 1;
-
-    /* Create page tables for ioremap()/map_domain_page_global(). */
-    for ( i = 0; i < (IOREMAP_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
-    {
-        void *p;
-        l2_pgentry_t *pl2e;
-        pl2e = &idle_pg_table_l2[l2_linear_offset(IOREMAP_VIRT_START) + i];
-        if ( l2e_get_flags(*pl2e) & _PAGE_PRESENT )
-            continue;
-        p = alloc_xenheap_page();
-        clear_page(p);
-        l2e_write(pl2e, l2e_from_page(virt_to_page(p), __PAGE_HYPERVISOR));
-    }
-}
-
-void __init setup_idle_pagetable(void)
-{
-    int i;
-
-    for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
-        l2e_write(&idle_pg_table_l2[l2_linear_offset(PERDOMAIN_VIRT_START)+i],
-                  l2e_from_page(virt_to_page(idle_vcpu[0]->domain->
-                                             arch.mm_perdomain_pt) + i,
-                                __PAGE_HYPERVISOR));
-}
-
-void __init zap_low_mappings(l2_pgentry_t *dom0_l2)
-{
-    int i;
-
-    /* Clear temporary idle mappings from the dom0 initial l2. */
-    for ( i = 0; i < (HYPERVISOR_VIRT_START >> L2_PAGETABLE_SHIFT); i++ )
-        if ( l2e_get_intpte(dom0_l2[i]) ==
-             l2e_get_intpte(idle_pg_table_l2[i]) )
-            l2e_write(&dom0_l2[i], l2e_empty());
-
-    /* Now zap mappings in the idle pagetables. */
-    BUG_ON(l2e_get_pfn(idle_pg_table_l2[0]) != virt_to_mfn(l1_identmap));
-    l2e_write_atomic(&idle_pg_table_l2[0], l2e_empty());
-    destroy_xen_mappings(0, HYPERVISOR_VIRT_START);
-
-    flush_all(FLUSH_TLB_GLOBAL);
-
-    /* Replace with mapping of the boot trampoline only. */
-    map_pages_to_xen(trampoline_phys, trampoline_phys >> PAGE_SHIFT,
-                     PFN_UP(trampoline_end - trampoline_start),
-                     __PAGE_HYPERVISOR);
-}
-
-void __init subarch_init_memory(void)
-{
-    unsigned long m2p_start_mfn;
-    unsigned int i, j;
-    l2_pgentry_t l2e;
-
-    BUILD_BUG_ON(sizeof(struct page_info) != 24);
-
-    /* M2P table is mappable read-only by privileged domains. */
-    for ( i = 0; i < (mpt_size >> L2_PAGETABLE_SHIFT); i++ )
-    {
-        l2e = idle_pg_table_l2[l2_linear_offset(RDWR_MPT_VIRT_START) + i];
-        if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
-            continue;
-        m2p_start_mfn = l2e_get_pfn(l2e);
-        for ( j = 0; j < L2_PAGETABLE_ENTRIES; j++ )
-        {
-            struct page_info *page = mfn_to_page(m2p_start_mfn + j);
-            share_xen_page_with_privileged_guests(page, XENSHARE_readonly);
-        }
-    }
-
-    if ( supervisor_mode_kernel )
-    {
-        /* Guest kernel runs in ring 0, not ring 1. */
-        struct desc_struct *d;
-        d = &boot_cpu_gdt_table[(FLAT_RING1_CS >> 3) - FIRST_RESERVED_GDT_ENTRY];
-        d[0].b &= ~_SEGMENT_DPL;
-        d[1].b &= ~_SEGMENT_DPL;
-    }
-}
-
-long subarch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
-{
-    struct xen_machphys_mfn_list xmml;
-    unsigned long mfn, last_mfn;
-    unsigned int i, max;
-    l2_pgentry_t l2e;
-    long rc = 0;
-
-    switch ( op )
-    {
-    case XENMEM_machphys_mfn_list:
-        if ( copy_from_guest(&xmml, arg, 1) )
-            return -EFAULT;
-
-        max = min_t(unsigned int, xmml.max_extents, mpt_size >> 21);
-
-        for ( i = 0, last_mfn = 0; i < max; i++ )
-        {
-            l2e = idle_pg_table_l2[l2_linear_offset(
-                RDWR_MPT_VIRT_START + (i << 21))];
-            if ( l2e_get_flags(l2e) & _PAGE_PRESENT )
-                mfn = l2e_get_pfn(l2e);
-            else
-                mfn = last_mfn;
-            ASSERT(mfn);
-            if ( copy_to_guest_offset(xmml.extent_start, i, &mfn, 1) )
-                return -EFAULT;
-            last_mfn = mfn;
-        }
-
-        xmml.nr_extents = i;
-        if ( copy_to_guest(arg, &xmml, 1) )
-            return -EFAULT;
-
-        break;
-
-    default:
-        rc = -ENOSYS;
-        break;
-    }
-
-    return rc;
-}
-
-long do_stack_switch(unsigned long ss, unsigned long esp)
-{
-    struct tss_struct *t = &this_cpu(init_tss);
-
-    fixup_guest_stack_selector(current->domain, ss);
-
-    current->arch.pv_vcpu.kernel_ss = ss;
-    current->arch.pv_vcpu.kernel_sp = esp;
-    t->ss1  = ss;
-    t->esp1 = esp;
-
-    return 0;
-}
-
-/* Returns TRUE if given descriptor is valid for GDT or LDT. */
-int check_descriptor(const struct domain *dom, struct desc_struct *d)
-{
-    unsigned long base, limit;
-    u32 a = d->a, b = d->b;
-    u16 cs;
-
-    /* Let a ring0 guest kernel set any descriptor it wants to. */
-    if ( supervisor_mode_kernel )
-        return 1;
-
-    /* A not-present descriptor will always fault, so is safe. */
-    if ( !(b & _SEGMENT_P) ) 
-        goto good;
-
-    /*
-     * We don't allow a DPL of zero. There is no legitimate reason for 
-     * specifying DPL==0, and it gets rather dangerous if we also accept call 
-     * gates (consider a call gate pointing at another kernel descriptor with 
-     * DPL 0 -- this would get the OS ring-0 privileges).
-     */
-    if ( (b & _SEGMENT_DPL) < (GUEST_KERNEL_RPL(dom) << 13) )
-        d->b = b = (b & ~_SEGMENT_DPL) | (GUEST_KERNEL_RPL(dom) << 13);
-
-    if ( !(b & _SEGMENT_S) )
-    {
-        /*
-         * System segment:
-         *  1. Don't allow interrupt or trap gates as they belong in the IDT.
-         *  2. Don't allow TSS descriptors or task gates as we don't
-         *     virtualise x86 tasks.
-         *  3. Don't allow LDT descriptors because they're unnecessary and
-         *     I'm uneasy about allowing an LDT page to contain LDT
-         *     descriptors. In any case, Xen automatically creates the
-         *     required descriptor when reloading the LDT register.
-         *  4. We allow call gates but they must not jump to a private segment.
-         */
-
-        /* Disallow everything but call gates. */
-        if ( (b & _SEGMENT_TYPE) != 0xc00 )
-            goto bad;
-
-        /* Validate and fix up the target code selector. */
-        cs = a >> 16;
-        fixup_guest_code_selector(dom, cs);
-        if ( !guest_gate_selector_okay(dom, cs) )
-            goto bad;
-        a = d->a = (d->a & 0xffffU) | (cs << 16);
-
-        /* Reserved bits must be zero. */
-        if ( (b & 0xe0) != 0 )
-            goto bad;
-        
-        /* No base/limit check is needed for a call gate. */
-        goto good;
-    }
-    
-    /* Check that base is at least a page away from Xen-private area. */
-    base  = (b&(0xff<<24)) | ((b&0xff)<<16) | (a>>16);
-    if ( base >= (GUEST_SEGMENT_MAX_ADDR - PAGE_SIZE) )
-        goto bad;
-
-    /* Check and truncate the limit if necessary. */
-    limit = (b&0xf0000) | (a&0xffff);
-    limit++; /* We add one because limit is inclusive. */
-    if ( (b & _SEGMENT_G) )
-        limit <<= 12;
-
-    if ( (b & (_SEGMENT_CODE | _SEGMENT_EC)) == _SEGMENT_EC )
-    {
-        /*
-         * DATA, GROWS-DOWN.
-         * Grows-down limit check. 
-         * NB. limit == 0xFFFFF provides no access      (if G=1).
-         *     limit == 0x00000 provides 4GB-4kB access (if G=1).
-         */
-        if ( (base + limit) > base )
-        {
-            limit = -(base & PAGE_MASK);
-            goto truncate;
-        }
-    }
-    else
-    {
-        /*
-         * DATA, GROWS-UP. 
-         * CODE (CONFORMING AND NON-CONFORMING).
-         * Grows-up limit check.
-         * NB. limit == 0xFFFFF provides 4GB access (if G=1).
-         *     limit == 0x00000 provides 4kB access (if G=1).
-         */
-        if ( ((base + limit) <= base) || 
-             ((base + limit) > GUEST_SEGMENT_MAX_ADDR) )
-        {
-            limit = GUEST_SEGMENT_MAX_ADDR - base;
-        truncate:
-            if ( !(b & _SEGMENT_G) )
-                goto bad; /* too dangerous; too hard to work out... */
-            limit = (limit >> 12) - 1;
-            d->a &= ~0x0ffff; d->a |= limit & 0x0ffff;
-            d->b &= ~0xf0000; d->b |= limit & 0xf0000;
-        }
-    }
-
- good:
-    return 1;
- bad:
-    return 0;
-}
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/x86_32/pci.c
--- a/xen/arch/x86/x86_32/pci.c Wed Sep 12 13:24:28 2012 +0200
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,77 +0,0 @@
-/******************************************************************************
- * pci.c
- * 
- * Architecture-dependent PCI access functions.
- */
-
-#include <xen/spinlock.h>
-#include <xen/pci.h>
-#include <xen/init.h>
-#include <asm/io.h>
-
-#define PCI_CONF_ADDRESS(bus, dev, func, reg) \
-    (0x80000000 | (bus << 16) | (dev << 11) | (func << 8) | (reg & ~3))
-
-uint8_t pci_conf_read8(
-    unsigned int seg, unsigned int bus, unsigned int dev, unsigned int func,
-    unsigned int reg)
-{
-    if ( seg || (reg > 255) )
-        return ~0;
-    BUG_ON((bus > 255) || (dev > 31) || (func > 7));
-    return pci_conf_read(PCI_CONF_ADDRESS(bus, dev, func, reg), reg & 3, 1);
-}
-
-uint16_t pci_conf_read16(
-    unsigned int seg, unsigned int bus, unsigned int dev, unsigned int func,
-    unsigned int reg)
-{
-    if ( seg || (reg > 255) )
-        return ~0;
-    BUG_ON((bus > 255) || (dev > 31) || (func > 7));
-    return pci_conf_read(PCI_CONF_ADDRESS(bus, dev, func, reg), reg & 2, 2);
-}
-
-uint32_t pci_conf_read32(
-    unsigned int seg, unsigned int bus, unsigned int dev, unsigned int func,
-    unsigned int reg)
-{
-    if ( seg || (reg > 255) )
-        return ~0;
-    BUG_ON((bus > 255) || (dev > 31) || (func > 7));
-    return pci_conf_read(PCI_CONF_ADDRESS(bus, dev, func, reg), 0, 4);
-}
-
-void pci_conf_write8(
-    unsigned int seg, unsigned int bus, unsigned int dev, unsigned int func,
-    unsigned int reg, uint8_t data)
-{
-    if ( seg )
-        return;
-    BUG_ON((bus > 255) || (dev > 31) || (func > 7) || (reg > 255));
-    pci_conf_write(PCI_CONF_ADDRESS(bus, dev, func, reg), reg & 3, 1, data);
-}
-
-void pci_conf_write16(
-    unsigned int seg, unsigned int bus, unsigned int dev, unsigned int func,
-    unsigned int reg, uint16_t data)
-{
-    if ( seg )
-        return;
-    BUG_ON((bus > 255) || (dev > 31) || (func > 7) || (reg > 255));
-    pci_conf_write(PCI_CONF_ADDRESS(bus, dev, func, reg), reg & 2, 2, data);
-}
-
-void pci_conf_write32(
-    unsigned int seg, unsigned int bus, unsigned int dev, unsigned int func,
-    unsigned int reg, uint32_t data)
-{
-    if ( seg )
-        return;
-    BUG_ON((bus > 255) || (dev > 31) || (func > 7) || (reg > 255));
-    pci_conf_write(PCI_CONF_ADDRESS(bus, dev, func, reg), 0, 4, data);
-}
-
-void __init arch_pci_ro_device(int seg, int bdf)
-{
-}
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/x86_32/seg_fixup.c
--- a/xen/arch/x86/x86_32/seg_fixup.c   Wed Sep 12 13:24:28 2012 +0200
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,577 +0,0 @@
-/******************************************************************************
- * arch/x86/x86_32/seg_fixup.c
- * 
- * Support for -ve accesses to pseudo-4GB segments.
- * 
- * Copyright (c) 2004, K A Fraser
- * 
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- * 
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- * 
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-
-#include <xen/config.h>
-#include <xen/init.h>
-#include <xen/sched.h>
-#include <xen/lib.h>
-#include <xen/errno.h>
-#include <xen/mm.h>
-#include <xen/perfc.h>
-#include <asm/current.h>
-#include <asm/processor.h>
-#include <asm/regs.h>
-#include <asm/x86_emulate.h>
-
-/* General instruction properties. */
-#define INSN_SUFFIX_BYTES (7)
-#define OPCODE_BYTE       (1<<4)  
-#define HAS_MODRM         (1<<5)
-
-/* Short forms for the table. */
-#define X  0 /* invalid for some random reason */
-#define O  OPCODE_BYTE
-#define M  HAS_MODRM
-
-static const u8 insn_decode[256] = {
-    /* 0x00 - 0x0F */
-    O|M, O|M, O|M, O|M, X, X, X, X,
-    O|M, O|M, O|M, O|M, X, X, X, X,
-    /* 0x10 - 0x1F */
-    O|M, O|M, O|M, O|M, X, X, X, X,
-    O|M, O|M, O|M, O|M, X, X, X, X,
-    /* 0x20 - 0x2F */
-    O|M, O|M, O|M, O|M, X, X, X, X,
-    O|M, O|M, O|M, O|M, X, X, X, X,
-    /* 0x30 - 0x3F */
-    O|M, O|M, O|M, O|M, X, X, X, X,
-    O|M, O|M, O|M, O|M, X, X, X, X,
-    /* 0x40 - 0x4F */
-    X, X, X, X, X, X, X, X,
-    X, X, X, X, X, X, X, X,
-    /* 0x50 - 0x5F */
-    X, X, X, X, X, X, X, X,
-    X, X, X, X, X, X, X, X,
-    /* 0x60 - 0x6F */
-    X, X, X, X, X, X, X, X,
-    X, O|M|4, X, O|M|1, X, X, X, X,
-    /* 0x70 - 0x7F */
-    X, X, X, X, X, X, X, X,
-    X, X, X, X, X, X, X, X,
-    /* 0x80 - 0x8F */
-    O|M|1, O|M|4, O|M|1, O|M|1, O|M, O|M, O|M, O|M,
-    O|M, O|M, O|M, O|M, O|M, X|M, O|M, O|M,
-    /* 0x90 - 0x9F */
-    X, X, X, X, X, X, X, X,
-    X, X, X, X, X, X, X, X,
-    /* 0xA0 - 0xAF */
-    O|4, O|4, O|4, O|4, X, X, X, X,
-    X, X, X, X, X, X, X, X,
-    /* 0xB0 - 0xBF */
-    X, X, X, X, X, X, X, X,
-    X, X, X, X, X, X, X, X,
-    /* 0xC0 - 0xCF */
-    O|M|1, O|M|1, X, X, X, X, O|M|1, O|M|4,
-    X, X, X, X, X, X, X, X,
-    /* 0xD0 - 0xDF */
-    O|M, O|M, O|M, O|M, X, X, X, X,
-    X, X, X, X, X, X, X, X,
-    /* 0xE0 - 0xEF */
-    X, X, X, X, X, X, X, X,
-    X, X, X, X, X, X, X, X,
-    /* 0xF0 - 0xFF */
-    X, X, X, X, X, X, O|M, O|M,
-    X, X, X, X, X, X, O|M, O|M
-};
-
-static const u8 float_decode[64] = {
-    O|M, O|M, O|M, O|M, O|M, O|M, O|M, O|M, /* 0xD8 */
-    O|M, X, O|M, O|M, O|M, O|M, O|M, O|M, /* 0xD9 */
-    O|M, O|M, O|M, O|M, O|M, O|M, O|M, O|M, /* 0xDA */
-    O|M, X, O|M, O|M, X, O|M, X, O|M, /* 0xDB */
-    O|M, O|M, O|M, O|M, O|M, O|M, O|M, O|M, /* 0xDC */
-    O|M, O|M, O|M, O|M, O|M, X, O|M, O|M, /* 0xDD */
-    O|M, O|M, O|M, O|M, O|M, O|M, O|M, O|M, /* 0xDE */
-    O|M, X, O|M, O|M, O|M, O|M, O|M, O|M, /* 0xDF */
-};
-
-static const u8 twobyte_decode[256] = {
-    /* 0x00 - 0x0F */
-    X, X, X, X, X, X, X, X,
-    X, X, X, X, X, X, X, X,
-    /* 0x10 - 0x1F */
-    X, X, X, X, X, X, X, X,
-    O|M, X, X, X, X, X, X, X,
-    /* 0x20 - 0x2F */
-    X, X, X, X, X, X, X, X,
-    X, X, X, X, X, X, X, X,
-    /* 0x30 - 0x3F */
-    X, X, X, X, X, X, X, X,
-    X, X, X, X, X, X, X, X,
-    /* 0x40 - 0x4F */
-    O|M, O|M, O|M, O|M, O|M, O|M, O|M, O|M,
-    O|M, O|M, O|M, O|M, O|M, O|M, O|M, O|M,
-    /* 0x50 - 0x5F */
-    X, X, X, X, X, X, X, X,
-    X, X, X, X, X, X, X, X,
-    /* 0x60 - 0x6F */
-    X, X, X, X, X, X, X, X,
-    X, X, X, X, X, X, X, X,
-    /* 0x70 - 0x7F */
-    X, X, X, X, X, X, X, X,
-    X, X, X, X, X, X, X, X,
-    /* 0x80 - 0x8F */
-    X, X, X, X, X, X, X, X,
-    X, X, X, X, X, X, X, X,
-    /* 0x90 - 0x9F */
-    O|M, O|M, O|M, O|M, O|M, O|M, O|M, O|M,
-    O|M, O|M, O|M, O|M, O|M, O|M, O|M, O|M,
-    /* 0xA0 - 0xAF */
-    X, X, X, O|M, O|M|1, O|M, O|M, X,
-    X, X, X, O|M, O|M|1, O|M, X, O|M,
-    /* 0xB0 - 0xBF */
-    X, X, X, O|M, X, X, O|M, O|M,
-    X, X, O|M|1, O|M, O|M, O|M, O|M, O|M,
-    /* 0xC0 - 0xCF */
-    O|M, O|M, X, O|M, X, X, X, O|M,
-    X, X, X, X, X, X, X, X,
-    /* 0xD0 - 0xDF */
-    X, X, X, X, X, X, X, X,
-    X, X, X, X, X, X, X, X,
-    /* 0xE0 - 0xEF */
-    X, X, X, X, X, X, X, X,
-    X, X, X, X, X, X, X, X,
-    /* 0xF0 - 0xFF */
-    X, X, X, X, X, X, X, X,
-    X, X, X, X, X, X, X, X
-};
-
-/*
- * Obtain the base and limit associated with the given segment selector.
- * The selector must identify a 32-bit code or data segment. Any segment that
- * appears to be truncated to not overlap with Xen is assumed to be a truncated
- * 4GB segment, and the returned limit reflects this.
- *  @seg   (IN) : Segment selector to decode.
- *  @base  (OUT): Decoded linear base address.
- *  @limit (OUT): Decoded segment limit, in bytes. 0 == unlimited (4GB).
- */
-static int get_baselimit(u16 seg, unsigned long *base, unsigned long *limit)
-{
-    struct vcpu *curr = current;
-    uint32_t    *table, a, b;
-    int          ldt = !!(seg & 4);
-    int          idx = (seg >> 3) & 8191;
-
-    /* Get base and check limit. */
-    if ( ldt )
-    {
-        table = (uint32_t *)LDT_VIRT_START(curr);
-        if ( idx >= curr->arch.pv_vcpu.ldt_ents )
-            goto fail;
-    }
-    else /* gdt */
-    {
-        table = (uint32_t *)GDT_VIRT_START(curr);
-        if ( idx >= curr->arch.pv_vcpu.gdt_ents )
-            goto fail;
-    }
-
-    /* Grab the segment descriptor. */
-    if ( __get_user(a, &table[2*idx+0]) ||
-         __get_user(b, &table[2*idx+1]) )
-        goto fail; /* Barking up the wrong tree. Decode needs a page fault.*/
-
-    /* We only parse 32-bit code and data segments. */
-    if ( (b & (_SEGMENT_P|_SEGMENT_S|_SEGMENT_DB)) != 
-         (_SEGMENT_P|_SEGMENT_S|_SEGMENT_DB) )
-        goto fail;
-
-    /* Decode base and limit. */
-    *base  = (b&(0xff<<24)) | ((b&0xff)<<16) | (a>>16);
-    *limit = ((b & 0xf0000) | (a & 0x0ffff)) + 1;
-    if ( (b & _SEGMENT_G) )
-        *limit <<= 12;
-
-    /*
-     * Anything that looks like a truncated segment we assume ought really
-     * to be a 4GB segment. DANGER!
-     */
-    if ( (GUEST_SEGMENT_MAX_ADDR - (*base + *limit)) < PAGE_SIZE )
-        *limit = 0;
-
-    return 1;
-
- fail:
-    return 0;
-}
-
-/* Turn a segment+offset into a linear address. */
-static int linearise_address(u16 seg, unsigned long off, unsigned long *linear)
-{
-    unsigned long base, limit;
-
-    if ( !get_baselimit(seg, &base, &limit) )
-        return 0;
-
-    if ( off > (limit-1) )
-        return 0;
-
-    *linear = base + off;
-
-    return 1;
-}
-
-static int fixup_seg(u16 seg, unsigned long offset)
-{
-    struct vcpu *curr = current;
-    uint32_t    *table, a, b, base, limit;
-    int          ldt = !!(seg & 4);
-    int          idx = (seg >> 3) & 8191;
-
-    /* Get base and check limit. */
-    if ( ldt )
-    {
-        table = (uint32_t *)LDT_VIRT_START(curr);
-        if ( idx >= curr->arch.pv_vcpu.ldt_ents )
-        {
-            dprintk(XENLOG_DEBUG, "Segment %04x out of LDT range (%u)\n",
-                    seg, curr->arch.pv_vcpu.ldt_ents);
-            goto fail;
-        }
-    }
-    else /* gdt */
-    {
-        table = (uint32_t *)GDT_VIRT_START(curr);
-        if ( idx >= curr->arch.pv_vcpu.gdt_ents )
-        {
-            dprintk(XENLOG_DEBUG, "Segment %04x out of GDT range (%u)\n",
-                    seg, curr->arch.pv_vcpu.gdt_ents);
-            goto fail;
-        }
-    }
-
-    /* Grab the segment descriptor. */
-    if ( __get_user(a, &table[2*idx+0]) ||
-         __get_user(b, &table[2*idx+1]) )
-    {
-        dprintk(XENLOG_DEBUG, "Fault while reading segment %04x\n", seg);
-        goto fail; /* Barking up the wrong tree. Decode needs a page fault.*/
-    }
-
-    /* We only parse 32-bit page-granularity non-privileged data segments. */
-    if ( (b & (_SEGMENT_P|_SEGMENT_S|_SEGMENT_DB|
-               _SEGMENT_G|_SEGMENT_CODE|_SEGMENT_DPL)) != 
-         (_SEGMENT_P|_SEGMENT_S|_SEGMENT_DB|_SEGMENT_G|_SEGMENT_DPL) )
-    {
-        dprintk(XENLOG_DEBUG, "Bad segment %08x:%08x\n", a, b);
-        goto fail;
-    }
-
-    /* Decode base and limit. */
-    base  = (b&(0xff<<24)) | ((b&0xff)<<16) | (a>>16);
-    limit = (((b & 0xf0000) | (a & 0x0ffff)) + 1) << 12;
-
-    if ( b & _SEGMENT_EC )
-    {
-        /* Expands-down: All the way to zero? Assume 4GB if so. */
-        if ( ((base + limit) < PAGE_SIZE) && (offset <= limit)  )
-        {
-            /* Flip to expands-up. */
-            limit = GUEST_SEGMENT_MAX_ADDR - base;
-            goto flip;
-        }
-    }
-    else
-    {
-        /* Expands-up: All the way to Xen space? Assume 4GB if so. */
-        if ( ((GUEST_SEGMENT_MAX_ADDR - (base + limit)) < PAGE_SIZE) &&
-             (offset > limit) )
-        {
-            /* Flip to expands-down. */
-            limit = -(base & PAGE_MASK);
-            goto flip;
-        }
-    }
-
-    dprintk(XENLOG_DEBUG, "None of the above! (%08x:%08x, %08x, %08x, %08x)\n",
-            a, b, base, limit, base+limit);
-
- fail:
-    return 0;
-
- flip:
-    limit = (limit >> 12) - 1;
-    a &= ~0x0ffff; a |= limit & 0x0ffff;
-    b &= ~0xf0000; b |= limit & 0xf0000;
-    b ^= _SEGMENT_EC; /* grows-up <-> grows-down */
-    /* NB. This can't fault. Checked readable above; must also be writable. */
-    write_atomic((uint64_t *)&table[2*idx], ((uint64_t)b<<32) | a);
-    return 1;
-}
-
-/*
- * Called from the general-protection fault handler to attempt to decode
- * and emulate an instruction that depends on 4GB segments.
- */
-int gpf_emulate_4gb(struct cpu_user_regs *regs)
-{
-    struct vcpu   *curr = current;
-    u8             modrm, mod, rm, decode;
-    const u32     *base, *index = NULL;
-    unsigned long  offset;
-    s8             disp8;
-    s32            disp32 = 0;
-    u8            *eip;         /* ptr to instruction start */
-    u8            *pb, b;       /* ptr into instr. / current instr. byte */
-    int            gs_override = 0, scale = 0, opcode = -1;
-    const u8      *table = insn_decode;
-
-    /* WARNING: We only work for ring-3 segments. */
-    if ( unlikely(vm86_mode(regs)) || unlikely(!ring_3(regs)) )
-        goto fail;
-
-    if ( !linearise_address((u16)regs->cs, regs->eip, (unsigned long *)&eip) )
-    {
-        dprintk(XENLOG_DEBUG, "Cannot linearise %04x:%08x\n",
-                regs->cs, regs->eip);
-        goto fail;
-    }
-
-    /* Parse prefix bytes. We're basically looking for segment override. */
-    for ( pb = eip; ; pb++ )
-    {
-        if ( get_user(b, pb) )
-        {
-            dprintk(XENLOG_DEBUG,
-                    "Fault while accessing byte %ld of instruction\n",
-                    (long)(pb-eip));
-            goto page_fault;
-        }
-
-        if ( (pb - eip) >= 15 )
-        {
-            dprintk(XENLOG_DEBUG, "Too many instruction prefixes for a "
-                    "legal instruction\n");
-            goto fail;
-        }
-
-        if ( opcode != -1 )
-        {
-            opcode = (opcode << 8) | b;
-            break;
-        }
-
-        switch ( b )
-        {
-        case 0x67: /* Address-size override */
-        case 0x2e: /* CS override */
-        case 0x3e: /* DS override */
-        case 0x26: /* ES override */
-        case 0x64: /* FS override */
-        case 0x36: /* SS override */
-            dprintk(XENLOG_DEBUG, "Unhandled prefix %02x\n", b);
-            goto fail;
-        case 0x66: /* Operand-size override */
-        case 0xf0: /* LOCK */
-        case 0xf2: /* REPNE/REPNZ */
-        case 0xf3: /* REP/REPE/REPZ */
-            break;
-        case 0x65: /* GS override */
-            gs_override = 1;
-            break;
-        case 0x0f: /* Not really a prefix byte */
-            table = twobyte_decode;
-            opcode = b;
-            break;
-        case 0xd8: /* Math coprocessor instructions.  */
-        case 0xd9:
-        case 0xda:
-        case 0xdb:
-        case 0xdc:
-        case 0xdd:
-        case 0xde:
-        case 0xdf:
-            /* Float opcodes have a secondary opcode in the modrm byte.  */
-            table = float_decode;
-            if ( get_user(modrm, pb + 1) )
-            {
-                dprintk(XENLOG_DEBUG, "Fault while extracting modrm byte\n");
-                goto page_fault;
-            }
-
-            opcode = (b << 8) | modrm;
-            b = ((b & 7) << 3) + ((modrm >> 3) & 7);
-            goto done_prefix;
-
-        default: /* Not a prefix byte */
-            goto done_prefix;
-        }
-    }
- done_prefix:
-
-    if ( !gs_override )
-    {
-        dprintk(XENLOG_DEBUG, "Only instructions with GS override\n");
-        goto fail;
-    }
-
-    decode = table[b];
-    pb++;
-
-    if ( !(decode & OPCODE_BYTE) )
-    {
-        if (opcode == -1)
-            dprintk(XENLOG_DEBUG, "Unsupported opcode %02x\n", b);
-        else
-            dprintk(XENLOG_DEBUG, "Unsupported opcode %02x %02x\n",
-                    opcode >> 8, opcode & 255);
-        goto fail;
-    }
-
-    if ( !(decode & HAS_MODRM) )
-    {
-        /* Must be a <disp32>, or bail. */
-        if ( (decode & INSN_SUFFIX_BYTES) != 4 )
-            goto fail;
-
-        if ( get_user(offset, (u32 *)pb) )
-        {
-            dprintk(XENLOG_DEBUG, "Fault while extracting <moffs32>.\n");
-            goto page_fault;
-        }
-        pb += 4;
-
-        goto skip_modrm;
-    }
-
-    /*
-     * Mod/RM processing.
-     */
-
-    if ( get_user(modrm, pb) )
-    {
-        dprintk(XENLOG_DEBUG, "Fault while extracting modrm byte\n");
-        goto page_fault;
-    }
-
-    pb++;
-
-    mod = (modrm >> 6) & 3;
-    rm  = (modrm >> 0) & 7;
-
-    if ( rm == 4 )
-    {
-        u8 sib;
-
-        if ( get_user(sib, pb) )
-        {
-            dprintk(XENLOG_DEBUG, "Fault while extracting sib byte\n");
-            goto page_fault;
-        }
-
-        pb++;
-
-        rm = sib & 7;
-        if ( (sib & 0x38) != 0x20 )
-            index = decode_register((sib >> 3) & 7, regs, 0);
-        scale = sib >> 6;
-    }
-
-    /* Decode R/M field. */
-    base = decode_register(rm, regs, 0);
-
-    /* Decode Mod field. */
-    switch ( mod )
-    {
-    case 0:
-        if ( rm == 5 ) /* disp32 rather than (EBP) */
-        {
-            base = NULL;
-            if ( get_user(disp32, (u32 *)pb) )
-            {
-                dprintk(XENLOG_DEBUG, "Fault while extracting <base32>.\n");
-                goto page_fault;
-            }
-            pb += 4;
-        }
-        break;
-
-    case 1:
-        if ( get_user(disp8, pb) )
-        {
-            dprintk(XENLOG_DEBUG, "Fault while extracting <disp8>.\n");
-            goto page_fault;
-        }
-        pb++;
-        disp32 = disp8;
-        break;
-
-    case 2:
-        if ( get_user(disp32, (u32 *)pb) )
-        {
-            dprintk(XENLOG_DEBUG, "Fault while extracting <disp32>.\n");
-            goto page_fault;
-        }
-        pb += 4;
-        break;
-
-    case 3:
-        dprintk(XENLOG_DEBUG, "Not a memory operand!\n");
-        goto fail;
-    }
-
-    offset = disp32;
-    if ( base != NULL )
-        offset += *base;
-    if ( index != NULL )
-        offset += *index << scale;
-
- skip_modrm:
-    if ( !fixup_seg((u16)regs->gs, offset) )
-        goto fail;
-
-    /* Success! */
-    perfc_incr(seg_fixups);
-
-    /* If requested, give a callback on otherwise unused vector 15. */
-    if ( VM_ASSIST(curr->domain, VMASST_TYPE_4gb_segments_notify) )
-    {
-        struct trap_info   *ti  = &curr->arch.pv_vcpu.trap_ctxt[15];
-        struct trap_bounce *tb  = &curr->arch.pv_vcpu.trap_bounce;
-
-        tb->flags      = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE;
-        tb->error_code = pb - eip;
-        tb->cs         = ti->cs;
-        tb->eip        = ti->address;
-        if ( TI_GET_IF(ti) )
-            tb->flags |= TBF_INTERRUPT;
-    }
-
-    return EXCRET_fault_fixed;
-
- fail:
-    return 0;
-
- page_fault:
-    propagate_page_fault((unsigned long)pb, 0); /* read fault */
-    return EXCRET_fault_fixed;
-}
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/x86_32/supervisor_mode_kernel.S
--- a/xen/arch/x86/x86_32/supervisor_mode_kernel.S      Wed Sep 12 13:24:28 2012 +0200
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,145 +0,0 @@
-/*
- * Handle stack fixup for guest running in RING 0.
- *
- * Copyright (c) 2006 Ian Campbell
- *
- * When a guest kernel is allowed to run in RING 0 a hypercall,
- * interrupt or exception interrupting the guest kernel will not cause
- * a privilege level change and therefore the stack will not be swapped
- * to the Xen stack.
- *
- * To fix this we look for RING 0 activation frames with a stack
- * pointer below HYPERVISOR_VIRT_START (indicating a guest kernel
- * frame) and fix this up by locating the Xen stack via the TSS
- * and moving the activation frame to the Xen stack. In the process we
- * convert the frame into an inter-privilege frame returning to RING 1
- * so that we can catch and reverse the process on exit.
- */
-
-#include <xen/config.h>
-#include <asm/asm_defns.h>
-#include <public/xen.h>
-
-#define guestreg(field) ((field)-UREGS_eip+36)
-
-        # Upon entry the stack should be the Xen stack and contain:
-        #   %ss, %esp, EFLAGS, %cs|1, %eip, RETURN
-        # On exit the stack should be %ss:%esp (i.e. the guest stack)
-        # and contain:
-        #   EFLAGS, %cs, %eip, RETURN
-        ALIGN
-ENTRY(restore_ring0_guest)
-        pusha
-
-        # Point %gs:%esi to guest stack.
-RRG0:   movw guestreg(UREGS_ss)(%esp),%gs
-        movl guestreg(UREGS_esp)(%esp),%esi
-
-        # Copy EFLAGS, %cs, %eip, RETURN, PUSHA from Xen stack to guest stack.
-        movl $12,%ecx /* 12 32-bit values */
-
-1:      subl $4,%esi
-        movl -4(%esp,%ecx,4),%eax
-RRG1:   movl %eax,%gs:(%esi)
-        loop 1b
-
-RRG2:   andl $~3,%gs:guestreg(UREGS_cs)(%esi)
-
-        movl %gs,%eax
-
-        # We need to do this because these registers are not present
-        # on the guest stack so they cannot be restored by the code in
-        # restore_all_guest.
-RRG3:   mov  guestreg(UREGS_ds)(%esp),%ds
-RRG4:   mov  guestreg(UREGS_es)(%esp),%es
-RRG5:   mov  guestreg(UREGS_fs)(%esp),%fs
-RRG6:   mov  guestreg(UREGS_gs)(%esp),%gs
-
-RRG7:   movl %eax,%ss
-        movl %esi,%esp
-
-        popa
-        ret
-.section __ex_table,"a"
-        .long RRG0,domain_crash_synchronous
-        .long RRG1,domain_crash_synchronous
-        .long RRG2,domain_crash_synchronous
-        .long RRG3,domain_crash_synchronous
-        .long RRG4,domain_crash_synchronous
-        .long RRG5,domain_crash_synchronous
-        .long RRG6,domain_crash_synchronous
-        .long RRG7,domain_crash_synchronous
-.previous
-
-        # Upon entry the stack should be a guest stack and contain:
-        #   EFLAGS, %cs, %eip, ERROR, RETURN
-        # On exit the stack should be the Xen stack and contain:
-        #   %ss, %esp, EFLAGS, %cs|1, %eip, ERROR, RETURN
-        ALIGN
-ENTRY(fixup_ring0_guest_stack)
-        pushl %eax
-        pushl %ecx
-        pushl %ds
-        pushl %gs
-        pushl %esi
-
-        movw  $__HYPERVISOR_DS,%ax
-        movw  %ax,%ds
-
-        # Point %gs:%esi to guest stack frame.
-        movw  %ss,%ax
-        movw  %ax,%gs
-        movl  %esp,%esi
-        # Account for entries on the guest stack:
-        # * Pushed by normal exception/interrupt/hypercall mechanisms
-        #   * EFLAGS, %cs, %eip, ERROR == 4 words.
-        # * Pushed by the fixup routine
-        #   * [RETURN], %eax, %ecx, %ds, %gs and %esi == 6 words.
-        addl $((6+4)*4),%esi
-
-        # %gs:%esi now points to the guest stack before the
-        # interrupt/exception occurred.
-
-        movl  $PER_CPU_GDT_ENTRY*8,%ecx
-        lsll  %ecx,%ecx
-        movl  __per_cpu_offset(,%ecx,4),%ecx
-        addl  $per_cpu__init_tss,%ecx
-
-        # Load Xen stack from TSS.
-        movw  TSS_ss0(%ecx),%ax
-TRP1:   movw  %ax,%ss
-        movl  TSS_esp0(%ecx),%esp
-
-        pushl %gs
-        pushl %esi
-
-        # Move EFLAGS, %cs, %eip, ERROR, RETURN, %eax, %ecx, %ds, %gs, %esi
-        # from guest stack to Xen stack.
-        movl  $10,%ecx
-1:      subl  $4,%esp
-        subl  $4,%esi
-TRP2:   movl  %gs:(%esi),%eax
-        movl  %eax,(%esp)
-        loop  1b
-
-        # CS = CS|1 to simulate RING1 stack frame.
-        orl   $1,32(%esp)
-
-        popl  %esi
-        popl  %gs
-        popl  %ds
-        popl  %ecx
-        popl  %eax
-        ret
-.section __ex_table,"a"
-        .long TRP1,domain_crash_synchronous
-        .long TRP2,domain_crash_synchronous
-.previous
-
-domain_crash_synchronous_string:
-        .asciz "domain_crash_sync called from supervisor_mode_kernel.S (%lx)\n"
-
-domain_crash_synchronous:
-        pushl $domain_crash_synchronous_string
-        call  printk
-        jmp   __domain_crash_synchronous
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/x86_32/traps.c
--- a/xen/arch/x86/x86_32/traps.c       Wed Sep 12 13:24:28 2012 +0200
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,637 +0,0 @@
-
-#include <xen/config.h>
-#include <xen/version.h>
-#include <xen/domain_page.h>
-#include <xen/init.h>
-#include <xen/sched.h>
-#include <xen/lib.h>
-#include <xen/console.h>
-#include <xen/mm.h>
-#include <xen/irq.h>
-#include <xen/symbols.h>
-#include <xen/shutdown.h>
-#include <xen/nmi.h>
-#include <xen/cpu.h>
-#include <xen/guest_access.h>
-#include <asm/current.h>
-#include <asm/flushtlb.h>
-#include <asm/traps.h>
-#include <asm/hvm/hvm.h>
-#include <asm/hvm/support.h>
-
-#include <public/callback.h>
-
-static void print_xen_info(void)
-{
-    char taint_str[TAINT_STRING_MAX_LEN];
-    char debug = 'n', *arch = "x86_32p";
-
-#ifndef NDEBUG
-    debug = 'y';
-#endif
-
-    printk("----[ Xen-%d.%d%s  %s  debug=%c  %s ]----\n",
-           xen_major_version(), xen_minor_version(), xen_extra_version(),
-           arch, debug, print_tainted(taint_str));
-}
-
-enum context { CTXT_hypervisor, CTXT_pv_guest, CTXT_hvm_guest };
-
-static void _show_registers(
-    const struct cpu_user_regs *regs, unsigned long crs[8],
-    enum context context, const struct vcpu *v)
-{
-    const static char *context_names[] = {
-        [CTXT_hypervisor] = "hypervisor",
-        [CTXT_pv_guest]   = "pv guest",
-        [CTXT_hvm_guest]  = "hvm guest"
-    };
-
-    printk("EIP:    %04x:[<%08x>]", regs->cs, regs->eip);
-    if ( context == CTXT_hypervisor )
-        print_symbol(" %s", regs->eip);
-    printk("\nEFLAGS: %08x   ", regs->eflags);
-    if ( (context == CTXT_pv_guest) && v && v->vcpu_info )
-        printk("EM: %d   ", !!v->vcpu_info->evtchn_upcall_mask);
-    printk("CONTEXT: %s\n", context_names[context]);
-
-    printk("eax: %08x   ebx: %08x   ecx: %08x   edx: %08x\n",
-           regs->eax, regs->ebx, regs->ecx, regs->edx);
-    printk("esi: %08x   edi: %08x   ebp: %08x   esp: %08x\n",
-           regs->esi, regs->edi, regs->ebp, regs->esp);
-    printk("cr0: %08lx   cr4: %08lx   cr3: %08lx   cr2: %08lx\n",
-           crs[0], crs[4], crs[3], crs[2]);
-    printk("ds: %04x   es: %04x   fs: %04x   gs: %04x   "
-           "ss: %04x   cs: %04x\n",
-           regs->ds, regs->es, regs->fs,
-           regs->gs, regs->ss, regs->cs);
-}
-
-void show_registers(struct cpu_user_regs *regs)
-{
-    struct cpu_user_regs fault_regs = *regs;
-    unsigned long fault_crs[8];
-    enum context context;
-    struct vcpu *v = current;
-
-    if ( is_hvm_vcpu(v) && guest_mode(regs) )
-    {
-        struct segment_register sreg;
-        context = CTXT_hvm_guest;
-        fault_crs[0] = v->arch.hvm_vcpu.guest_cr[0];
-        fault_crs[2] = v->arch.hvm_vcpu.guest_cr[2];
-        fault_crs[3] = v->arch.hvm_vcpu.guest_cr[3];
-        fault_crs[4] = v->arch.hvm_vcpu.guest_cr[4];
-        hvm_get_segment_register(v, x86_seg_cs, &sreg);
-        fault_regs.cs = sreg.sel;
-        hvm_get_segment_register(v, x86_seg_ds, &sreg);
-        fault_regs.ds = sreg.sel;
-        hvm_get_segment_register(v, x86_seg_es, &sreg);
-        fault_regs.es = sreg.sel;
-        hvm_get_segment_register(v, x86_seg_fs, &sreg);
-        fault_regs.fs = sreg.sel;
-        hvm_get_segment_register(v, x86_seg_gs, &sreg);
-        fault_regs.gs = sreg.sel;
-        hvm_get_segment_register(v, x86_seg_ss, &sreg);
-        fault_regs.ss = sreg.sel;
-    }
-    else
-    {
-        if ( !guest_mode(regs) )
-        {
-            context = CTXT_hypervisor;
-            fault_regs.esp = (unsigned long)&regs->esp;
-            fault_regs.ss = read_segment_register(ss);
-            fault_regs.ds = read_segment_register(ds);
-            fault_regs.es = read_segment_register(es);
-            fault_regs.fs = read_segment_register(fs);
-            fault_regs.gs = read_segment_register(gs);
-            fault_crs[2] = read_cr2();
-        }
-        else
-        {
-            context = CTXT_pv_guest;
-            fault_crs[2] = v->vcpu_info->arch.cr2;
-        }
-
-        fault_crs[0] = read_cr0();
-        fault_crs[3] = read_cr3();
-        fault_crs[4] = read_cr4();
-    }
-
-    print_xen_info();
-    printk("CPU:    %d\n", smp_processor_id());
-    _show_registers(&fault_regs, fault_crs, context, v);
-
-    if ( this_cpu(ler_msr) && !guest_mode(regs) )
-    {
-        u32 from, to, hi;
-        rdmsr(this_cpu(ler_msr), from, hi);
-        rdmsr(this_cpu(ler_msr) + 1, to, hi);
-        printk("ler: %08x -> %08x\n", from, to);
-    }
-}
-
-void vcpu_show_registers(const struct vcpu *v)
-{
-    unsigned long crs[8];
-
-    /* No need to handle HVM for now. */
-    if ( is_hvm_vcpu(v) )
-        return;
-
-    crs[0] = v->arch.pv_vcpu.ctrlreg[0];
-    crs[2] = v->vcpu_info->arch.cr2;
-    crs[3] = pagetable_get_paddr(v->arch.guest_table);
-    crs[4] = v->arch.pv_vcpu.ctrlreg[4];
-
-    _show_registers(&v->arch.user_regs, crs, CTXT_pv_guest, v);
-}
-
-void show_page_walk(unsigned long addr)
-{
-    unsigned long pfn, mfn, cr3 = read_cr3();
-    l3_pgentry_t l3e, *l3t;
-    l2_pgentry_t l2e, *l2t;
-    l1_pgentry_t l1e, *l1t;
-
-    printk("Pagetable walk from %08lx:\n", addr);
-
-    mfn = cr3 >> PAGE_SHIFT;
-
-    l3t  = map_domain_page(mfn);
-    l3t += (cr3 & 0xFE0UL) >> 3;
-    l3e = l3t[l3_table_offset(addr)];
-    mfn = l3e_get_pfn(l3e);
-    pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ?
-          get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
-    printk(" L3[0x%03lx] = %"PRIpte" %08lx\n",
-           l3_table_offset(addr), l3e_get_intpte(l3e), pfn);
-    unmap_domain_page(l3t);
-    if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) ||
-         !mfn_valid(mfn) )
-        return;
-
-    l2t = map_domain_page(mfn);
-    l2e = l2t[l2_table_offset(addr)];
-    mfn = l2e_get_pfn(l2e);
-    pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ?
-          get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
-    printk(" L2[0x%03lx] = %"PRIpte" %08lx %s\n",
-           l2_table_offset(addr), l2e_get_intpte(l2e), pfn,
-           (l2e_get_flags(l2e) & _PAGE_PSE) ? "(PSE)" : "");
-    unmap_domain_page(l2t);
-    if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ||
-         (l2e_get_flags(l2e) & _PAGE_PSE) ||
-         !mfn_valid(mfn) )
-        return;
-
-    l1t = map_domain_page(mfn);
-    l1e = l1t[l1_table_offset(addr)];
-    mfn = l1e_get_pfn(l1e);
-    pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ?
-          get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
-    printk(" L1[0x%03lx] = %"PRIpte" %08lx\n",
-           l1_table_offset(addr), l1e_get_intpte(l1e), pfn);
-    unmap_domain_page(l1t);
-}
-
-static DEFINE_PER_CPU_READ_MOSTLY(struct tss_struct *, doublefault_tss);
-static unsigned char __attribute__ ((__section__ (".bss.page_aligned")))
-    boot_cpu_doublefault_space[PAGE_SIZE];
-
-static int cpu_doublefault_tss_callback(
-    struct notifier_block *nfb, unsigned long action, void *hcpu)
-{
-    unsigned int cpu = (unsigned long)hcpu;
-    void *p;
-    int rc = 0;
-
-    switch ( action )
-    {
-    case CPU_UP_PREPARE:
-        per_cpu(doublefault_tss, cpu) = p = alloc_xenheap_page();
-        if ( p == NULL )
-            rc = -ENOMEM;
-        else
-            memset(p, 0, PAGE_SIZE);
-        break;
-    case CPU_UP_CANCELED:
-    case CPU_DEAD:
-        free_xenheap_page(per_cpu(doublefault_tss, cpu));
-        break;
-    default:
-        break;
-    }
-
-    return !rc ? NOTIFY_DONE : notifier_from_errno(rc);
-}
-
-static struct notifier_block cpu_doublefault_tss_nfb = {
-    .notifier_call = cpu_doublefault_tss_callback
-};
-
-void do_double_fault(void)
-{
-    struct tss_struct *tss;
-    unsigned int cpu;
-
-    watchdog_disable();
-
-    console_force_unlock();
-
-    asm ( "lsll %1, %0" : "=r" (cpu) : "rm" (PER_CPU_GDT_ENTRY << 3) );
-
-    /* Find information saved during fault and dump it to the console. */
-    tss = &per_cpu(init_tss, cpu);
-    printk("*** DOUBLE FAULT ***\n");
-    print_xen_info();
-    printk("CPU:    %d\nEIP:    %04x:[<%08x>]",
-           cpu, tss->cs, tss->eip);
-    print_symbol(" %s\n", tss->eip);
-    printk("EFLAGS: %08x\n", tss->eflags);
-    printk("CR3:    %08x\n", tss->__cr3);
-    printk("eax: %08x   ebx: %08x   ecx: %08x   edx: %08x\n",
-           tss->eax, tss->ebx, tss->ecx, tss->edx);
-    printk("esi: %08x   edi: %08x   ebp: %08x   esp: %08x\n",
-           tss->esi, tss->edi, tss->ebp, tss->esp);
-    printk("ds: %04x   es: %04x   fs: %04x   gs: %04x   ss: %04x\n",
-           tss->ds, tss->es, tss->fs, tss->gs, tss->ss);
-    show_stack_overflow(cpu, tss->esp);
-
-    panic("DOUBLE FAULT -- system shutdown\n");
-}
-
-unsigned long do_iret(void)
-{
-    struct cpu_user_regs *regs = guest_cpu_user_regs();
-    struct vcpu *v = current;
-    u32 eflags;
-
-    /* Check worst-case stack frame for overlap with Xen protected area. */
-    if ( unlikely(!access_ok(regs->esp, 40)) )
-        goto exit_and_crash;
-
-    /* Pop and restore EAX (clobbered by hypercall). */
-    if ( unlikely(__copy_from_user(&regs->eax, (void *)regs->esp, 4)) )
-        goto exit_and_crash;
-    regs->esp += 4;
-
-    /* Pop and restore CS and EIP. */
-    if ( unlikely(__copy_from_user(&regs->eip, (void *)regs->esp, 8)) )
-        goto exit_and_crash;
-    regs->esp += 8;
-
-    /*
-     * Pop, fix up and restore EFLAGS. We fix up in a local staging area
-     * to avoid firing the BUG_ON(IOPL) check in arch_get_info_guest.
-     */
-    if ( unlikely(__copy_from_user(&eflags, (void *)regs->esp, 4)) )
-        goto exit_and_crash;
-    regs->esp += 4;
-    regs->eflags = (eflags & ~X86_EFLAGS_IOPL) | X86_EFLAGS_IF;
-
-    if ( vm86_mode(regs) )
-    {
-        /* Return to VM86 mode: pop and restore ESP,SS,ES,DS,FS and GS. */
-        if ( __copy_from_user(&regs->esp, (void *)regs->esp, 24) )
-            goto exit_and_crash;
-    }
-    else if ( unlikely(ring_0(regs)) )
-    {
-        goto exit_and_crash;
-    }
-    else if ( !ring_1(regs) )
-    {
-        /* Return to ring 2/3: pop and restore ESP and SS. */
-        if ( __copy_from_user(&regs->esp, (void *)regs->esp, 8) )
-            goto exit_and_crash;
-    }
-
-    /* Restore upcall mask from supplied EFLAGS.IF. */
-    vcpu_info(v, evtchn_upcall_mask) = !(eflags & X86_EFLAGS_IF);
-
-    async_exception_cleanup(v);
-
-    /*
-     * The hypercall exit path will overwrite EAX with this return
-     * value.
-     */
-    return regs->eax;
-
- exit_and_crash:
-    gdprintk(XENLOG_ERR, "Fatal error\n");
-    domain_crash(v->domain);
-    return 0;
-}
-
-static void set_task_gate(unsigned int n, unsigned int sel)
-{
-    idt_table[n].b = 0;
-    wmb(); /* disable gate /then/ rewrite */
-    idt_table[n].a = sel << 16;
-    wmb(); /* rewrite /then/ enable gate */
-    idt_table[n].b = 0x8500;
-}
-
-void __devinit subarch_percpu_traps_init(void)
-{
-    struct tss_struct *tss;
-    int cpu = smp_processor_id();
-
-    if ( cpu == 0 )
-    {
-        /* The hypercall entry vector is only accessible from ring 1. */
-        _set_gate(idt_table+HYPERCALL_VECTOR, 14, 1, &hypercall);
-
-        this_cpu(doublefault_tss) = (void *)boot_cpu_doublefault_space;
-
-        register_cpu_notifier(&cpu_doublefault_tss_nfb);
-    }
-
-    tss = this_cpu(doublefault_tss);
-    BUG_ON(tss == NULL);
-
-    /*
-     * Make a separate task for double faults. This will get us debug output if
-     * we blow the kernel stack.
-     */
-    tss->ds     = __HYPERVISOR_DS;
-    tss->es     = __HYPERVISOR_DS;
-    tss->ss     = __HYPERVISOR_DS;
-    tss->esp    = (unsigned long)tss + PAGE_SIZE;
-    tss->__cr3  = __pa(idle_pg_table);
-    tss->cs     = __HYPERVISOR_CS;
-    tss->eip    = (unsigned long)do_double_fault;
-    tss->eflags = 2;
-    tss->bitmap = IOBMP_INVALID_OFFSET;
-    _set_tssldt_desc(
-        this_cpu(gdt_table) + DOUBLEFAULT_TSS_ENTRY - FIRST_RESERVED_GDT_ENTRY,
-        (unsigned long)tss, 235, 9);
-
-    set_task_gate(TRAP_double_fault, DOUBLEFAULT_TSS_ENTRY << 3);
-}
-
-void init_int80_direct_trap(struct vcpu *v)
-{
-    struct trap_info *ti = &v->arch.pv_vcpu.trap_ctxt[0x80];
-
-    /*
-     * We can't virtualise interrupt gates, as there's no way to get
-     * the CPU to automatically clear the events_mask variable. Also we
-     * must ensure that the CS is safe to poke into an interrupt gate.
-     *
-     * When running with supervisor_mode_kernel enabled a direct trap
-     * to the guest OS cannot be used because the INT instruction will
-     * switch to the Xen stack and we need to swap back to the guest
-     * kernel stack before passing control to the system call entry point.
-     */
-    if ( TI_GET_IF(ti) || !guest_gate_selector_okay(v->domain, ti->cs) ||
-         supervisor_mode_kernel )
-    {
-        v->arch.pv_vcpu.int80_desc.a = v->arch.pv_vcpu.int80_desc.b = 0;
-        return;
-    }
-
-    v->arch.pv_vcpu.int80_desc.a = (ti->cs << 16) | (ti->address & 0xffff);
-    v->arch.pv_vcpu.int80_desc.b =
-        (ti->address & 0xffff0000) | 0x8f00 | ((TI_GET_DPL(ti) & 3) << 13);
-
-    if ( v == current )
-        set_int80_direct_trap(v);
-}
-
-#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
-static void do_update_sysenter(void *info)
-{
-    xen_callback_t *address = info;
-
-    wrmsr(MSR_IA32_SYSENTER_CS, address->cs, 0);
-    wrmsr(MSR_IA32_SYSENTER_EIP, address->eip, 0);
-}
-#endif
-
-static long register_guest_callback(struct callback_register *reg)
-{
-    long ret = 0;
-    struct vcpu *v = current;
-
-    fixup_guest_code_selector(v->domain, reg->address.cs);
-
-    switch ( reg->type )
-    {
-    case CALLBACKTYPE_event:
-        v->arch.pv_vcpu.event_callback_cs     = reg->address.cs;
-        v->arch.pv_vcpu.event_callback_eip    = reg->address.eip;
-        break;
-
-    case CALLBACKTYPE_failsafe:
-        v->arch.pv_vcpu.failsafe_callback_cs  = reg->address.cs;
-        v->arch.pv_vcpu.failsafe_callback_eip = reg->address.eip;
-        if ( reg->flags & CALLBACKF_mask_events )
-            set_bit(_VGCF_failsafe_disables_events,
-                    &v->arch.vgc_flags);
-        else
-            clear_bit(_VGCF_failsafe_disables_events,
-                      &v->arch.vgc_flags);
-        break;
-
-#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
-    case CALLBACKTYPE_sysenter_deprecated:
-        if ( !cpu_has_sep )
-            ret = -EINVAL;
-        else
-            on_each_cpu(do_update_sysenter, &reg->address, 1);
-        break;
-
-    case CALLBACKTYPE_sysenter:
-        if ( !cpu_has_sep )
-            ret = -EINVAL;
-        else
-            do_update_sysenter(&reg->address);
-        break;
-#endif
-
-    case CALLBACKTYPE_nmi:
-        ret = register_guest_nmi_callback(reg->address.eip);
-        break;
-
-    default:
-        ret = -ENOSYS;
-        break;
-    }
-
-    return ret;
-}
-
-static long unregister_guest_callback(struct callback_unregister *unreg)
-{
-    long ret;
-
-    switch ( unreg->type )
-    {
-    case CALLBACKTYPE_event:
-    case CALLBACKTYPE_failsafe:
-#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
-    case CALLBACKTYPE_sysenter_deprecated:
-    case CALLBACKTYPE_sysenter:
-#endif
-        ret = -EINVAL;
-        break;
-
-    case CALLBACKTYPE_nmi:
-        ret = unregister_guest_nmi_callback();
-        break;
-
-    default:
-        ret = -ENOSYS;
-        break;
-    }
-
-    return ret;
-}
-
-
-long do_callback_op(int cmd, XEN_GUEST_HANDLE(const_void) arg)
-{
-    long ret;
-
-    switch ( cmd )
-    {
-    case CALLBACKOP_register:
-    {
-        struct callback_register reg;
-
-        ret = -EFAULT;
-        if ( copy_from_guest(&reg, arg, 1) )
-            break;
-
-        ret = register_guest_callback(&reg);
-    }
-    break;
-
-    case CALLBACKOP_unregister:
-    {
-        struct callback_unregister unreg;
-
-        ret = -EFAULT;
-        if ( copy_from_guest(&unreg, arg, 1) )
-            break;
-
-        ret = unregister_guest_callback(&unreg);
-    }
-    break;
-
-    default:
-        ret = -ENOSYS;
-        break;
-    }
-
-    return ret;
-}
-
-long do_set_callbacks(unsigned long event_selector,
-                      unsigned long event_address,
-                      unsigned long failsafe_selector,
-                      unsigned long failsafe_address)
-{
-    struct callback_register event = {
-        .type = CALLBACKTYPE_event,
-        .address = { event_selector, event_address },
-    };
-    struct callback_register failsafe = {
-        .type = CALLBACKTYPE_failsafe,
-        .address = { failsafe_selector, failsafe_address },
-    };
-
-    register_guest_callback(&event);
-    register_guest_callback(&failsafe);
-
-    return 0;
-}
-
-static void hypercall_page_initialise_ring0_kernel(void *hypercall_page)
-{
-    char *p;
-    int i;
-
-    /* Fill in all the transfer points with template machine code. */
-
-    for ( i = 0; i < (PAGE_SIZE / 32); i++ )
-    {
-        p = (char *)(hypercall_page + (i * 32));
-
-        *(u8  *)(p+ 0) = 0x9c;      /* pushf */
-        *(u8  *)(p+ 1) = 0xfa;      /* cli */
-        *(u8  *)(p+ 2) = 0xb8;      /* mov $<i>,%eax */
-        *(u32 *)(p+ 3) = i;
-        *(u8  *)(p+ 7) = 0x9a;      /* lcall $__HYPERVISOR_CS,&hypercall */
-        *(u32 *)(p+ 8) = (u32)&hypercall;
-        *(u16 *)(p+12) = (u16)__HYPERVISOR_CS;
-        *(u8  *)(p+14) = 0xc3;      /* ret */
-    }
-
-    /*
-     * HYPERVISOR_iret is special because it doesn't return and expects a
-     * special stack frame. Guests jump at this transfer point instead of
-     * calling it.
-     */
-    p = (char *)(hypercall_page + (__HYPERVISOR_iret * 32));
-    *(u8  *)(p+ 0) = 0x50;      /* push %eax */
-    *(u8  *)(p+ 1) = 0x9c;      /* pushf */
-    *(u8  *)(p+ 2) = 0xfa;      /* cli */
-    *(u8  *)(p+ 3) = 0xb8;      /* mov $<i>,%eax */
-    *(u32 *)(p+ 4) = __HYPERVISOR_iret;
-    *(u8  *)(p+ 8) = 0x9a;      /* lcall $__HYPERVISOR_CS,&hypercall */
-    *(u32 *)(p+ 9) = (u32)&hypercall;
-    *(u16 *)(p+13) = (u16)__HYPERVISOR_CS;
-}
-
-static void hypercall_page_initialise_ring1_kernel(void *hypercall_page)
-{
-    char *p;
-    int i;
-
-    /* Fill in all the transfer points with template machine code. */
-
-    for ( i = 0; i < (PAGE_SIZE / 32); i++ )
-    {
-        p = (char *)(hypercall_page + (i * 32));
-        *(u8  *)(p+ 0) = 0xb8;    /* mov  $<i>,%eax */
-        *(u32 *)(p+ 1) = i;
-        *(u16 *)(p+ 5) = (HYPERCALL_VECTOR << 8) | 0xcd; /* int  $xx */
-        *(u8  *)(p+ 7) = 0xc3;    /* ret */
-    }
-
-    /*
-     * HYPERVISOR_iret is special because it doesn't return and expects a 
-     * special stack frame. Guests jump at this transfer point instead of 
-     * calling it.
-     */
-    p = (char *)(hypercall_page + (__HYPERVISOR_iret * 32));
-    *(u8  *)(p+ 0) = 0x50;    /* push %eax */
-    *(u8  *)(p+ 1) = 0xb8;    /* mov  $__HYPERVISOR_iret,%eax */
-    *(u32 *)(p+ 2) = __HYPERVISOR_iret;
-    *(u16 *)(p+ 6) = (HYPERCALL_VECTOR << 8) | 0xcd; /* int  $xx */
-}
-
-void hypercall_page_initialise(struct domain *d, void *hypercall_page)
-{
-    memset(hypercall_page, 0xCC, PAGE_SIZE);
-    if ( is_hvm_domain(d) )
-        hvm_hypercall_page_initialise(d, hypercall_page);
-    else if ( supervisor_mode_kernel )
-        hypercall_page_initialise_ring0_kernel(hypercall_page);
-    else
-        hypercall_page_initialise_ring1_kernel(hypercall_page);
-}
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
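
For reference, the ring-1 hypercall page initialiser deleted above emitted one
32-byte stub per hypercall: "mov $<nr>,%eax ; int $<HYPERCALL_VECTOR> ; ret"
(the vector was 0x82 on 32-bit Xen). A minimal stand-alone sketch of an
equivalent stub emitter in plain C; illustrative only, the helper name is
hypothetical and not Xen's:

    #include <stdint.h>
    #include <string.h>

    /* Write one 32-byte ring-1 transfer stub for hypercall number 'nr',
     * mirroring the byte values used by
     * hypercall_page_initialise_ring1_kernel() above. */
    static void emit_ring1_stub(uint8_t *slot, uint32_t nr, uint8_t vector)
    {
        slot[0] = 0xb8;               /* mov $imm32, %eax          */
        memcpy(slot + 1, &nr, 4);     /* imm32 = hypercall number  */
        slot[5] = 0xcd;               /* int $imm8                 */
        slot[6] = vector;             /* imm8 = hypercall vector   */
        slot[7] = 0xc3;               /* ret                       */
    }

A 32-bit PV guest then invoked hypercall <nr> by calling into the page at
offset nr*32, with arguments passed in %ebx, %ecx, %edx, %esi and %edi.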
diff -r 05d82fb18335 -r bc8cb4778702 xen/arch/x86/xen.lds.S
--- a/xen/arch/x86/xen.lds.S    Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/arch/x86/xen.lds.S    Wed Sep 12 13:29:30 2012 +0100
@@ -18,11 +18,7 @@ ENTRY(efi_start)
 
 #else /* !EFI */
 
-#ifdef __x86_64__
 #define FORMAT "elf64-x86-64"
-#else
-#define FORMAT "elf32-i386"
-#endif
 
 ENTRY(start)
 
@@ -30,11 +26,7 @@ ENTRY(start)
 
 OUTPUT_FORMAT(FORMAT, FORMAT, FORMAT)
 
-#ifdef __x86_64__
 OUTPUT_ARCH(i386:x86-64)
-#else
-OUTPUT_ARCH(i386)
-#endif
 
 PHDRS
 {
@@ -42,7 +34,7 @@ PHDRS
 }
 SECTIONS
 {
-#if defined(__x86_64__) && !defined(EFI)
+#if !defined(EFI)
   . = __XEN_VIRT_START;
   __image_base__ = .;
 #endif
diff -r 05d82fb18335 -r bc8cb4778702 xen/common/Makefile
--- a/xen/common/Makefile       Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/common/Makefile       Wed Sep 12 13:29:30 2012 +0100
@@ -56,7 +56,6 @@ obj-$(CONFIG_XENCOMM) += xencomm.o
 
 subdir-$(CONFIG_COMPAT) += compat
 
-subdir-$(x86_32) += hvm
 subdir-$(x86_64) += hvm
 
 subdir-y += libelf
diff -r 05d82fb18335 -r bc8cb4778702 xen/common/tmem.c
--- a/xen/common/tmem.c Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/common/tmem.c Wed Sep 12 13:29:30 2012 +0100
@@ -50,7 +50,7 @@
 #define INVERT_SENTINEL(_x,_y) _x->sentinel = ~_y##_SENTINEL
 #define ASSERT_SENTINEL(_x,_y) \
     ASSERT(_x->sentinel != ~_y##_SENTINEL);ASSERT(_x->sentinel == _y##_SENTINEL)
-#if defined(__i386__) || defined(CONFIG_ARM)
+#if defined(CONFIG_ARM)
 #define POOL_SENTINEL 0x87658765
 #define OBJ_SENTINEL 0x12345678
 #define OBJNODE_SENTINEL 0xfedcba09
@@ -1233,11 +1233,7 @@ static client_t *client_create(cli_id_t 
         goto fail;
     }
     client->cli_id = cli_id;
-#ifdef __i386__
-    client->compress = 0;
-#else
     client->compress = tmh_compression_enabled();
-#endif
     client->shared_auth_required = tmh_shared_auth();
     for ( i = 0; i < MAX_GLOBAL_SHARED_POOLS; i++)
         client->shared_auth_uuid[i][0] =
@@ -1460,9 +1456,6 @@ static NOINLINE int do_tmem_put_compress
     ASSERT_SPINLOCK(&pgp->us.obj->obj_spinlock);
     ASSERT(pgp->us.obj->pool != NULL);
     ASSERT(pgp->us.obj->pool->client != NULL);
-#ifdef __i386__
-    return -ENOMEM;
-#endif
 
     if ( pgp->pfp != NULL )
         pgp_free_data(pgp, pgp->us.obj->pool);
@@ -2275,9 +2268,6 @@ static int tmemc_set_var_one(client_t *c
                         arg1, cli_id_str, cli_id);
         break;
     case TMEMC_SET_COMPRESS:
-#ifdef __i386__
-        return -1;
-#endif
         if ( tmh_dedup_enabled() )
         {
             tmh_client_warn("tmem: compression %s for all %ss, cannot be changed when tmem_dedup is enabled\n",
@@ -2892,9 +2882,6 @@ EXPORT void *tmem_relinquish_pages(unsig
 
     if (!tmh_enabled() || !tmh_freeable_pages())
         return NULL;
-#ifdef __i386__
-    return NULL;
-#endif
 
     relinq_attempts++;
     if ( order > 0 )
diff -r 05d82fb18335 -r bc8cb4778702 xen/common/tmem_xen.c
--- a/xen/common/tmem_xen.c     Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/common/tmem_xen.c     Wed Sep 12 13:29:30 2012 +0100
@@ -348,7 +348,6 @@ EXPORT void tmh_scrub_page(struct page_i
         scrub_one_page(pi);
 }
 
-#ifndef __i386__
 static noinline void *tmh_mempool_page_get(unsigned long size)
 {
     struct page_info *pi;
@@ -398,7 +397,6 @@ static void tmh_persistent_pool_page_put
     ASSERT(IS_VALID_PAGE(pi));
     _tmh_free_page_thispool(pi);
 }
-#endif
 
 /******************  XEN-SPECIFIC CLIENT HANDLING ********************/
 
@@ -413,7 +411,6 @@ EXPORT tmh_client_t *tmh_client_init(cli
     for (i = 0, shift = 12; i < 4; shift -=4, i++)
         name[i] = (((unsigned short)cli_id >> shift) & 0xf) + '0';
     name[4] = '\0';
-#ifndef __i386__
     tmh->persistent_pool = xmem_pool_create(name, tmh_persistent_pool_page_get,
         tmh_persistent_pool_page_put, PAGE_SIZE, 0, PAGE_SIZE);
     if ( tmh->persistent_pool == NULL )
@@ -421,23 +418,18 @@ EXPORT tmh_client_t *tmh_client_init(cli
         xfree(tmh);
         return NULL;
     }
-#endif
     return tmh;
 }
 
 EXPORT void tmh_client_destroy(tmh_client_t *tmh)
 {
     ASSERT(tmh->domain->is_dying);
-#ifndef __i386__
     xmem_pool_destroy(tmh->persistent_pool);
-#endif
     tmh->domain = NULL;
 }
 
 /******************  XEN-SPECIFIC HOST INITIALIZATION ********************/
 
-#ifndef __i386__
-
 static int dstmem_order, workmem_order;
 
 static int cpu_callback(
@@ -517,12 +509,3 @@ EXPORT int __init tmh_init(void)
 
     return 1;
 }
-
-#else
-
-EXPORT int __init tmh_init(void)
-{
-    return 1;
-}
-
-#endif
diff -r 05d82fb18335 -r bc8cb4778702 xen/common/wait.c
--- a/xen/common/wait.c Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/common/wait.c Wed Sep 12 13:29:30 2012 +0100
@@ -140,7 +140,6 @@ static void __prepare_to_wait(struct wai
     }
 
     asm volatile (
-#ifdef CONFIG_X86_64
         "push %%rax; push %%rbx; push %%rdx; "
         "push %%rbp; push %%r8; push %%r9; push %%r10; push %%r11; "
         "push %%r12; push %%r13; push %%r14; push %%r15; call 1f; "
@@ -151,15 +150,6 @@ static void __prepare_to_wait(struct wai
         "pop %%r15; pop %%r14; pop %%r13; pop %%r12; "
         "pop %%r11; pop %%r10; pop %%r9; pop %%r8; "
         "pop %%rbp; pop %%rdx; pop %%rbx; pop %%rax"
-#else
-        "push %%eax; push %%ebx; push %%edx; "
-        "push %%ebp; call 1f; "
-        "1: mov %%esp,%%esi; addl $2f-1b,(%%esp); "
-        "sub %%esi,%%ecx; cmp %3,%%ecx; jbe 2f; "
-        "xor %%esi,%%esi; jmp 3f; "
-        "2: rep movsb; mov %%esp,%%esi; 3: pop %%eax; "
-        "pop %%ebp; pop %%edx; pop %%ebx; pop %%eax"
-#endif
         : "=&S" (wqv->esp), "=&c" (dummy), "=&D" (dummy)
         : "i" (PAGE_SIZE), "1" (cpu_info), "2" (wqv->stack)
         : "memory" );
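
The 32-bit branch dropped from __prepare_to_wait() above did the same job as
the surviving 64-bit asm: work out how many bytes of the Xen stack are live
(cpu_info minus the current stack pointer), give up if that exceeds a page,
otherwise copy that span into wqv->stack and record the stack pointer. A rough
C rendering of that capture step, with hypothetical type and function names,
purely as a reading aid:

    #include <stddef.h>
    #include <string.h>

    struct wqv_sketch {            /* stand-in for struct waitqueue_vcpu   */
        char  stack[4096];         /* save area, assumed to be one page    */
        void *esp;                 /* stack pointer at capture time        */
    };

    static void capture_stack(struct wqv_sketch *wqv, void *sp, void *cpu_info)
    {
        size_t live = (char *)cpu_info - (char *)sp;   /* bytes in use     */

        if ( live <= sizeof(wqv->stack) )  /* the asm's "cmp ...; jbe" test */
        {
            memcpy(wqv->stack, sp, live);  /* the "rep movsb"               */
            wqv->esp = sp;
        }
        else
            wqv->esp = NULL;               /* too deep: caller treats this as an error */
    }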
diff -r 05d82fb18335 -r bc8cb4778702 xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c       Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/drivers/passthrough/vtd/iommu.c       Wed Sep 12 13:29:30 2012 +0100
@@ -880,11 +880,9 @@ static int iommu_page_fault_do_one(struc
                 seg, (source_id >> 8), PCI_SLOT(source_id & 0xFF),
                 PCI_FUNC(source_id & 0xFF), addr, iommu->reg,
                 fault_reason, reason);
-#ifndef __i386__ /* map_domain_page() cannot be used in this context */
        if (iommu_debug)
             print_vtd_entries(iommu, (source_id >> 8),
                           (source_id & 0xff), (addr >> PAGE_SHIFT));
-#endif
     }
     else
         INTEL_IOMMU_DEBUG(
diff -r 05d82fb18335 -r bc8cb4778702 xen/include/asm-x86/amd.h
--- a/xen/include/asm-x86/amd.h Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/include/asm-x86/amd.h Wed Sep 12 13:29:30 2012 +0100
@@ -146,11 +146,9 @@
 struct cpuinfo_x86;
 int cpu_has_amd_erratum(const struct cpuinfo_x86 *, int, ...);
 
-#ifdef __x86_64__
 extern s8 opt_allow_unsafe;
 
 void fam10h_check_enable_mmcfg(void);
 void check_enable_amd_mmconf_dmi(void);
-#endif
 
 #endif /* __AMD_H__ */
diff -r 05d82fb18335 -r bc8cb4778702 xen/include/asm-x86/apicdef.h
--- a/xen/include/asm-x86/apicdef.h     Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/include/asm-x86/apicdef.h     Wed Sep 12 13:29:30 2012 +0100
@@ -129,11 +129,7 @@
 /* It's only used in x2APIC mode of an x2APIC unit. */
 #define APIC_MSR_BASE 0x800
 
-#ifdef __i386__
- #define MAX_IO_APICS 64
-#else
- #define MAX_IO_APICS 128
-#endif
+#define MAX_IO_APICS 128
 
 /*
  * the local APIC register structure, memory mapped. Not terribly well
diff -r 05d82fb18335 -r bc8cb4778702 xen/include/asm-x86/asm_defns.h
--- a/xen/include/asm-x86/asm_defns.h   Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/include/asm-x86/asm_defns.h   Wed Sep 12 13:29:30 2012 +0100
@@ -12,11 +12,7 @@
 void ret_from_intr(void);
 #endif
 
-#ifdef __x86_64__
 #include <asm/x86_64/asm_defns.h>
-#else
-#include <asm/x86_32/asm_defns.h>
-#endif
 
 /* Exception table entry */
 #ifdef __ASSEMBLY__
diff -r 05d82fb18335 -r bc8cb4778702 xen/include/asm-x86/atomic.h
--- a/xen/include/asm-x86/atomic.h      Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/include/asm-x86/atomic.h      Wed Sep 12 13:29:30 2012 +0100
@@ -22,22 +22,8 @@ build_write_atomic(write_u8_atomic, "b",
 build_write_atomic(write_u16_atomic, "w", uint16_t, "r", )
 build_write_atomic(write_u32_atomic, "l", uint32_t, "r", )
 
-#ifdef __x86_64__
 build_read_atomic(read_u64_atomic, "q", uint64_t, "=r", )
 build_write_atomic(write_u64_atomic, "q", uint64_t, "r", )
-#else
-static inline uint64_t read_u64_atomic(const volatile uint64_t *addr)
-{
-    uint64_t *__addr = (uint64_t *)addr;
-    return __cmpxchg8b(__addr, 0, 0);
-}
-static inline void write_u64_atomic(volatile uint64_t *addr, uint64_t val)
-{
-    uint64_t old = *addr, new, *__addr = (uint64_t *)addr;
-    while ( (new = __cmpxchg8b(__addr, old, val)) != old )
-        old = new;
-}
-#endif
 
 #undef build_read_atomic
 #undef build_write_atomic
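
The deleted i386 fallbacks emulated 64-bit atomic accesses with a cmpxchg8b
retry loop, since a plain 8-byte load or store is not atomic on 32-bit x86.
The same pattern expressed with GCC's generic builtins, as an illustrative
sketch rather than Xen code:

    #include <stdint.h>

    static inline uint64_t read_u64_atomic_emul(const volatile uint64_t *addr)
    {
        /* A compare-exchange with old == new == 0 returns the current value
         * atomically, which is what the removed __cmpxchg8b(addr, 0, 0) did. */
        uint64_t expected = 0;
        __atomic_compare_exchange_n((volatile uint64_t *)addr, &expected, 0, 0,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
        return expected;
    }

    static inline void write_u64_atomic_emul(volatile uint64_t *addr, uint64_t val)
    {
        uint64_t old = *addr;
        /* Retry until the 8-byte compare-and-swap succeeds; the builtin
         * refreshes 'old' with the observed value on each failure. */
        while ( !__atomic_compare_exchange_n(addr, &old, val, 0,
                                             __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) )
            ;
    }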
diff -r 05d82fb18335 -r bc8cb4778702 xen/include/asm-x86/bug.h
--- a/xen/include/asm-x86/bug.h Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/include/asm-x86/bug.h Wed Sep 12 13:29:30 2012 +0100
@@ -1,11 +1,7 @@
 #ifndef __X86_BUG_H__
 #define __X86_BUG_H__
 
-#ifdef __x86_64__
 #include <asm/x86_64/bug.h>
-#else
-#include <asm/x86_32/bug.h>
-#endif
 
 struct bug_frame {
     unsigned char ud2[2];
diff -r 05d82fb18335 -r bc8cb4778702 xen/include/asm-x86/config.h
--- a/xen/include/asm-x86/config.h      Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/include/asm-x86/config.h      Wed Sep 12 13:29:30 2012 +0100
@@ -7,13 +7,8 @@
 #ifndef __X86_CONFIG_H__
 #define __X86_CONFIG_H__
 
-#if defined(__x86_64__)
-# define LONG_BYTEORDER 3
-# define CONFIG_PAGING_LEVELS 4
-#else
-# define LONG_BYTEORDER 2
-# define CONFIG_PAGING_LEVELS 3
-#endif
+#define LONG_BYTEORDER 3
+#define CONFIG_PAGING_LEVELS 4
 
 #define BYTES_PER_LONG (1 << LONG_BYTEORDER)
 #define BITS_PER_LONG (BYTES_PER_LONG << 3)
@@ -56,17 +51,10 @@
 
 #ifdef MAX_PHYS_CPUS
 #define NR_CPUS MAX_PHYS_CPUS
-#elif defined __i386__
-#define NR_CPUS 128
 #else
 #define NR_CPUS 256
 #endif
 
-#ifdef __i386__
-/* Maximum number of virtual CPUs in multi-processor guests. */
-#define MAX_VIRT_CPUS XEN_LEGACY_MAX_VCPUS
-#endif
-
 /* Maximum we can support with current vLAPIC ID mapping. */
 #define MAX_HVM_VCPUS 128
 
@@ -94,11 +82,7 @@
 #define MEMORY_GUARD
 #endif
 
-#ifdef __i386__
-#define STACK_ORDER 2
-#else
 #define STACK_ORDER 3
-#endif
 #define STACK_SIZE  (PAGE_SIZE << STACK_ORDER)
 
 /* Primary stack is restricted to 8kB by guard pages. */
@@ -123,8 +107,6 @@ extern unsigned char boot_edid_info[128]
 
 #define asmlinkage
 
-#if defined(__x86_64__)
-
 #define CONFIG_X86_64 1
 #define CONFIG_COMPAT 1
 
@@ -286,86 +268,8 @@ extern unsigned char boot_edid_info[128]
 #define __OS          "q"  /* Operation Suffix */
 #define __OP          "r"  /* Operand Prefix */
 
-#elif defined(__i386__)
-
-#define CONFIG_X86_32      1
-#define CONFIG_DOMAIN_PAGE 1
-
-/*
- * Memory layout (high to low):                          PAE-SIZE
- *                                                       ------
- *  I/O remapping area                                   ( 4MB)
- *  Direct-map (1:1) area [Xen code/data/heap]           (12MB)
- *  Per-domain mappings (inc. 4MB map_domain_page cache) ( 8MB)
- *  Shadow linear pagetable                              ( 8MB)
- *  Guest linear pagetable                               ( 8MB)
- *  Machine-to-physical translation table [writable]     (16MB)
- *  Frame-info table                                     (96MB)
- *   * Start of guest inaccessible area
- *  Machine-to-physical translation table [read-only]    (16MB)
- *   * Start of guest unmodifiable area
- */
-
-#define IOREMAP_MBYTES           4
-#define DIRECTMAP_MBYTES        12
-#define MAPCACHE_MBYTES          4
-#define PERDOMAIN_MBYTES         8
-
-#define LINEARPT_MBYTES          8
-#define MACHPHYS_MBYTES         16 /* 1 MB needed per 1 GB memory */
-#define FRAMETABLE_MBYTES       (MACHPHYS_MBYTES * 6)
-
-#define IOREMAP_VIRT_END       _AC(0,UL)
-#define IOREMAP_VIRT_START     (IOREMAP_VIRT_END - (IOREMAP_MBYTES<<20))
-#define DIRECTMAP_VIRT_END     IOREMAP_VIRT_START
-#define DIRECTMAP_VIRT_START   (DIRECTMAP_VIRT_END - (DIRECTMAP_MBYTES<<20))
-#define MAPCACHE_VIRT_END      DIRECTMAP_VIRT_START
-#define MAPCACHE_VIRT_START    (MAPCACHE_VIRT_END - (MAPCACHE_MBYTES<<20))
-#define PERDOMAIN_VIRT_END     DIRECTMAP_VIRT_START
-#define PERDOMAIN_VIRT_START   (PERDOMAIN_VIRT_END - (PERDOMAIN_MBYTES<<20))
-#define SH_LINEAR_PT_VIRT_END  PERDOMAIN_VIRT_START
-#define SH_LINEAR_PT_VIRT_START        (SH_LINEAR_PT_VIRT_END - (LINEARPT_MBYTES<<20))
-#define LINEAR_PT_VIRT_END     SH_LINEAR_PT_VIRT_START
-#define LINEAR_PT_VIRT_START   (LINEAR_PT_VIRT_END - (LINEARPT_MBYTES<<20))
-#define RDWR_MPT_VIRT_END      LINEAR_PT_VIRT_START
-#define RDWR_MPT_VIRT_START    (RDWR_MPT_VIRT_END - (MACHPHYS_MBYTES<<20))
-#define FRAMETABLE_VIRT_END    RDWR_MPT_VIRT_START
-#define FRAMETABLE_SIZE         (FRAMETABLE_MBYTES<<20)
-#define FRAMETABLE_VIRT_START  (FRAMETABLE_VIRT_END - FRAMETABLE_SIZE)
-#define RO_MPT_VIRT_END                FRAMETABLE_VIRT_START
-#define RO_MPT_VIRT_START      (RO_MPT_VIRT_END - (MACHPHYS_MBYTES<<20))
-
-#define DIRECTMAP_PHYS_END     (DIRECTMAP_MBYTES<<20)
-
-/* Maximum linear address accessible via guest memory segments. */
-#define GUEST_SEGMENT_MAX_ADDR  RO_MPT_VIRT_END
-
-/* Hypervisor owns top 168MB of virtual address space. */
-#define HYPERVISOR_VIRT_START   mk_unsigned_long(0xF5800000)
-
-#define L2_PAGETABLE_FIRST_XEN_SLOT \
-    (HYPERVISOR_VIRT_START >> L2_PAGETABLE_SHIFT)
-#define L2_PAGETABLE_LAST_XEN_SLOT  \
-    (~0UL >> L2_PAGETABLE_SHIFT)
-#define L2_PAGETABLE_XEN_SLOTS \
-    (L2_PAGETABLE_LAST_XEN_SLOT - L2_PAGETABLE_FIRST_XEN_SLOT + 1)
-
-#define PGT_base_page_table     PGT_l3_page_table
-
-#define __HYPERVISOR_CS 0xe008
-#define __HYPERVISOR_DS 0xe010
-
-/* For generic assembly code: use macros to define operation/operand sizes. */
-#define __OS          "l"  /* Operation Suffix */
-#define __OP          "e"  /* Operand Prefix */
-
-#endif /* __i386__ */
-
 #ifndef __ASSEMBLY__
 extern unsigned long xen_phys_start;
-#if defined(__i386__)
-extern unsigned long xenheap_phys_end;
-#endif
 #endif
 
 /* GDT/LDT shadow mapping area. The first per-domain-mapping sub-area. */
@@ -391,11 +295,7 @@ extern unsigned long xenheap_phys_end;
 #define PDPT_L2_ENTRIES       \
     ((PDPT_L1_ENTRIES + (1 << PAGETABLE_ORDER) - 1) >> PAGETABLE_ORDER)
 
-#if defined(__x86_64__)
 #define ELFSIZE 64
-#else
-#define ELFSIZE 32
-#endif
 
 #define ARCH_CRASH_SAVE_VMCOREINFO
 
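
With the i386 branch gone the configuration constants above are unconditional;
for the record they expand to the usual 64-bit values. A self-contained check
(PAGE_SIZE assumed to be 4096 here, as elsewhere in the tree):

    #include <assert.h>

    #define LONG_BYTEORDER 3
    #define BYTES_PER_LONG (1 << LONG_BYTEORDER)      /* 8                */
    #define BITS_PER_LONG  (BYTES_PER_LONG << 3)      /* 64               */
    #define PAGE_SIZE      4096UL                     /* assumed for demo */
    #define STACK_ORDER    3
    #define STACK_SIZE     (PAGE_SIZE << STACK_ORDER) /* 8 pages = 32 KiB */

    int main(void)
    {
        assert(BYTES_PER_LONG == 8 && BITS_PER_LONG == 64);
        assert(STACK_SIZE == 32768);
        return 0;
    }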
diff -r 05d82fb18335 -r bc8cb4778702 xen/include/asm-x86/cpufeature.h
--- a/xen/include/asm-x86/cpufeature.h  Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/include/asm-x86/cpufeature.h  Wed Sep 12 13:29:30 2012 +0100
@@ -165,33 +165,6 @@
 #define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1
 #define CPUID5_ECX_INTERRUPT_BREAK      0x2
 
-#ifdef __i386__
-#define cpu_has_vme            boot_cpu_has(X86_FEATURE_VME)
-#define cpu_has_de             boot_cpu_has(X86_FEATURE_DE)
-#define cpu_has_pse            boot_cpu_has(X86_FEATURE_PSE)
-#define cpu_has_tsc            boot_cpu_has(X86_FEATURE_TSC)
-#define cpu_has_pge            boot_cpu_has(X86_FEATURE_PGE)
-#define cpu_has_pat            boot_cpu_has(X86_FEATURE_PAT)
-#define cpu_has_apic           boot_cpu_has(X86_FEATURE_APIC)
-#define cpu_has_sep            boot_cpu_has(X86_FEATURE_SEP)
-#define cpu_has_mtrr           boot_cpu_has(X86_FEATURE_MTRR)
-#define cpu_has_mmx            boot_cpu_has(X86_FEATURE_MMX)
-#define cpu_has_fxsr           boot_cpu_has(X86_FEATURE_FXSR)
-#define cpu_has_xmm            boot_cpu_has(X86_FEATURE_XMM)
-#define cpu_has_xmm2           boot_cpu_has(X86_FEATURE_XMM2)
-#define cpu_has_xmm3           boot_cpu_has(X86_FEATURE_XMM3)
-#define cpu_has_ht             boot_cpu_has(X86_FEATURE_HT)
-#define cpu_has_syscall                boot_cpu_has(X86_FEATURE_SYSCALL)
-#define cpu_has_mp             boot_cpu_has(X86_FEATURE_MP)
-#define cpu_has_nx             boot_cpu_has(X86_FEATURE_NX)
-#define cpu_has_k6_mtrr                boot_cpu_has(X86_FEATURE_K6_MTRR)
-#define cpu_has_cyrix_arr      boot_cpu_has(X86_FEATURE_CYRIX_ARR)
-#define cpu_has_centaur_mcr    boot_cpu_has(X86_FEATURE_CENTAUR_MCR)
-#define cpu_has_clflush                boot_cpu_has(X86_FEATURE_CLFLSH)
-#define cpu_has_page1gb                0
-#define cpu_has_efer           (boot_cpu_data.x86_capability[1] & 0x20100800)
-#define cpu_has_fsgsbase       0
-#else /* __x86_64__ */
 #define cpu_has_vme            0
 #define cpu_has_de             1
 #define cpu_has_pse            1
@@ -217,7 +190,6 @@
 #define cpu_has_page1gb                boot_cpu_has(X86_FEATURE_PAGE1GB)
 #define cpu_has_efer           1
 #define cpu_has_fsgsbase       boot_cpu_has(X86_FEATURE_FSGSBASE)
-#endif
 
 #define cpu_has_smep            boot_cpu_has(X86_FEATURE_SMEP)
 
diff -r 05d82fb18335 -r bc8cb4778702 xen/include/asm-x86/current.h
--- a/xen/include/asm-x86/current.h     Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/include/asm-x86/current.h     Wed Sep 12 13:29:30 2012 +0100
@@ -19,9 +19,8 @@ struct cpu_info {
     unsigned int processor_id;
     struct vcpu *current_vcpu;
     unsigned long per_cpu_offset;
-#ifdef __x86_64__ /* get_stack_bottom() must be 16-byte aligned */
+    /* get_stack_bottom() must be 16-byte aligned */
     unsigned long __pad_for_stack_bottom;
-#endif
 };
 
 static inline struct cpu_info *get_cpu_info(void)
diff -r 05d82fb18335 -r bc8cb4778702 xen/include/asm-x86/desc.h
--- a/xen/include/asm-x86/desc.h        Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/include/asm-x86/desc.h        Wed Sep 12 13:29:30 2012 +0100
@@ -18,8 +18,6 @@
 
 #define LDT_ENTRY_SIZE 8
 
-#if defined(__x86_64__)
-
 #define FLAT_COMPAT_RING1_CS 0xe019  /* GDT index 259 */
 #define FLAT_COMPAT_RING1_DS 0xe021  /* GDT index 260 */
 #define FLAT_COMPAT_RING1_SS 0xe021  /* GDT index 260 */
@@ -38,30 +36,9 @@
 #define LDT_ENTRY (TSS_ENTRY + 2)
 #define PER_CPU_GDT_ENTRY (LDT_ENTRY + 2)
 
-#elif defined(__i386__)
-
-#define FLAT_COMPAT_KERNEL_CS FLAT_KERNEL_CS
-#define FLAT_COMPAT_KERNEL_DS FLAT_KERNEL_DS
-#define FLAT_COMPAT_KERNEL_SS FLAT_KERNEL_SS
-#define FLAT_COMPAT_USER_CS   FLAT_USER_CS
-#define FLAT_COMPAT_USER_DS   FLAT_USER_DS
-#define FLAT_COMPAT_USER_SS   FLAT_USER_SS
-
-#define DOUBLEFAULT_TSS_ENTRY FIRST_RESERVED_GDT_ENTRY
-
-#define TSS_ENTRY (FIRST_RESERVED_GDT_ENTRY + 8)
-#define LDT_ENTRY (TSS_ENTRY + 1)
-#define PER_CPU_GDT_ENTRY (LDT_ENTRY + 1)
-
-#endif
-
 #ifndef __ASSEMBLY__
 
-#if defined(__x86_64__)
 #define GUEST_KERNEL_RPL(d) (is_pv_32bit_domain(d) ? 1 : 3)
-#elif defined(__i386__)
-#define GUEST_KERNEL_RPL(d) ((void)(d), 1)
-#endif
 
 /* Fix up the RPL of a guest segment selector. */
 #define __fixup_guest_selector(d, sel)                             \
@@ -115,11 +92,7 @@
 #define _SEGMENT_S       ( 1<<12) /* System descriptor (yes iff S==0) */
 #define _SEGMENT_DPL     ( 3<<13) /* Descriptor Privilege Level */
 #define _SEGMENT_P       ( 1<<15) /* Segment Present */
-#ifdef __x86_64__
 #define _SEGMENT_L       ( 1<<21) /* 64-bit segment */
-#else
-#define _SEGMENT_L       0
-#endif
 #define _SEGMENT_DB      ( 1<<22) /* 16- or 32-bit segment */
 #define _SEGMENT_G       ( 1<<23) /* Granularity */
 
@@ -129,8 +102,6 @@ struct desc_struct {
     u32 a, b;
 };
 
-#if defined(__x86_64__)
-
 typedef struct {
     u64 a, b;
 } idt_entry_t;
@@ -165,40 +136,6 @@ do {                                    
         (((u32)(addr) & 0x00FF0000U) >> 16);             \
 } while (0)
 
-#elif defined(__i386__)
-
-typedef struct desc_struct idt_entry_t;
-
-#define _set_gate(gate_addr,type,dpl,addr)               \
-do {                                                     \
-    (gate_addr)->b = 0;                                  \
-    wmb(); /* disable gate /then/ rewrite */             \
-    (gate_addr)->a =                                     \
-        ((unsigned long)(addr) & 0xFFFFUL) |             \
-        ((unsigned long)__HYPERVISOR_CS << 16);          \
-    wmb(); /* rewrite /then/ enable gate */              \
-    (gate_addr)->b =                                     \
-        ((unsigned long)(addr) & 0xFFFF0000UL) |         \
-        ((unsigned long)(dpl) << 13) |                   \
-        ((unsigned long)(type) << 8) |                   \
-        (1UL << 15);                                     \
-} while (0)
-
-#define _set_tssldt_desc(desc,addr,limit,type)           \
-do {                                                     \
-    (desc)->b = 0;                                       \
-    wmb(); /* disable entry /then/ rewrite */            \
-    (desc)->a =                                          \
-        ((u32)(addr) << 16) | ((u32)(limit) & 0xFFFF);   \
-    wmb(); /* rewrite /then/ enable entry */             \
-    (desc)->b =                                          \
-        ((u32)(addr) & 0xFF000000U) |                    \
-        ((u32)(type) << 8) | 0x8000U |                   \
-        (((u32)(addr) & 0x00FF0000U) >> 16);             \
-} while (0)
-
-#endif
-
 struct desc_ptr {
        unsigned short limit;
        unsigned long base;
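
After this change GUEST_KERNEL_RPL(d) is always the 64-bit definition: 1 for a
32-on-64 PV guest, 3 otherwise. The selector fixup referred to by the comment
above therefore pins a guest kernel selector's low two RPL bits to that value;
a hypothetical stand-alone illustration (the real macro applies extra checks
before rewriting the selector):

    #include <stdbool.h>
    #include <stdint.h>

    static inline uint16_t fixup_guest_kernel_sel(bool is_pv_32on64, uint16_t sel)
    {
        uint16_t rpl = is_pv_32on64 ? 1 : 3;    /* GUEST_KERNEL_RPL(d)       */
        return (uint16_t)((sel & ~3u) | rpl);   /* force the privilege level */
    }

    /* e.g. fixup_guest_kernel_sel(false, 0xe008) yields 0xe00b */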
diff -r 05d82fb18335 -r bc8cb4778702 xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h      Wed Sep 12 13:24:28 2012 +0200
+++ b/xen/include/asm-x86/domain.h      Wed Sep 12 13:29:30 2012 +0100
@@ -13,11 +13,7 @@
 #define has_32bit_shinfo(d)    ((d)->arch.has_32bit_shinfo)
 #define is_pv_32bit_domain(d)  ((d)->arch.is_32bit_pv)
 #define is_pv_32bit_vcpu(v)    (is_pv_32bit_domain((v)->domain))
-#ifdef __x86_64__
 #define is_pv_32on64_domain(d) (is_pv_32bit_domain(d))
-#else
-#define is_pv_32on64_domain(d) (0)
-#endif
 #define is_pv_32on64_vcpu(v)   (is_pv_32on64_domain((v)->domain))
 
 #define is_hvm_pv_evtchn_domain(d) (is_hvm_domain(d) && \
@@ -243,18 +239,11 @@ struct pv_domain
 
 struct arch_domain
 {
-#ifdef CONFIG_X86_64
     struct page_info **mm_perdomain_pt_pages;
     l2_pgentry_t *mm_perdomain_l2;
     l3_pgentry_t *mm_perdomain_l3;
 
     unsigned int hv_compat_vstart;
-#else
-    l1_pgentry_t *mm_perdomain_pt;
-
-    /* map_domain_page() mapping cache. */
-    struct mapcache_domain mapcache;
-#endif
 
     bool_t s3_integrity;
 
@@ -330,7 +319,6 @@ struct arch_domain
 #define has_arch_pdevs(d)    (!list_empty(&(d)->arch.pdev_list))
 #define has_arch_mmios(d)    (!rangeset_is_empty((d)->iomem_caps))
 
-#ifdef CONFIG_X86_64
 #define perdomain_pt_pgidx(v) \
       ((v)->vcpu_id >> (PAGETABLE_ORDER - GDT_LDT_VCPU_SHIFT))
 #define perdomain_ptes(d, v) \
@@ -338,32 +326,6 @@ struct arch_domain
       [perdomain_pt_pgidx(v)]) + (((v)->vcpu_id << GDT_LDT_VCPU_SHIFT) & \
                                   (L1_PAGETABLE_ENTRIES - 1)))
 #define perdomain_pt_page(d, n) ((d)->arch.mm_perdomain_pt_pages[n])
-#else
-#define perdomain_ptes(d, v) \
-    ((d)->arch.mm_perdomain_pt + ((v)->vcpu_id << GDT_LDT_VCPU_SHIFT))
-#define perdomain_pt_page(d, n) \
-    (virt_to_page((d)->arch.mm_perdomain_pt) + (n))
-#endif
-
-
-#ifdef __i386__
-struct pae_l3_cache {
-    /*
-     * Two low-memory (<4GB) PAE L3 tables, used as fallback when the guest
-     * supplies a >=4GB PAE L3 table. We need two because we cannot set up
-     * an L3 table while we are currently running on it (without using
-     * expensive atomic 64-bit operations).
-     */
-    l3_pgentry_t  table[2][4] __attribute__((__aligned__(32)));
-    unsigned long high_mfn;  /* The >=4GB MFN being shadowed. */
-    unsigned int  inuse_idx; /* Which of the two cache slots is in use? */
-    spinlock_t    lock;
-};
-#define pae_l3_cache_init(c) spin_lock_init(&(c)->lock)
-#else /* !defined(__i386__) */
-struct pae_l3_cache { };
-#define pae_l3_cache_init(c) ((void)0)
-#endif
 
 struct pv_vcpu
 {
@@ -379,9 +341,7 @@ struct pv_vcpu
     unsigned long event_callback_eip;
     unsigned long failsafe_callback_eip;
     union {
-#ifdef CONFIG_X86_64
         unsigned long syscall_callback_eip;
-#endif
         struct {
             unsigned int event_callback_cs;
             unsigned int failsafe_callback_cs;
@@ -390,7 +350,6 @@ struct pv_vcpu
 
     unsigned long vm_assist;
 
-#ifdef CONFIG_X86_64
     unsigned long syscall32_callback_eip;
     unsigned long sysenter_callback_eip;
     unsigned short syscall32_callback_cs;
@@ -402,15 +361,10 @@ struct pv_vcpu
     unsigned long fs_base;
     unsigned long gs_base_kernel;
     unsigned long gs_base_user;
-#endif
 
     /* Bounce information for propagating an exception to guest OS. */
     struct trap_bounce trap_bounce;
-#ifdef CONFIG_X86_64
     struct trap_bounce int80_bounce;
-#else
-    struct desc_struct int80_desc;

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

 

