
[Xen-changelog] Re-indent vmx code.



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 3feb7fa331edc8f1f49223027bbde9ae695c4d38
# Parent  4508c22dc45839721807753c373916c6601d74e9
Re-indent vmx code.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
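
The target style is the one declared by the Emacs local-variables blocks
appended to each touched file: BSD style, a basic offset of four spaces, and
no hard tabs (indent-tabs-mode: nil). As the hunks below show, case labels
also move to the same depth as their enclosing switch. A short before/after
sketch of that convention, using a hypothetical fragment rather than lines
from this patch:

    /* Hypothetical snippet, shown only to illustrate the style. */

    /* Before: hard tabs, case labels one level deeper than the switch. */
    switch (op) {
            case OP_READ:
                    do_read();
                    break;
            default:
                    break;
    }

    /* After: four-space indent, spaces only, case labels level with the
       switch. */
    switch (op) {
    case OP_READ:
        do_read();
        break;
    default:
        break;
    }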

diff -r 4508c22dc458 -r 3feb7fa331ed xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c     Sun Sep 11 16:36:24 2005
+++ b/xen/arch/x86/shadow.c     Sun Sep 11 16:44:23 2005
@@ -54,7 +54,7 @@
 static void shadow_map_into_current(struct vcpu *v,
     unsigned long va, unsigned int from, unsigned int to);
 static inline void validate_bl2e_change( struct domain *d,
-       guest_root_pgentry_t *new_gle_p, pgentry_64_t *shadow_l3, int index);
+    guest_root_pgentry_t *new_gle_p, pgentry_64_t *shadow_l3, int index);
 
 #endif
 
diff -r 4508c22dc458 -r 3feb7fa331ed xen/arch/x86/shadow_public.c
--- a/xen/arch/x86/shadow_public.c      Sun Sep 11 16:36:24 2005
+++ b/xen/arch/x86/shadow_public.c      Sun Sep 11 16:44:23 2005
@@ -54,24 +54,24 @@
     switch(levels) {
 #if CONFIG_PAGING_LEVELS >= 4
     case 4:
-       if ( d->arch.ops != &MODE_F_HANDLER )
-           d->arch.ops = &MODE_F_HANDLER;
-       shadow_unlock(d);
+        if ( d->arch.ops != &MODE_F_HANDLER )
+            d->arch.ops = &MODE_F_HANDLER;
+        shadow_unlock(d);
         return 1;
 #endif
     case 3:
     case 2:
 #if CONFIG_PAGING_LEVELS == 2
-       if ( d->arch.ops != &MODE_A_HANDLER )
-           d->arch.ops = &MODE_A_HANDLER;
+        if ( d->arch.ops != &MODE_A_HANDLER )
+            d->arch.ops = &MODE_A_HANDLER;
 #elif CONFIG_PAGING_LEVELS == 4
-       if ( d->arch.ops != &MODE_D_HANDLER )
-           d->arch.ops = &MODE_D_HANDLER;
+        if ( d->arch.ops != &MODE_D_HANDLER )
+            d->arch.ops = &MODE_D_HANDLER;
 #endif
-       shadow_unlock(d);
+        shadow_unlock(d);
         return 1;
-   default:
-       shadow_unlock(d);
+    default:
+        shadow_unlock(d);
         return 0;
     }
 }
@@ -115,10 +115,10 @@
 
 struct out_of_sync_entry *
 shadow_mark_mfn_out_of_sync(struct vcpu *v, unsigned long gpfn,
-                             unsigned long mfn)
-{
-   struct domain *d = v->domain;
-   return d->arch.ops->mark_mfn_out_of_sync(v, gpfn, mfn);
+                            unsigned long mfn)
+{
+    struct domain *d = v->domain;
+    return d->arch.ops->mark_mfn_out_of_sync(v, gpfn, mfn);
 }
 
 /*
@@ -181,7 +181,7 @@
     l4_pgentry_t *mpl4e;
     struct pfn_info *mmfn_info;
     struct domain *d = v->domain;
-     pagetable_t phys_table;
+    pagetable_t phys_table;
 
     ASSERT(!pagetable_get_paddr(v->arch.monitor_table)); /* we should only get called once */
 
@@ -192,13 +192,13 @@
     mpl4e = (l4_pgentry_t *) map_domain_page(mmfn);
     memcpy(mpl4e, &idle_pg_table[0], PAGE_SIZE);
     mpl4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
-      l4e_from_paddr(__pa(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR);
+        l4e_from_paddr(__pa(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR);
     /* map the phys_to_machine map into the per domain Read-Only MPT space */
     phys_table = page_table_convert(d);
 
     mpl4e[l4_table_offset(RO_MPT_VIRT_START)] =
-       l4e_from_paddr(pagetable_get_paddr(phys_table),
-         __PAGE_HYPERVISOR);
+        l4e_from_paddr(pagetable_get_paddr(phys_table),
+                       __PAGE_HYPERVISOR);
     v->arch.monitor_table = mk_pagetable(mmfn << PAGE_SHIFT);
     v->arch.monitor_vtable = (l2_pgentry_t *) mpl4e;
 }
@@ -245,7 +245,7 @@
         for ( i = 0; i < PAGETABLE_ENTRIES; i++ )
             if ( external || is_guest_l4_slot(i) )
                 if ( entry_get_flags(ple[i]) & _PAGE_PRESENT )
-                        put_shadow_ref(entry_get_pfn(ple[i]));
+                    put_shadow_ref(entry_get_pfn(ple[i]));
 
         unmap_domain_page(ple);
     }
@@ -306,12 +306,12 @@
 
     mpl2e[l2_table_offset(PERDOMAIN_VIRT_START)] =
         l2e_from_paddr(__pa(d->arch.mm_perdomain_pt),
-                        __PAGE_HYPERVISOR);
+                       __PAGE_HYPERVISOR);
 
     // map the phys_to_machine map into the Read-Only MPT space for this domain
     mpl2e[l2_table_offset(RO_MPT_VIRT_START)] =
         l2e_from_paddr(pagetable_get_paddr(d->arch.phys_table),
-                        __PAGE_HYPERVISOR);
+                       __PAGE_HYPERVISOR);
 
     // Don't (yet) have mappings for these...
     // Don't want to accidentally see the idle_pg_table's linear mapping.
@@ -365,7 +365,7 @@
     v->arch.monitor_table = mk_pagetable(0);
     v->arch.monitor_vtable = 0;
 }
-#endif 
+#endif 
 
 static void
 shadow_free_snapshot(struct domain *d, struct out_of_sync_entry *entry)
@@ -850,16 +850,16 @@
         perfc_decr(free_l1_pages);
 
         struct pfn_info *page = list_entry(list_ent, struct pfn_info, list);
-       if (d->arch.ops->guest_paging_levels == PAGING_L2)
-       {
+        if (d->arch.ops->guest_paging_levels == PAGING_L2)
+        {
 #if CONFIG_PAGING_LEVELS >=4
-        free_domheap_pages(page, SL1_ORDER);
+            free_domheap_pages(page, SL1_ORDER);
 #else
-       free_domheap_page(page);
+            free_domheap_page(page);
 #endif
-       }
-       else
-       free_domheap_page(page);
+        }
+        else
+            free_domheap_page(page);
     }
 
     shadow_audit(d, 0);
@@ -930,9 +930,9 @@
 
 #if defined(CONFIG_PAGING_LEVELS)
     if(!shadow_set_guest_paging_levels(d, 
-          CONFIG_PAGING_LEVELS)) {
-       printk("Unsupported guest paging levels\n");
-       domain_crash_synchronous(); /* need to take a clean path */
+                                       CONFIG_PAGING_LEVELS)) {
+        printk("Unsupported guest paging levels\n");
+        domain_crash_synchronous(); /* need to take a clean path */
     }
 #endif
 
@@ -1004,7 +1004,7 @@
             goto nomem;
 
         memset(d->arch.shadow_ht, 0,
-           shadow_ht_buckets * sizeof(struct shadow_status));
+               shadow_ht_buckets * sizeof(struct shadow_status));
     }
 
     if ( new_modes & SHM_log_dirty )
@@ -1013,7 +1013,7 @@
         d->arch.shadow_dirty_bitmap_size = (d->max_pages + 63) & ~63;
         d->arch.shadow_dirty_bitmap = 
             xmalloc_array(unsigned long, d->arch.shadow_dirty_bitmap_size /
-                                         (8 * sizeof(unsigned long)));
+                          (8 * sizeof(unsigned long)));
         if ( d->arch.shadow_dirty_bitmap == NULL )
         {
             d->arch.shadow_dirty_bitmap_size = 0;
@@ -1039,7 +1039,7 @@
             // external guests provide their own memory for their P2M maps.
             //
             ASSERT( d == page_get_owner(
-                        &frame_table[pagetable_get_pfn(d->arch.phys_table)]) );
+                &frame_table[pagetable_get_pfn(d->arch.phys_table)]) );
         }
     }
 
@@ -1188,9 +1188,9 @@
                           chunk : (d->max_pages - i)) + 7) / 8;
 
             if (copy_to_user(
-                    sc->dirty_bitmap + (i/(8*sizeof(unsigned long))),
-                    d->arch.shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))),
-                    bytes))
+                sc->dirty_bitmap + (i/(8*sizeof(unsigned long))),
+                d->arch.shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))),
+                bytes))
             {
                 // copy_to_user can fail when copying to guest app memory.
                 // app should zero buffer after mallocing, and pin it
@@ -1474,8 +1474,8 @@
 
         spl3e = (pgentry_64_t *) map_domain_page_with_cache(sl3mfn, cache);
         validate_entry_change(d, (pgentry_64_t *) &gpde,
-                             &spl3e[(pa & ~PAGE_MASK) / sizeof(l3_pgentry_t)], 
-                             shadow_type_to_level(PGT_l3_shadow));
+                              &spl3e[(pa & ~PAGE_MASK) / sizeof(l3_pgentry_t)],
+                              shadow_type_to_level(PGT_l3_shadow));
         unmap_domain_page_with_cache(spl3e, cache);
     }
 
@@ -1502,8 +1502,8 @@
 
         spl4e = (pgentry_64_t *)map_domain_page_with_cache(sl4mfn, cache);
         validate_entry_change(d, (pgentry_64_t *)&gpde,
-                             &spl4e[(pa & ~PAGE_MASK) / sizeof(l4_pgentry_t)], 
-                             shadow_type_to_level(PGT_l4_shadow));
+                              &spl4e[(pa & ~PAGE_MASK) / sizeof(l4_pgentry_t)],
+                              shadow_type_to_level(PGT_l4_shadow));
         unmap_domain_page_with_cache(spl4e, cache);
     }
 
@@ -1619,7 +1619,7 @@
 }
 
 static u32 remove_all_access_in_page(
-  struct domain *d, unsigned long l1mfn, unsigned long forbidden_gmfn)
+    struct domain *d, unsigned long l1mfn, unsigned long forbidden_gmfn)
 {
     l1_pgentry_t *pl1e = map_domain_page(l1mfn);
     l1_pgentry_t match;
@@ -1627,8 +1627,8 @@
     int i;
     u32 count = 0;
     int is_l1_shadow =
-      ((frame_table[l1mfn].u.inuse.type_info & PGT_type_mask) ==
-       PGT_l1_shadow);
+        ((frame_table[l1mfn].u.inuse.type_info & PGT_type_mask) ==
+         PGT_l1_shadow);
 
     match = l1e_from_pfn(forbidden_gmfn, flags);
 
@@ -1671,19 +1671,19 @@
         {
             switch (a->gpfn_and_flags & PGT_type_mask)
             {
-                case PGT_l1_shadow:
-                case PGT_l2_shadow:
-                case PGT_l3_shadow:
-                case PGT_l4_shadow:
-                case PGT_hl2_shadow:
-                    count += remove_all_access_in_page(d, a->smfn, forbidden_gmfn);
-                    break;
-                case PGT_snapshot:
-                case PGT_writable_pred:
-                    // these can't hold refs to the forbidden page
-                    break;
-                default:
-                    BUG();
+            case PGT_l1_shadow:
+            case PGT_l2_shadow:
+            case PGT_l3_shadow:
+            case PGT_l4_shadow:
+            case PGT_hl2_shadow:
+                count += remove_all_access_in_page(d, a->smfn, forbidden_gmfn);
+                break;
+            case PGT_snapshot:
+            case PGT_writable_pred:
+                // these can't hold refs to the forbidden page
+                break;
+            default:
+                BUG();
             }
 
             a = a->next;
@@ -1694,29 +1694,29 @@
 }
 
 void shadow_drop_references(
-  struct domain *d, struct pfn_info *page)
+    struct domain *d, struct pfn_info *page)
 {
     if ( likely(!shadow_mode_refcounts(d)) ||
-      ((page->u.inuse.type_info & PGT_count_mask) == 0) )
+         ((page->u.inuse.type_info & PGT_count_mask) == 0) )
         return;
 
     /* XXX This needs more thought... */
     printk("%s: needing to call __shadow_remove_all_access for mfn=%lx\n",
-      __func__, page_to_pfn(page));
+           __func__, page_to_pfn(page));
     printk("Before: mfn=%lx c=%08x t=%" PRtype_info "\n", page_to_pfn(page),
-      page->count_info, page->u.inuse.type_info);
+           page->count_info, page->u.inuse.type_info);
 
     shadow_lock(d);
     __shadow_remove_all_access(d, page_to_pfn(page));
     shadow_unlock(d);
 
     printk("After:  mfn=%lx c=%08x t=%" PRtype_info "\n", page_to_pfn(page),
-      page->count_info, page->u.inuse.type_info);
+           page->count_info, page->u.inuse.type_info);
 }
 
 /* XXX Needs more thought. Neither pretty nor fast: a place holder. */
 void shadow_sync_and_drop_references(
-  struct domain *d, struct pfn_info *page)
+    struct domain *d, struct pfn_info *page)
 {
     if ( likely(!shadow_mode_refcounts(d)) )
         return;
@@ -1730,3 +1730,13 @@
 
     shadow_unlock(d);
 }
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r 4508c22dc458 -r 3feb7fa331ed xen/arch/x86/vmx.c
--- a/xen/arch/x86/vmx.c        Sun Sep 11 16:36:24 2005
+++ b/xen/arch/x86/vmx.c        Sun Sep 11 16:44:23 2005
@@ -122,37 +122,37 @@
     struct vcpu *vc = current;
     struct msr_state * msr = &vc->arch.arch_vmx.msr_content;
     switch(regs->ecx){
-        case MSR_EFER:
-            msr_content = msr->msr_items[VMX_INDEX_MSR_EFER];
-            VMX_DBG_LOG(DBG_LEVEL_2, "EFER msr_content %llx\n", (unsigned long long)msr_content);
-            if (test_bit(VMX_CPU_STATE_LME_ENABLED,
-                          &vc->arch.arch_vmx.cpu_state))
-                msr_content |= 1 << _EFER_LME;
-
-            if (VMX_LONG_GUEST(vc))
-                msr_content |= 1 << _EFER_LMA;
-            break;
-        case MSR_FS_BASE:
-            if (!(VMX_LONG_GUEST(vc)))
-                /* XXX should it be GP fault */
-                domain_crash();
-            __vmread(GUEST_FS_BASE, &msr_content);
-            break;
-        case MSR_GS_BASE:
-            if (!(VMX_LONG_GUEST(vc)))
-                domain_crash();
-            __vmread(GUEST_GS_BASE, &msr_content);
-            break;
-        case MSR_SHADOW_GS_BASE:
-            msr_content = msr->shadow_gs;
-            break;
+    case MSR_EFER:
+        msr_content = msr->msr_items[VMX_INDEX_MSR_EFER];
+        VMX_DBG_LOG(DBG_LEVEL_2, "EFER msr_content %llx\n", (unsigned long long)msr_content);
+        if (test_bit(VMX_CPU_STATE_LME_ENABLED,
+                     &vc->arch.arch_vmx.cpu_state))
+            msr_content |= 1 << _EFER_LME;
+
+        if (VMX_LONG_GUEST(vc))
+            msr_content |= 1 << _EFER_LMA;
+        break;
+    case MSR_FS_BASE:
+        if (!(VMX_LONG_GUEST(vc)))
+            /* XXX should it be GP fault */
+            domain_crash();
+        __vmread(GUEST_FS_BASE, &msr_content);
+        break;
+    case MSR_GS_BASE:
+        if (!(VMX_LONG_GUEST(vc)))
+            domain_crash();
+        __vmread(GUEST_GS_BASE, &msr_content);
+        break;
+    case MSR_SHADOW_GS_BASE:
+        msr_content = msr->shadow_gs;
+        break;
 
         CASE_READ_MSR(STAR);
         CASE_READ_MSR(LSTAR);
         CASE_READ_MSR(CSTAR);
         CASE_READ_MSR(SYSCALL_MASK);
-        default:
-            return 0;
+    default:
+        return 0;
     }
     VMX_DBG_LOG(DBG_LEVEL_2, "mode_do_msr_read: msr_content: %lx\n", msr_content);
     regs->eax = msr_content & 0xffffffff;
@@ -166,68 +166,68 @@
     struct vcpu *vc = current;
     struct msr_state * msr = &vc->arch.arch_vmx.msr_content;
     struct msr_state * host_state = 
-               &percpu_msr[smp_processor_id()];
+        &percpu_msr[smp_processor_id()];
 
     VMX_DBG_LOG(DBG_LEVEL_1, " mode_do_msr_write msr %lx msr_content %lx\n", 
                 regs->ecx, msr_content);
 
     switch (regs->ecx){
-        case MSR_EFER:
-            if ((msr_content & EFER_LME) ^
-                  test_bit(VMX_CPU_STATE_LME_ENABLED,
-                           &vc->arch.arch_vmx.cpu_state)){
-                if (test_bit(VMX_CPU_STATE_PG_ENABLED,
-                             &vc->arch.arch_vmx.cpu_state) ||
-                    !test_bit(VMX_CPU_STATE_PAE_ENABLED,
-                        &vc->arch.arch_vmx.cpu_state)){
-                     vmx_inject_exception(vc, TRAP_gp_fault, 0);
-                }
+    case MSR_EFER:
+        if ((msr_content & EFER_LME) ^
+            test_bit(VMX_CPU_STATE_LME_ENABLED,
+                     &vc->arch.arch_vmx.cpu_state)){
+            if (test_bit(VMX_CPU_STATE_PG_ENABLED,
+                         &vc->arch.arch_vmx.cpu_state) ||
+                !test_bit(VMX_CPU_STATE_PAE_ENABLED,
+                          &vc->arch.arch_vmx.cpu_state)){
+                vmx_inject_exception(vc, TRAP_gp_fault, 0);
             }
-            if (msr_content & EFER_LME)
-                set_bit(VMX_CPU_STATE_LME_ENABLED,
-                        &vc->arch.arch_vmx.cpu_state);
-            /* No update for LME/LMA since it have no effect */
-            msr->msr_items[VMX_INDEX_MSR_EFER] =
-                  msr_content;
-            if (msr_content & ~(EFER_LME | EFER_LMA)){
-                msr->msr_items[VMX_INDEX_MSR_EFER] = msr_content;
-                if (!test_bit(VMX_INDEX_MSR_EFER, &msr->flags)){ 
-                    rdmsrl(MSR_EFER,
-                            host_state->msr_items[VMX_INDEX_MSR_EFER]);
-                      set_bit(VMX_INDEX_MSR_EFER, &host_state->flags);
-                      set_bit(VMX_INDEX_MSR_EFER, &msr->flags);  
-                      wrmsrl(MSR_EFER, msr_content);
-                }
+        }
+        if (msr_content & EFER_LME)
+            set_bit(VMX_CPU_STATE_LME_ENABLED,
+                    &vc->arch.arch_vmx.cpu_state);
+        /* No update for LME/LMA since it have no effect */
+        msr->msr_items[VMX_INDEX_MSR_EFER] =
+            msr_content;
+        if (msr_content & ~(EFER_LME | EFER_LMA)){
+            msr->msr_items[VMX_INDEX_MSR_EFER] = msr_content;
+            if (!test_bit(VMX_INDEX_MSR_EFER, &msr->flags)){ 
+                rdmsrl(MSR_EFER,
+                       host_state->msr_items[VMX_INDEX_MSR_EFER]);
+                set_bit(VMX_INDEX_MSR_EFER, &host_state->flags);
+                set_bit(VMX_INDEX_MSR_EFER, &msr->flags);  
+                wrmsrl(MSR_EFER, msr_content);
             }
-            break;
-
-        case MSR_FS_BASE:
-        case MSR_GS_BASE:
-           if (!(VMX_LONG_GUEST(vc)))
-                domain_crash();
-           if (!IS_CANO_ADDRESS(msr_content)){
-               VMX_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write\n");
-               vmx_inject_exception(vc, TRAP_gp_fault, 0);
-           }
-           if (regs->ecx == MSR_FS_BASE)
-               __vmwrite(GUEST_FS_BASE, msr_content);
-           else 
-               __vmwrite(GUEST_GS_BASE, msr_content);
-           break;
-
-        case MSR_SHADOW_GS_BASE:
-           if (!(VMX_LONG_GUEST(vc)))
-               domain_crash();
-           vc->arch.arch_vmx.msr_content.shadow_gs = msr_content;
-           wrmsrl(MSR_SHADOW_GS_BASE, msr_content);
-           break;
-
-           CASE_WRITE_MSR(STAR);
-           CASE_WRITE_MSR(LSTAR);
-           CASE_WRITE_MSR(CSTAR);
-           CASE_WRITE_MSR(SYSCALL_MASK);
-        default:
-            return 0;
+        }
+        break;
+
+    case MSR_FS_BASE:
+    case MSR_GS_BASE:
+        if (!(VMX_LONG_GUEST(vc)))
+            domain_crash();
+        if (!IS_CANO_ADDRESS(msr_content)){
+            VMX_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write\n");
+            vmx_inject_exception(vc, TRAP_gp_fault, 0);
+        }
+        if (regs->ecx == MSR_FS_BASE)
+            __vmwrite(GUEST_FS_BASE, msr_content);
+        else 
+            __vmwrite(GUEST_GS_BASE, msr_content);
+        break;
+
+    case MSR_SHADOW_GS_BASE:
+        if (!(VMX_LONG_GUEST(vc)))
+            domain_crash();
+        vc->arch.arch_vmx.msr_content.shadow_gs = msr_content;
+        wrmsrl(MSR_SHADOW_GS_BASE, msr_content);
+        break;
+
+        CASE_WRITE_MSR(STAR);
+        CASE_WRITE_MSR(LSTAR);
+        CASE_WRITE_MSR(CSTAR);
+        CASE_WRITE_MSR(SYSCALL_MASK);
+    default:
+        return 0;
     }
     return 1;
 }
@@ -252,8 +252,8 @@
         i = find_first_set_bit(guest_flags);
 
         VMX_DBG_LOG(DBG_LEVEL_2,
-          "restore guest's index %d msr %lx with %lx\n",
-          i, (unsigned long) msr_data_index[i], (unsigned long) guest_state->msr_items[i]);
+                    "restore guest's index %d msr %lx with %lx\n",
+                    i, (unsigned long) msr_data_index[i], (unsigned long) guest_state->msr_items[i]);
         set_bit(i, &host_state->flags);
         wrmsrl(msr_data_index[i], guest_state->msr_items[i]);
         clear_bit(i, &guest_flags);
@@ -309,8 +309,8 @@
 
     if (eax & IA32_FEATURE_CONTROL_MSR_LOCK) {
         if ((eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON) == 0x0) {
-                printk("VMX disabled by Feature Control MSR.\n");
-                return 0;
+            printk("VMX disabled by Feature Control MSR.\n");
+            return 0;
         }
     }
     else {
@@ -320,16 +320,16 @@
     }
 
     if (!check_vmx_controls(MONITOR_PIN_BASED_EXEC_CONTROLS, 
-            MSR_IA32_VMX_PINBASED_CTLS_MSR))
+                            MSR_IA32_VMX_PINBASED_CTLS_MSR))
         return 0;
     if (!check_vmx_controls(MONITOR_CPU_BASED_EXEC_CONTROLS, 
-            MSR_IA32_VMX_PROCBASED_CTLS_MSR))
+                            MSR_IA32_VMX_PROCBASED_CTLS_MSR))
         return 0;
     if (!check_vmx_controls(MONITOR_VM_EXIT_CONTROLS, 
-            MSR_IA32_VMX_EXIT_CTLS_MSR))
+                            MSR_IA32_VMX_EXIT_CTLS_MSR))
         return 0;
     if (!check_vmx_controls(MONITOR_VM_ENTRY_CONTROLS, 
-            MSR_IA32_VMX_ENTRY_CTLS_MSR))
+                            MSR_IA32_VMX_ENTRY_CTLS_MSR))
         return 0;
 
     set_in_cr4(X86_CR4_VMXE);   /* Enable VMXE */
@@ -385,8 +385,8 @@
     {
         __vmread(GUEST_RIP, &eip);
         VMX_DBG_LOG(DBG_LEVEL_VMMU, 
-                "vmx_do_page_fault = 0x%lx, eip = %lx, error_code = %lx",
-                va, eip, (unsigned long)regs->error_code);
+                    "vmx_do_page_fault = 0x%lx, eip = %lx, error_code = %lx",
+                    va, eip, (unsigned long)regs->error_code);
     }
 #endif
 
@@ -478,8 +478,8 @@
     regs->edx = (unsigned long) edx;
 
     VMX_DBG_LOG(DBG_LEVEL_1, 
-            "vmx_vmexit_do_cpuid: eip: %lx, input: %lx, out:eax=%x, ebx=%x, ecx=%x, edx=%x",
-            eip, input, eax, ebx, ecx, edx);
+                "vmx_vmexit_do_cpuid: eip: %lx, input: %lx, out:eax=%x, ebx=%x, ecx=%x, edx=%x",
+                eip, input, eax, ebx, ecx, edx);
 
 }
 
@@ -607,7 +607,7 @@
 }
 
 void send_pio_req(struct cpu_user_regs *regs, unsigned long port,
-       unsigned long count, int size, long value, int dir, int pvalid)
+                  unsigned long count, int size, long value, int dir, int pvalid)
 {
     struct vcpu *v = current;
     vcpu_iodata_t *vio;
@@ -620,8 +620,8 @@
     }
 
     if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags)) {
-       printf("VMX I/O has not yet completed\n");
-       domain_crash_synchronous();
+        printf("VMX I/O has not yet completed\n");
+        domain_crash_synchronous();
     }
     set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
 
@@ -656,7 +656,7 @@
 }
 
 static void vmx_io_instruction(struct cpu_user_regs *regs, 
-                   unsigned long exit_qualification, unsigned long inst_len) 
+                               unsigned long exit_qualification, unsigned long inst_len)
 {
     struct mi_per_cpu_info *mpcip;
     unsigned long eip, cs, eflags;
@@ -686,10 +686,10 @@
     dir = test_bit(3, &exit_qualification); /* direction */
 
     if (test_bit(4, &exit_qualification)) { /* string instruction */
-       unsigned long addr, count = 1;
-       int sign = regs->eflags & EF_DF ? -1 : 1;
-
-       __vmread(GUEST_LINEAR_ADDRESS, &addr);
+        unsigned long addr, count = 1;
+        int sign = regs->eflags & EF_DF ? -1 : 1;
+
+        __vmread(GUEST_LINEAR_ADDRESS, &addr);
 
         /*
          * In protected mode, guest linear address is invalid if the
@@ -699,35 +699,35 @@
             addr = dir == IOREQ_WRITE ? regs->esi : regs->edi;
 
         if (test_bit(5, &exit_qualification)) { /* "rep" prefix */
-           mpcip->flags |= REPZ;
-           count = vm86 ? regs->ecx & 0xFFFF : regs->ecx;
-       }
-
-       /*
-        * Handle string pio instructions that cross pages or that
-        * are unaligned. See the comments in vmx_platform.c/handle_mmio()
-        */
-       if ((addr & PAGE_MASK) != ((addr + size - 1) & PAGE_MASK)) {
-           unsigned long value = 0;
-
-           mpcip->flags |= OVERLAP;
-           if (dir == IOREQ_WRITE)
-               vmx_copy(&value, addr, size, VMX_COPY_IN);
-           send_pio_req(regs, port, 1, size, value, dir, 0);
-       } else {
-           if ((addr & PAGE_MASK) != ((addr + count * size - 1) & PAGE_MASK)) {
+            mpcip->flags |= REPZ;
+            count = vm86 ? regs->ecx & 0xFFFF : regs->ecx;
+        }
+
+        /*
+         * Handle string pio instructions that cross pages or that
+         * are unaligned. See the comments in vmx_platform.c/handle_mmio()
+         */
+        if ((addr & PAGE_MASK) != ((addr + size - 1) & PAGE_MASK)) {
+            unsigned long value = 0;
+
+            mpcip->flags |= OVERLAP;
+            if (dir == IOREQ_WRITE)
+                vmx_copy(&value, addr, size, VMX_COPY_IN);
+            send_pio_req(regs, port, 1, size, value, dir, 0);
+        } else {
+            if ((addr & PAGE_MASK) != ((addr + count * size - 1) & PAGE_MASK)) {
                 if (sign > 0)
                     count = (PAGE_SIZE - (addr & ~PAGE_MASK)) / size;
                 else
                     count = (addr & ~PAGE_MASK) / size;
-           } else
-               __update_guest_eip(inst_len);
-
-           send_pio_req(regs, port, count, size, addr, dir, 1);
-       }
+            } else
+                __update_guest_eip(inst_len);
+
+            send_pio_req(regs, port, count, size, addr, dir, 1);
+        }
     } else {
         __update_guest_eip(inst_len);
-       send_pio_req(regs, port, 1, size, regs->eax, dir, 0);
+        send_pio_req(regs, port, 1, size, regs->eax, dir, 0);
     }
 }
 
@@ -739,30 +739,30 @@
     int count;
 
     while (size > 0) {
-       count = PAGE_SIZE - (laddr & ~PAGE_MASK);
-       if (count > size)
-           count = size;
-
-       if (vmx_paging_enabled(current)) {
-               gpa = gva_to_gpa(laddr);
-               mfn = get_mfn_from_pfn(gpa >> PAGE_SHIFT);
-       } else
-               mfn = get_mfn_from_pfn(laddr >> PAGE_SHIFT);
-       if (mfn == INVALID_MFN)
-               return 0;
-
-       addr = (char *)map_domain_page(mfn) + (laddr & ~PAGE_MASK);
-
-       if (dir == VMX_COPY_IN)
-           memcpy(buf, addr, count);
-       else
-           memcpy(addr, buf, count);
-
-       unmap_domain_page(addr);
-
-       laddr += count;
-       buf += count;
-       size -= count;
+        count = PAGE_SIZE - (laddr & ~PAGE_MASK);
+        if (count > size)
+            count = size;
+
+        if (vmx_paging_enabled(current)) {
+            gpa = gva_to_gpa(laddr);
+            mfn = get_mfn_from_pfn(gpa >> PAGE_SHIFT);
+        } else
+            mfn = get_mfn_from_pfn(laddr >> PAGE_SHIFT);
+        if (mfn == INVALID_MFN)
+            return 0;
+
+        addr = (char *)map_domain_page(mfn) + (laddr & ~PAGE_MASK);
+
+        if (dir == VMX_COPY_IN)
+            memcpy(buf, addr, count);
+        else
+            memcpy(addr, buf, count);
+
+        unmap_domain_page(addr);
+
+        laddr += count;
+        buf += count;
+        size -= count;
     }
 
     return 1;
@@ -846,47 +846,47 @@
     error |= __vmwrite(CR0_READ_SHADOW, c->cr0);
 
     if (!vmx_paging_enabled(d)) {
-       VMX_DBG_LOG(DBG_LEVEL_VMMU, "switching to vmxassist. use phys table");
-       __vmwrite(GUEST_CR3, pagetable_get_paddr(d->domain->arch.phys_table));
+        VMX_DBG_LOG(DBG_LEVEL_VMMU, "switching to vmxassist. use phys table");
+        __vmwrite(GUEST_CR3, pagetable_get_paddr(d->domain->arch.phys_table));
         goto skip_cr3;
     }
 
     if (c->cr3 == d->arch.arch_vmx.cpu_cr3) {
-       /* 
-        * This is simple TLB flush, implying the guest has 
-        * removed some translation or changed page attributes.
-        * We simply invalidate the shadow.
-        */
-       mfn = get_mfn_from_pfn(c->cr3 >> PAGE_SHIFT);
-       if (mfn != pagetable_get_pfn(d->arch.guest_table)) {
-           printk("Invalid CR3 value=%x", c->cr3);
-           domain_crash_synchronous();
-           return 0;
-       }
-       shadow_sync_all(d->domain);
+        /* 
+         * This is simple TLB flush, implying the guest has 
+         * removed some translation or changed page attributes.
+         * We simply invalidate the shadow.
+         */
+        mfn = get_mfn_from_pfn(c->cr3 >> PAGE_SHIFT);
+        if (mfn != pagetable_get_pfn(d->arch.guest_table)) {
+            printk("Invalid CR3 value=%x", c->cr3);
+            domain_crash_synchronous();
+            return 0;
+        }
+        shadow_sync_all(d->domain);
     } else {
-       /*
-        * If different, make a shadow. Check if the PDBR is valid
-        * first.
-        */
-       VMX_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %x", c->cr3);
-       if ((c->cr3 >> PAGE_SHIFT) > d->domain->max_pages) {
-           printk("Invalid CR3 value=%x", c->cr3);
-           domain_crash_synchronous(); 
-           return 0;
-       }
-       mfn = get_mfn_from_pfn(c->cr3 >> PAGE_SHIFT);
-       d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
-       update_pagetables(d);
-       /* 
-        * arch.shadow_table should now hold the next CR3 for shadow
-        */
-       d->arch.arch_vmx.cpu_cr3 = c->cr3;
-       VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %x", c->cr3);
-       __vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table));
-    }
-
-skip_cr3:
+        /*
+         * If different, make a shadow. Check if the PDBR is valid
+         * first.
+         */
+        VMX_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %x", c->cr3);
+        if ((c->cr3 >> PAGE_SHIFT) > d->domain->max_pages) {
+            printk("Invalid CR3 value=%x", c->cr3);
+            domain_crash_synchronous(); 
+            return 0;
+        }
+        mfn = get_mfn_from_pfn(c->cr3 >> PAGE_SHIFT);
+        d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
+        update_pagetables(d);
+        /* 
+         * arch.shadow_table should now hold the next CR3 for shadow
+         */
+        d->arch.arch_vmx.cpu_cr3 = c->cr3;
+        VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %x", c->cr3);
+        __vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table));
+    }
+
+ skip_cr3:
 
     error |= __vmread(CR4_READ_SHADOW, &old_cr4);
     error |= __vmwrite(GUEST_CR4, (c->cr4 | VMX_CR4_HOST_MASK));
@@ -952,59 +952,59 @@
 
     /* make sure vmxassist exists (this is not an error) */
     if (!vmx_copy(&magic, VMXASSIST_MAGIC_OFFSET, sizeof(magic), VMX_COPY_IN))
-       return 0;
+        return 0;
     if (magic != VMXASSIST_MAGIC)
-       return 0;
+        return 0;
 
     switch (mode) {
-    /*
-     * Transfer control to vmxassist.
-     * Store the current context in VMXASSIST_OLD_CONTEXT and load
-     * the new VMXASSIST_NEW_CONTEXT context. This context was created
-     * by vmxassist and will transfer control to it.
-     */
+        /*
+         * Transfer control to vmxassist.
+         * Store the current context in VMXASSIST_OLD_CONTEXT and load
+         * the new VMXASSIST_NEW_CONTEXT context. This context was created
+         * by vmxassist and will transfer control to it.
+         */
     case VMX_ASSIST_INVOKE:
-       /* save the old context */
-       if (!vmx_copy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), VMX_COPY_IN))
-           goto error;
-       if (cp != 0) {
-           if (!vmx_world_save(d, &c))
-               goto error;
-           if (!vmx_copy(&c, cp, sizeof(c), VMX_COPY_OUT))
-               goto error;
-       }
-
-       /* restore the new context, this should activate vmxassist */
-       if (!vmx_copy(&cp, VMXASSIST_NEW_CONTEXT, sizeof(cp), VMX_COPY_IN))
-           goto error;
-       if (cp != 0) {
+        /* save the old context */
+        if (!vmx_copy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), VMX_COPY_IN))
+            goto error;
+        if (cp != 0) {
+            if (!vmx_world_save(d, &c))
+                goto error;
+            if (!vmx_copy(&c, cp, sizeof(c), VMX_COPY_OUT))
+                goto error;
+        }
+
+        /* restore the new context, this should activate vmxassist */
+        if (!vmx_copy(&cp, VMXASSIST_NEW_CONTEXT, sizeof(cp), VMX_COPY_IN))
+            goto error;
+        if (cp != 0) {
             if (!vmx_copy(&c, cp, sizeof(c), VMX_COPY_IN))
-               goto error;
-           if (!vmx_world_restore(d, &c))
-               goto error;
-           return 1;
-       }
-       break;
-
-    /*
-     * Restore the VMXASSIST_OLD_CONTEXT that was saved by VMX_ASSIST_INVOKE
-     * above.
-     */
+                goto error;
+            if (!vmx_world_restore(d, &c))
+                goto error;
+            return 1;
+        }
+        break;
+
+        /*
+         * Restore the VMXASSIST_OLD_CONTEXT that was saved by VMX_ASSIST_INVOKE
+         * above.
+         */
     case VMX_ASSIST_RESTORE:
-       /* save the old context */
-       if (!vmx_copy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), VMX_COPY_IN))
-           goto error;
-       if (cp != 0) {
+        /* save the old context */
+        if (!vmx_copy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), VMX_COPY_IN))
+            goto error;
+        if (cp != 0) {
             if (!vmx_copy(&c, cp, sizeof(c), VMX_COPY_IN))
-               goto error;
-           if (!vmx_world_restore(d, &c))
-               goto error;
-           return 1;
-       }
-       break;
-    }
-
-error:
+                goto error;
+            if (!vmx_world_restore(d, &c))
+                goto error;
+            return 1;
+        }
+        break;
+    }
+
+ error:
     printf("Failed to transfer to vmxassist\n");
     domain_crash_synchronous(); 
     return 0;
@@ -1031,7 +1031,7 @@
          * The guest CR3 must be pointing to the guest physical.
          */
         if ( !VALID_MFN(mfn = get_mfn_from_pfn(
-                            d->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT)) ||
+            d->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT)) ||
              !get_page(pfn_to_page(mfn), d->domain) )
         {
             printk("Invalid CR3 value = %lx", d->arch.arch_vmx.cpu_cr3);
@@ -1040,18 +1040,18 @@
 
 #if defined(__x86_64__)
         if (test_bit(VMX_CPU_STATE_LME_ENABLED,
-              &d->arch.arch_vmx.cpu_state) &&
-          !test_bit(VMX_CPU_STATE_PAE_ENABLED,
-              &d->arch.arch_vmx.cpu_state)){
+                     &d->arch.arch_vmx.cpu_state) &&
+            !test_bit(VMX_CPU_STATE_PAE_ENABLED,
+                      &d->arch.arch_vmx.cpu_state)){
             VMX_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enable\n");
             vmx_inject_exception(d, TRAP_gp_fault, 0);
         }
         if (test_bit(VMX_CPU_STATE_LME_ENABLED,
-              &d->arch.arch_vmx.cpu_state)){
+                     &d->arch.arch_vmx.cpu_state)){
             /* Here the PAE is should to be opened */
             VMX_DBG_LOG(DBG_LEVEL_1, "Enable the Long mode\n");
             set_bit(VMX_CPU_STATE_LMA_ENABLED,
-              &d->arch.arch_vmx.cpu_state);
+                    &d->arch.arch_vmx.cpu_state);
             __vmread(VM_ENTRY_CONTROLS, &vm_entry_value);
             vm_entry_value |= VM_ENTRY_CONTROLS_IA32E_MODE;
             __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
@@ -1073,17 +1073,17 @@
 #endif
         }
 
-       unsigned long crn;
+        unsigned long crn;
         /* update CR4's PAE if needed */
         __vmread(GUEST_CR4, &crn);
         if ( (!(crn & X86_CR4_PAE)) &&
-          test_bit(VMX_CPU_STATE_PAE_ENABLED,
-              &d->arch.arch_vmx.cpu_state)){
+             test_bit(VMX_CPU_STATE_PAE_ENABLED,
+                      &d->arch.arch_vmx.cpu_state)){
             VMX_DBG_LOG(DBG_LEVEL_1, "enable PAE on cr4\n");
             __vmwrite(GUEST_CR4, crn | X86_CR4_PAE);
         }
 #elif defined( __i386__)
-               unsigned long old_base_mfn;
+        unsigned long old_base_mfn;
         old_base_mfn = pagetable_get_pfn(d->arch.guest_table);
         if (old_base_mfn)
             put_page(pfn_to_page(old_base_mfn));
@@ -1095,14 +1095,14 @@
         update_pagetables(d);
 
         VMX_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx", 
-                (unsigned long) (mfn << PAGE_SHIFT));
+                    (unsigned long) (mfn << PAGE_SHIFT));
 
         __vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table));
         /* 
          * arch->shadow_table should hold the next CR3 for shadow
          */
         VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx", 
-                d->arch.arch_vmx.cpu_cr3, mfn);
+                    d->arch.arch_vmx.cpu_cr3, mfn);
     }
 
     /*
@@ -1129,29 +1129,29 @@
                 __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
             }
         }
-       __vmread(GUEST_RIP, &eip);
-       VMX_DBG_LOG(DBG_LEVEL_1,
-           "Disabling CR0.PE at %%eip 0x%lx\n", eip);
-       if (vmx_assist(d, VMX_ASSIST_INVOKE)) {
-           set_bit(VMX_CPU_STATE_ASSIST_ENABLED, &d->arch.arch_vmx.cpu_state);
-           __vmread(GUEST_RIP, &eip);
-           VMX_DBG_LOG(DBG_LEVEL_1,
-               "Transfering control to vmxassist %%eip 0x%lx\n", eip);
-           return 0; /* do not update eip! */
-       }
+        __vmread(GUEST_RIP, &eip);
+        VMX_DBG_LOG(DBG_LEVEL_1,
+                    "Disabling CR0.PE at %%eip 0x%lx\n", eip);
+        if (vmx_assist(d, VMX_ASSIST_INVOKE)) {
+            set_bit(VMX_CPU_STATE_ASSIST_ENABLED, &d->arch.arch_vmx.cpu_state);
+            __vmread(GUEST_RIP, &eip);
+            VMX_DBG_LOG(DBG_LEVEL_1,
+                        "Transfering control to vmxassist %%eip 0x%lx\n", eip);
+            return 0; /* do not update eip! */
+        }
     } else if (test_bit(VMX_CPU_STATE_ASSIST_ENABLED,
-                                       &d->arch.arch_vmx.cpu_state)) {
-       __vmread(GUEST_RIP, &eip);
-       VMX_DBG_LOG(DBG_LEVEL_1,
-           "Enabling CR0.PE at %%eip 0x%lx\n", eip);
-       if (vmx_assist(d, VMX_ASSIST_RESTORE)) {
-           clear_bit(VMX_CPU_STATE_ASSIST_ENABLED,
-                                       &d->arch.arch_vmx.cpu_state);
-           __vmread(GUEST_RIP, &eip);
-           VMX_DBG_LOG(DBG_LEVEL_1,
-               "Restoring to %%eip 0x%lx\n", eip);
-           return 0; /* do not update eip! */
-       }
+                        &d->arch.arch_vmx.cpu_state)) {
+        __vmread(GUEST_RIP, &eip);
+        VMX_DBG_LOG(DBG_LEVEL_1,
+                    "Enabling CR0.PE at %%eip 0x%lx\n", eip);
+        if (vmx_assist(d, VMX_ASSIST_RESTORE)) {
+            clear_bit(VMX_CPU_STATE_ASSIST_ENABLED,
+                      &d->arch.arch_vmx.cpu_state);
+            __vmread(GUEST_RIP, &eip);
+            VMX_DBG_LOG(DBG_LEVEL_1,
+                        "Restoring to %%eip 0x%lx\n", eip);
+            return 0; /* do not update eip! */
+        }
     }
 
     return 1;
@@ -1198,8 +1198,8 @@
         CASE_GET_REG(ESI, esi);
         CASE_GET_REG(EDI, edi);
         CASE_EXTEND_GET_REG
-    case REG_ESP:
-        __vmread(GUEST_RSP, &value);
+            case REG_ESP:
+                __vmread(GUEST_RSP, &value);
         break;
     default:
         printk("invalid gp: %d\n", gp);
@@ -1212,7 +1212,7 @@
     switch(cr) {
     case 0: 
     {
-       return vmx_set_cr0(value);
+        return vmx_set_cr0(value);
     }
     case 3: 
     {
@@ -1262,7 +1262,7 @@
              */
             d->arch.arch_vmx.cpu_cr3 = value;
             VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx",
-                    value);
+                        value);
             __vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table));
         }
         break;
@@ -1332,8 +1332,8 @@
         CASE_SET_REG(ESI, esi);
         CASE_SET_REG(EDI, edi);
         CASE_EXTEND_SET_REG
-    case REG_ESP:
-        __vmwrite(GUEST_RSP, value);
+            case REG_ESP:
+                __vmwrite(GUEST_RSP, value);
         regs->esp = value;
         break;
     default:
@@ -1381,9 +1381,9 @@
     case TYPE_LMSW:
         TRACE_VMEXIT(1,TYPE_LMSW);
         __vmread(CR0_READ_SHADOW, &value);
-       value = (value & ~0xF) |
-               (((exit_qualification & LMSW_SOURCE_DATA) >> 16) & 0xF);
-       return vmx_set_cr0(value);
+        value = (value & ~0xF) |
+            (((exit_qualification & LMSW_SOURCE_DATA) >> 16) & 0xF);
+        return vmx_set_cr0(value);
         break;
     default:
         __vmx_bug(regs);
@@ -1400,20 +1400,20 @@
                 (unsigned long)regs->ecx, (unsigned long)regs->eax, 
                 (unsigned long)regs->edx);
     switch (regs->ecx) {
-        case MSR_IA32_SYSENTER_CS:
-            __vmread(GUEST_SYSENTER_CS, (u32 *)&msr_content);
-            break;
-        case MSR_IA32_SYSENTER_ESP:
-             __vmread(GUEST_SYSENTER_ESP, &msr_content);
-            break;
-        case MSR_IA32_SYSENTER_EIP:
-            __vmread(GUEST_SYSENTER_EIP, &msr_content);
-            break;
-        default:
-            if(long_mode_do_msr_read(regs))
-                return;
-            rdmsr_user(regs->ecx, regs->eax, regs->edx);
-            break;
+    case MSR_IA32_SYSENTER_CS:
+        __vmread(GUEST_SYSENTER_CS, (u32 *)&msr_content);
+        break;
+    case MSR_IA32_SYSENTER_ESP:
+        __vmread(GUEST_SYSENTER_ESP, &msr_content);
+        break;
+    case MSR_IA32_SYSENTER_EIP:
+        __vmread(GUEST_SYSENTER_EIP, &msr_content);
+        break;
+    default:
+        if(long_mode_do_msr_read(regs))
+            return;
+        rdmsr_user(regs->ecx, regs->eax, regs->edx);
+        break;
     }
 
     regs->eax = msr_content & 0xFFFFFFFF;
@@ -1436,18 +1436,18 @@
     msr_content = (regs->eax & 0xFFFFFFFF) | ((u64)regs->edx << 32);
 
     switch (regs->ecx) {
-        case MSR_IA32_SYSENTER_CS:
-            __vmwrite(GUEST_SYSENTER_CS, msr_content);
-            break;
-        case MSR_IA32_SYSENTER_ESP:
-             __vmwrite(GUEST_SYSENTER_ESP, msr_content);
-            break;
-        case MSR_IA32_SYSENTER_EIP:
-            __vmwrite(GUEST_SYSENTER_EIP, msr_content);
-            break;
-        default:
-            long_mode_do_msr_write(regs);
-            break;
+    case MSR_IA32_SYSENTER_CS:
+        __vmwrite(GUEST_SYSENTER_CS, msr_content);
+        break;
+    case MSR_IA32_SYSENTER_ESP:
+        __vmwrite(GUEST_SYSENTER_ESP, msr_content);
+        break;
+    case MSR_IA32_SYSENTER_EIP:
+        __vmwrite(GUEST_SYSENTER_EIP, msr_content);
+        break;
+    default:
+        long_mode_do_msr_write(regs);
+        break;
     }
 
     VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_write returns: "
@@ -1491,28 +1491,28 @@
     local_irq_disable();
 
     switch(vector) {
-        case LOCAL_TIMER_VECTOR:
-            smp_apic_timer_interrupt(regs);
-            break;
-        case EVENT_CHECK_VECTOR:
-            smp_event_check_interrupt();
-            break;
-        case INVALIDATE_TLB_VECTOR:
-            smp_invalidate_interrupt();
-            break;
-        case CALL_FUNCTION_VECTOR:
-            smp_call_function_interrupt();
-            break;
-        case SPURIOUS_APIC_VECTOR:
-            smp_spurious_interrupt(regs);
-            break;
-        case ERROR_APIC_VECTOR:
-            smp_error_interrupt(regs);
-            break;
-        default:
-            regs->entry_vector = vector;
-            do_IRQ(regs);
-            break;
+    case LOCAL_TIMER_VECTOR:
+        smp_apic_timer_interrupt(regs);
+        break;
+    case EVENT_CHECK_VECTOR:
+        smp_event_check_interrupt();
+        break;
+    case INVALIDATE_TLB_VECTOR:
+        smp_invalidate_interrupt();
+        break;
+    case CALL_FUNCTION_VECTOR:
+        smp_call_function_interrupt();
+        break;
+    case SPURIOUS_APIC_VECTOR:
+        smp_spurious_interrupt(regs);
+        break;
+    case ERROR_APIC_VECTOR:
+        smp_error_interrupt(regs);
+        break;
+    default:
+        regs->entry_vector = vector;
+        do_IRQ(regs);
+        break;
     }
 }
 
@@ -1604,17 +1604,17 @@
 
     __vmread(IDT_VECTORING_INFO_FIELD, &idtv_info_field);
     if (idtv_info_field & INTR_INFO_VALID_MASK) {
-       __vmwrite(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
-
-       __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len);
-       if (inst_len >= 1 && inst_len <= 15) 
-           __vmwrite(VM_ENTRY_INSTRUCTION_LEN, inst_len);
-
-       if (idtv_info_field & 0x800) { /* valid error code */
-           unsigned long error_code;
-           __vmread(IDT_VECTORING_ERROR_CODE, &error_code);
-           __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
-       } 
+        __vmwrite(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
+
+        __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len);
+        if (inst_len >= 1 && inst_len <= 15) 
+            __vmwrite(VM_ENTRY_INSTRUCTION_LEN, inst_len);
+
+        if (idtv_info_field & 0x800) { /* valid error code */
+            unsigned long error_code;
+            __vmread(IDT_VECTORING_ERROR_CODE, &error_code);
+            __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
+        } 
 
         VMX_DBG_LOG(DBG_LEVEL_1, "idtv_info_field=%x", idtv_info_field);
     }
@@ -1652,7 +1652,7 @@
             __vmx_bug(&regs);
         vector &= 0xff;
 
-        TRACE_VMEXIT(1,vector);
+        TRACE_VMEXIT(1,vector);
         perfc_incra(cause_vector, vector);
 
         TRACE_3D(TRC_VMX_VECTOR, v->domain->domain_id, eip, vector);
@@ -1698,8 +1698,8 @@
             __vmread(EXIT_QUALIFICATION, &va);
             __vmread(VM_EXIT_INTR_ERROR_CODE, &regs.error_code);
             
-           TRACE_VMEXIT(3,regs.error_code);
-           TRACE_VMEXIT(4,va);
+            TRACE_VMEXIT(3,regs.error_code);
+            TRACE_VMEXIT(4,va);
 
             VMX_DBG_LOG(DBG_LEVEL_VMMU, 
                         "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
@@ -1732,7 +1732,7 @@
         break;
     case EXIT_REASON_PENDING_INTERRUPT:
         __vmwrite(CPU_BASED_VM_EXEC_CONTROL, 
-              MONITOR_CPU_BASED_EXEC_CONTROLS);
+                  MONITOR_CPU_BASED_EXEC_CONTROLS);
         break;
     case EXIT_REASON_TASK_SWITCH:
         __vmx_bug(&regs);
@@ -1772,10 +1772,10 @@
         __vmread(EXIT_QUALIFICATION, &exit_qualification);
 
         VMX_DBG_LOG(DBG_LEVEL_1, "eip = %lx, inst_len =%lx, exit_qualification = %lx",
-                eip, inst_len, exit_qualification);
+                    eip, inst_len, exit_qualification);
         if (vmx_cr_access(exit_qualification, &regs))
-           __update_guest_eip(inst_len);
-        TRACE_VMEXIT(3,regs.error_code);
+            __update_guest_eip(inst_len);
+        TRACE_VMEXIT(3,regs.error_code);
         TRACE_VMEXIT(4,exit_qualification);
         break;
     }
@@ -1828,8 +1828,8 @@
 asmlinkage void trace_vmentry (void)
 {
     TRACE_5D(TRC_VMENTRY,trace_values[current->processor][0],
-          trace_values[current->processor][1],trace_values[current->processor][2],
-          trace_values[current->processor][3],trace_values[current->processor][4]);
+             trace_values[current->processor][1],trace_values[current->processor][2],
+             trace_values[current->processor][3],trace_values[current->processor][4]);
     TRACE_VMEXIT(0,9);
     TRACE_VMEXIT(1,9);
     TRACE_VMEXIT(2,9);
diff -r 4508c22dc458 -r 3feb7fa331ed xen/arch/x86/vmx_intercept.c
--- a/xen/arch/x86/vmx_intercept.c      Sun Sep 11 16:36:24 2005
+++ b/xen/arch/x86/vmx_intercept.c      Sun Sep 11 16:44:23 2005
@@ -45,8 +45,8 @@
         addr   = handler->hdl_list[i].addr;
         offset = handler->hdl_list[i].offset;
         if (p->addr >= addr &&
-           p->addr <  addr + offset)
-           return handler->hdl_list[i].action(p);
+            p->addr <  addr + offset)
+            return handler->hdl_list[i].action(p);
     }
     return 0;
 }
@@ -172,22 +172,22 @@
 
     if (p->size != 1 ||
         p->pdata_valid ||
-       p->type != IOREQ_TYPE_PIO)
+        p->type != IOREQ_TYPE_PIO)
         return 0;
     
     if (p->addr == PIT_MODE &&
-       p->dir == 0 &&                          /* write */
-        ((p->u.data >> 4) & 0x3) == 0 &&       /* latch command */
+        p->dir == 0 &&    /* write */
+        ((p->u.data >> 4) & 0x3) == 0 && /* latch command */
         ((p->u.data >> 6) & 0x3) == (vpit->channel)) {/* right channel */
         pit_latch_io(vpit);
-       return 1;
+        return 1;
     }
 
     if (p->addr == (PIT_CH0 + vpit->channel) &&
-       p->dir == 1) {  /* read */
+        p->dir == 1) { /* read */
         p->u.data = pit_read_io(vpit);
         resume_pit_io(p);
-       return 1;
+        return 1;
     }
 
     return 0;
@@ -253,8 +253,8 @@
         vpit->channel = ((p->u.data >> 24) & 0x3);
         vpit->first_injected = 0;
 
-       vpit->count_LSB_latched = 0;
-       vpit->count_MSB_latched = 0;
+        vpit->count_LSB_latched = 0;
+        vpit->count_MSB_latched = 0;
 
         rw_mode = ((p->u.data >> 26) & 0x3);
         switch(rw_mode) {
@@ -280,9 +280,19 @@
         /*restore the state*/
         p->state = STATE_IORESP_READY;
 
-       /* register handler to intercept the PIT io when vm_exit */
+        /* register handler to intercept the PIT io when vm_exit */
         if (!reinit)
-           register_portio_handler(0x40, 4, intercept_pit_io); 
+            register_portio_handler(0x40, 4, intercept_pit_io); 
     }
 }
 #endif /* CONFIG_VMX */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r 4508c22dc458 -r 3feb7fa331ed xen/arch/x86/vmx_io.c
--- a/xen/arch/x86/vmx_io.c     Sun Sep 11 16:36:24 2005
+++ b/xen/arch/x86/vmx_io.c     Sun Sep 11 16:44:23 2005
@@ -16,6 +16,7 @@
  * Place - Suite 330, Boston, MA 02111-1307 USA.
  *
  */
+
 #include <xen/config.h>
 #include <xen/init.h>
 #include <xen/mm.h>
@@ -198,24 +199,24 @@
 static inline void __set_reg_value(unsigned long *reg, int size, long value)
 {
     switch (size) {
-        case BYTE_64:
-            *reg &= ~0xFF;
-            *reg |= (value & 0xFF);
-            break;
-        case WORD:
-            *reg &= ~0xFFFF;
-            *reg |= (value & 0xFFFF);
-            break;
-        case LONG:
-            *reg &= ~0xFFFFFFFF;
-            *reg |= (value & 0xFFFFFFFF);
-            break;
-        case QUAD:
-            *reg = value;
-            break;
-        default:
-           printk("Error: <__set_reg_value>: size:%x is invalid\n", size);
-            domain_crash_synchronous();
+    case BYTE_64:
+        *reg &= ~0xFF;
+        *reg |= (value & 0xFF);
+        break;
+    case WORD:
+        *reg &= ~0xFFFF;
+        *reg |= (value & 0xFFFF);
+        break;
+    case LONG:
+        *reg &= ~0xFFFFFFFF;
+        *reg |= (value & 0xFFFFFFFF);
+        break;
+    case QUAD:
+        *reg = value;
+        break;
+    default:
+        printk("Error: <__set_reg_value>: size:%x is invalid\n", size);
+        domain_crash_synchronous();
     }
 }
 
@@ -223,98 +224,98 @@
 {
     if (size == BYTE) {
         switch (index) {
-            case 0:
-                regs->rax &= ~0xFF;
-                regs->rax |= (value & 0xFF);
-                break;
-            case 1:
-                regs->rcx &= ~0xFF;
-                regs->rcx |= (value & 0xFF);
-                break;
-            case 2:
-                regs->rdx &= ~0xFF;
-                regs->rdx |= (value & 0xFF);
-                break;
-            case 3:
-                regs->rbx &= ~0xFF;
-                regs->rbx |= (value & 0xFF);
-                break;
-            case 4:
-                regs->rax &= 0xFFFFFFFFFFFF00FF;
-                regs->rax |= ((value & 0xFF) << 8);
-                break;
-            case 5:
-                regs->rcx &= 0xFFFFFFFFFFFF00FF;
-                regs->rcx |= ((value & 0xFF) << 8);
-                break;
-            case 6:
-                regs->rdx &= 0xFFFFFFFFFFFF00FF;
-                regs->rdx |= ((value & 0xFF) << 8);
-                break;
-            case 7:
-                regs->rbx &= 0xFFFFFFFFFFFF00FF;
-                regs->rbx |= ((value & 0xFF) << 8);
-                break;
-            default:
-                printk("Error: size:%x, index:%x are invalid!\n", size, index);
-                domain_crash_synchronous();
-                break;
+        case 0:
+            regs->rax &= ~0xFF;
+            regs->rax |= (value & 0xFF);
+            break;
+        case 1:
+            regs->rcx &= ~0xFF;
+            regs->rcx |= (value & 0xFF);
+            break;
+        case 2:
+            regs->rdx &= ~0xFF;
+            regs->rdx |= (value & 0xFF);
+            break;
+        case 3:
+            regs->rbx &= ~0xFF;
+            regs->rbx |= (value & 0xFF);
+            break;
+        case 4:
+            regs->rax &= 0xFFFFFFFFFFFF00FF;
+            regs->rax |= ((value & 0xFF) << 8);
+            break;
+        case 5:
+            regs->rcx &= 0xFFFFFFFFFFFF00FF;
+            regs->rcx |= ((value & 0xFF) << 8);
+            break;
+        case 6:
+            regs->rdx &= 0xFFFFFFFFFFFF00FF;
+            regs->rdx |= ((value & 0xFF) << 8);
+            break;
+        case 7:
+            regs->rbx &= 0xFFFFFFFFFFFF00FF;
+            regs->rbx |= ((value & 0xFF) << 8);
+            break;
+        default:
+            printk("Error: size:%x, index:%x are invalid!\n", size, index);
+            domain_crash_synchronous();
+            break;
         }
         return;
     }
 
     switch (index) {
-        case 0: 
-            __set_reg_value(&regs->rax, size, value);
-            break;
-        case 1: 
-            __set_reg_value(&regs->rcx, size, value);
-            break;
-        case 2: 
-            __set_reg_value(&regs->rdx, size, value);
-            break;
-        case 3: 
-            __set_reg_value(&regs->rbx, size, value);
-            break;
-        case 4: 
-            __set_reg_value(&regs->rsp, size, value);
-            break;
-        case 5: 
-            __set_reg_value(&regs->rbp, size, value);
-            break;
-        case 6: 
-            __set_reg_value(&regs->rsi, size, value);
-            break;
-        case 7: 
-            __set_reg_value(&regs->rdi, size, value);
-            break;
-        case 8: 
-            __set_reg_value(&regs->r8, size, value);
-            break;
-        case 9: 
-            __set_reg_value(&regs->r9, size, value);
-            break;
-        case 10: 
-            __set_reg_value(&regs->r10, size, value);
-            break;
-        case 11: 
-            __set_reg_value(&regs->r11, size, value);
-            break;
-        case 12: 
-            __set_reg_value(&regs->r12, size, value);
-            break;
-        case 13: 
-            __set_reg_value(&regs->r13, size, value);
-            break;
-        case 14: 
-            __set_reg_value(&regs->r14, size, value);
-            break;
-        case 15: 
-            __set_reg_value(&regs->r15, size, value);
-            break;
-        default:
-            printk("Error: <set_reg_value> Invalid index\n");
-            domain_crash_synchronous();
+    case 0: 
+        __set_reg_value(&regs->rax, size, value);
+        break;
+    case 1: 
+        __set_reg_value(&regs->rcx, size, value);
+        break;
+    case 2: 
+        __set_reg_value(&regs->rdx, size, value);
+        break;
+    case 3: 
+        __set_reg_value(&regs->rbx, size, value);
+        break;
+    case 4: 
+        __set_reg_value(&regs->rsp, size, value);
+        break;
+    case 5: 
+        __set_reg_value(&regs->rbp, size, value);
+        break;
+    case 6: 
+        __set_reg_value(&regs->rsi, size, value);
+        break;
+    case 7: 
+        __set_reg_value(&regs->rdi, size, value);
+        break;
+    case 8: 
+        __set_reg_value(&regs->r8, size, value);
+        break;
+    case 9: 
+        __set_reg_value(&regs->r9, size, value);
+        break;
+    case 10: 
+        __set_reg_value(&regs->r10, size, value);
+        break;
+    case 11: 
+        __set_reg_value(&regs->r11, size, value);
+        break;
+    case 12: 
+        __set_reg_value(&regs->r12, size, value);
+        break;
+    case 13: 
+        __set_reg_value(&regs->r13, size, value);
+        break;
+    case 14: 
+        __set_reg_value(&regs->r14, size, value);
+        break;
+    case 15: 
+        __set_reg_value(&regs->r15, size, value);
+        break;
+    default:
+        printk("Error: <set_reg_value> Invalid index\n");
+        domain_crash_synchronous();
     }
     return;
 }
@@ -323,44 +324,44 @@
 extern long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs);
 
 static inline void set_eflags_CF(int size, unsigned long v1,
-       unsigned long v2, struct cpu_user_regs *regs)
+                                 unsigned long v2, struct cpu_user_regs *regs)
 {
     unsigned long mask = (1 << (8 * size)) - 1;
 
     if ((v1 & mask) > (v2 & mask))
-       regs->eflags |= X86_EFLAGS_CF;
+        regs->eflags |= X86_EFLAGS_CF;
     else
-       regs->eflags &= ~X86_EFLAGS_CF;
+        regs->eflags &= ~X86_EFLAGS_CF;
 }
 
 static inline void set_eflags_OF(int size, unsigned long v1,
-       unsigned long v2, unsigned long v3, struct cpu_user_regs *regs)
+                                 unsigned long v2, unsigned long v3, struct cpu_user_regs *regs)
 {
     if ((v3 ^ v2) & (v3 ^ v1) & (1 << ((8 * size) - 1)))
-       regs->eflags |= X86_EFLAGS_OF;
+        regs->eflags |= X86_EFLAGS_OF;
 }
 
 static inline void set_eflags_AF(int size, unsigned long v1,
-       unsigned long v2, unsigned long v3, struct cpu_user_regs *regs)
+                                 unsigned long v2, unsigned long v3, struct cpu_user_regs *regs)
 {
     if ((v1 ^ v2 ^ v3) & 0x10)
-       regs->eflags |= X86_EFLAGS_AF;
+        regs->eflags |= X86_EFLAGS_AF;
 }
 
 static inline void set_eflags_ZF(int size, unsigned long v1,
-       struct cpu_user_regs *regs)
+                                 struct cpu_user_regs *regs)
 {
     unsigned long mask = (1 << (8 * size)) - 1;
 
     if ((v1 & mask) == 0)
-       regs->eflags |= X86_EFLAGS_ZF;
+        regs->eflags |= X86_EFLAGS_ZF;
 }
 
 static inline void set_eflags_SF(int size, unsigned long v1,
-       struct cpu_user_regs *regs)
+                                 struct cpu_user_regs *regs)
 {
     if (v1 & (1 << ((8 * size) - 1)))
-       regs->eflags |= X86_EFLAGS_SF;
+        regs->eflags |= X86_EFLAGS_SF;
 }
 
 static char parity_table[256] = {
@@ -383,14 +384,14 @@
 };
 
 static inline void set_eflags_PF(int size, unsigned long v1,
-       struct cpu_user_regs *regs)
+                                 struct cpu_user_regs *regs)
 {
     if (parity_table[v1 & 0xFF])
-       regs->eflags |= X86_EFLAGS_PF;
+        regs->eflags |= X86_EFLAGS_PF;
 }
 
 static void vmx_pio_assist(struct cpu_user_regs *regs, ioreq_t *p,
-                                       struct mi_per_cpu_info *mpcip)
+                           struct mi_per_cpu_info *mpcip)
 {
     unsigned long old_eax;
     int sign = p->df ? -1 : 1;
@@ -398,28 +399,28 @@
     if (p->dir == IOREQ_WRITE) {
         if (p->pdata_valid) {
             regs->esi += sign * p->count * p->size;
-           if (mpcip->flags & REPZ)
-               regs->ecx -= p->count;
+            if (mpcip->flags & REPZ)
+                regs->ecx -= p->count;
         }
     } else {
-       if (mpcip->flags & OVERLAP) {
-           unsigned long addr;
+        if (mpcip->flags & OVERLAP) {
+            unsigned long addr;
 
             regs->edi += sign * p->count * p->size;
-           if (mpcip->flags & REPZ)
-               regs->ecx -= p->count;
-
-           addr = regs->edi;
-           if (sign > 0)
-               addr -= p->size;
-           vmx_copy(&p->u.data, addr, p->size, VMX_COPY_OUT);
-       } else if (p->pdata_valid) {
+            if (mpcip->flags & REPZ)
+                regs->ecx -= p->count;
+
+            addr = regs->edi;
+            if (sign > 0)
+                addr -= p->size;
+            vmx_copy(&p->u.data, addr, p->size, VMX_COPY_OUT);
+        } else if (p->pdata_valid) {
             regs->edi += sign * p->count * p->size;
-           if (mpcip->flags & REPZ)
-               regs->ecx -= p->count;
+            if (mpcip->flags & REPZ)
+                regs->ecx -= p->count;
         } else {
-           old_eax = regs->eax;
-           switch (p->size) {
+            old_eax = regs->eax;
+            switch (p->size) {
             case 1:
                 regs->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff);
                 break;
@@ -430,15 +431,15 @@
                 regs->eax = (p->u.data & 0xffffffff);
                 break;
             default:
-               printk("Error: %s unknown port size\n", __FUNCTION__);
-               domain_crash_synchronous();
-           }
-       }
+                printk("Error: %s unknown port size\n", __FUNCTION__);
+                domain_crash_synchronous();
+            }
+        }
     }
 }
 
 static void vmx_mmio_assist(struct cpu_user_regs *regs, ioreq_t *p,
-                                       struct mi_per_cpu_info *mpcip)
+                            struct mi_per_cpu_info *mpcip)
 {
     int sign = p->df ? -1 : 1;
     int size = -1, index = -1;
@@ -451,178 +452,178 @@
 
     switch (mpcip->instr) {
     case INSTR_MOV:
-       if (dst & REGISTER) {
-           index = operand_index(dst);
-           set_reg_value(size, index, 0, regs, p->u.data);
-       }
-       break;
+        if (dst & REGISTER) {
+            index = operand_index(dst);
+            set_reg_value(size, index, 0, regs, p->u.data);
+        }
+        break;
 
     case INSTR_MOVZ:
-       if (dst & REGISTER) {
-           index = operand_index(dst);
-           switch (size) {
-           case BYTE: p->u.data = p->u.data & 0xFFULL; break;
-           case WORD: p->u.data = p->u.data & 0xFFFFULL; break;
-           case LONG: p->u.data = p->u.data & 0xFFFFFFFFULL; break;
-           }
-           set_reg_value(operand_size(dst), index, 0, regs, p->u.data);
-       }
-       break;
+        if (dst & REGISTER) {
+            index = operand_index(dst);
+            switch (size) {
+            case BYTE: p->u.data = p->u.data & 0xFFULL; break;
+            case WORD: p->u.data = p->u.data & 0xFFFFULL; break;
+            case LONG: p->u.data = p->u.data & 0xFFFFFFFFULL; break;
+            }
+            set_reg_value(operand_size(dst), index, 0, regs, p->u.data);
+        }
+        break;
 
     case INSTR_MOVS:
-       sign = p->df ? -1 : 1;
-       regs->esi += sign * p->count * p->size;
-       regs->edi += sign * p->count * p->size;
-
-       if ((mpcip->flags & OVERLAP) && p->dir == IOREQ_READ) {
-           unsigned long addr = regs->edi;
-
-           if (sign > 0)
-               addr -= p->size;
-           vmx_copy(&p->u.data, addr, p->size, VMX_COPY_OUT);
-       }
-
-       if (mpcip->flags & REPZ)
-           regs->ecx -= p->count;
-       break;
+        sign = p->df ? -1 : 1;
+        regs->esi += sign * p->count * p->size;
+        regs->edi += sign * p->count * p->size;
+
+        if ((mpcip->flags & OVERLAP) && p->dir == IOREQ_READ) {
+            unsigned long addr = regs->edi;
+
+            if (sign > 0)
+                addr -= p->size;
+            vmx_copy(&p->u.data, addr, p->size, VMX_COPY_OUT);
+        }
+
+        if (mpcip->flags & REPZ)
+            regs->ecx -= p->count;
+        break;
 
     case INSTR_STOS:
-       sign = p->df ? -1 : 1;
-       regs->edi += sign * p->count * p->size;
-       if (mpcip->flags & REPZ)
-           regs->ecx -= p->count;
-       break;
+        sign = p->df ? -1 : 1;
+        regs->edi += sign * p->count * p->size;
+        if (mpcip->flags & REPZ)
+            regs->ecx -= p->count;
+        break;
 
     case INSTR_AND:
-       if (src & REGISTER) {
-           index = operand_index(src);
-           value = get_reg_value(size, index, 0, regs);
-           diff = (unsigned long) p->u.data & value;
-       } else if (src & IMMEDIATE) {
-           value = mpcip->immediate;
-           diff = (unsigned long) p->u.data & value;
-       } else if (src & MEMORY) {
-           index = operand_index(dst);
-           value = get_reg_value(size, index, 0, regs);
-           diff = (unsigned long) p->u.data & value;
-           set_reg_value(size, index, 0, regs, diff);
-       }
-
-       /*
-        * The OF and CF flags are cleared; the SF, ZF, and PF
-        * flags are set according to the result. The state of
-        * the AF flag is undefined.
-        */
-       regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
-                         X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
-       set_eflags_ZF(size, diff, regs);
-       set_eflags_SF(size, diff, regs);
-       set_eflags_PF(size, diff, regs);
-       break;
+        if (src & REGISTER) {
+            index = operand_index(src);
+            value = get_reg_value(size, index, 0, regs);
+            diff = (unsigned long) p->u.data & value;
+        } else if (src & IMMEDIATE) {
+            value = mpcip->immediate;
+            diff = (unsigned long) p->u.data & value;
+        } else if (src & MEMORY) {
+            index = operand_index(dst);
+            value = get_reg_value(size, index, 0, regs);
+            diff = (unsigned long) p->u.data & value;
+            set_reg_value(size, index, 0, regs, diff);
+        }
+
+        /*
+         * The OF and CF flags are cleared; the SF, ZF, and PF
+         * flags are set according to the result. The state of
+         * the AF flag is undefined.
+         */
+        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
+                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
+        set_eflags_ZF(size, diff, regs);
+        set_eflags_SF(size, diff, regs);
+        set_eflags_PF(size, diff, regs);
+        break;
 
     case INSTR_OR:
-       if (src & REGISTER) {
-           index = operand_index(src);
-           value = get_reg_value(size, index, 0, regs);
-           diff = (unsigned long) p->u.data | value;
-       } else if (src & IMMEDIATE) {
-           value = mpcip->immediate;
-           diff = (unsigned long) p->u.data | value;
-       } else if (src & MEMORY) {
-           index = operand_index(dst);
-           value = get_reg_value(size, index, 0, regs);
-           diff = (unsigned long) p->u.data | value;
-           set_reg_value(size, index, 0, regs, diff);
-       }
-
-       /*
-        * The OF and CF flags are cleared; the SF, ZF, and PF
-        * flags are set according to the result. The state of
-        * the AF flag is undefined.
-        */
-       regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
-                         X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
-       set_eflags_ZF(size, diff, regs);
-       set_eflags_SF(size, diff, regs);
-       set_eflags_PF(size, diff, regs);
-       break;
+        if (src & REGISTER) {
+            index = operand_index(src);
+            value = get_reg_value(size, index, 0, regs);
+            diff = (unsigned long) p->u.data | value;
+        } else if (src & IMMEDIATE) {
+            value = mpcip->immediate;
+            diff = (unsigned long) p->u.data | value;
+        } else if (src & MEMORY) {
+            index = operand_index(dst);
+            value = get_reg_value(size, index, 0, regs);
+            diff = (unsigned long) p->u.data | value;
+            set_reg_value(size, index, 0, regs, diff);
+        }
+
+        /*
+         * The OF and CF flags are cleared; the SF, ZF, and PF
+         * flags are set according to the result. The state of
+         * the AF flag is undefined.
+         */
+        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
+                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
+        set_eflags_ZF(size, diff, regs);
+        set_eflags_SF(size, diff, regs);
+        set_eflags_PF(size, diff, regs);
+        break;
 
     case INSTR_XOR:
-       if (src & REGISTER) {
-           index = operand_index(src);
-           value = get_reg_value(size, index, 0, regs);
-           diff = (unsigned long) p->u.data ^ value;
-       } else if (src & IMMEDIATE) {
-           value = mpcip->immediate;
-           diff = (unsigned long) p->u.data ^ value;
-       } else if (src & MEMORY) {
-           index = operand_index(dst);
-           value = get_reg_value(size, index, 0, regs);
-           diff = (unsigned long) p->u.data ^ value;
-           set_reg_value(size, index, 0, regs, diff);
-       }
-
-       /*
-        * The OF and CF flags are cleared; the SF, ZF, and PF
-        * flags are set according to the result. The state of
-        * the AF flag is undefined.
-        */
-       regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
-                         X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
-       set_eflags_ZF(size, diff, regs);
-       set_eflags_SF(size, diff, regs);
-       set_eflags_PF(size, diff, regs);
-       break;
+        if (src & REGISTER) {
+            index = operand_index(src);
+            value = get_reg_value(size, index, 0, regs);
+            diff = (unsigned long) p->u.data ^ value;
+        } else if (src & IMMEDIATE) {
+            value = mpcip->immediate;
+            diff = (unsigned long) p->u.data ^ value;
+        } else if (src & MEMORY) {
+            index = operand_index(dst);
+            value = get_reg_value(size, index, 0, regs);
+            diff = (unsigned long) p->u.data ^ value;
+            set_reg_value(size, index, 0, regs, diff);
+        }
+
+        /*
+         * The OF and CF flags are cleared; the SF, ZF, and PF
+         * flags are set according to the result. The state of
+         * the AF flag is undefined.
+         */
+        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
+                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
+        set_eflags_ZF(size, diff, regs);
+        set_eflags_SF(size, diff, regs);
+        set_eflags_PF(size, diff, regs);
+        break;
 
     case INSTR_CMP:
-       if (src & REGISTER) {
-           index = operand_index(src);
-           value = get_reg_value(size, index, 0, regs);
-           diff = (unsigned long) p->u.data - value;
-       } else if (src & IMMEDIATE) {
-           value = mpcip->immediate;
-           diff = (unsigned long) p->u.data - value;
-       } else if (src & MEMORY) {
-           index = operand_index(dst);
-           value = get_reg_value(size, index, 0, regs);
-           diff = value - (unsigned long) p->u.data;
-       }
-
-       /*
-        * The CF, OF, SF, ZF, AF, and PF flags are set according
-        * to the result
-        */
-       regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|X86_EFLAGS_AF|
-                         X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
-       set_eflags_CF(size, value, (unsigned long) p->u.data, regs);
-       set_eflags_OF(size, diff, value, (unsigned long) p->u.data, regs);
-       set_eflags_AF(size, diff, value, (unsigned long) p->u.data, regs);
-       set_eflags_ZF(size, diff, regs);
-       set_eflags_SF(size, diff, regs);
-       set_eflags_PF(size, diff, regs);
-       break;
+        if (src & REGISTER) {
+            index = operand_index(src);
+            value = get_reg_value(size, index, 0, regs);
+            diff = (unsigned long) p->u.data - value;
+        } else if (src & IMMEDIATE) {
+            value = mpcip->immediate;
+            diff = (unsigned long) p->u.data - value;
+        } else if (src & MEMORY) {
+            index = operand_index(dst);
+            value = get_reg_value(size, index, 0, regs);
+            diff = value - (unsigned long) p->u.data;
+        }
+
+        /*
+         * The CF, OF, SF, ZF, AF, and PF flags are set according
+         * to the result
+         */
+        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|X86_EFLAGS_AF|
+                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
+        set_eflags_CF(size, value, (unsigned long) p->u.data, regs);
+        set_eflags_OF(size, diff, value, (unsigned long) p->u.data, regs);
+        set_eflags_AF(size, diff, value, (unsigned long) p->u.data, regs);
+        set_eflags_ZF(size, diff, regs);
+        set_eflags_SF(size, diff, regs);
+        set_eflags_PF(size, diff, regs);
+        break;
 
     case INSTR_TEST:
-       if (src & REGISTER) {
-           index = operand_index(src);
-           value = get_reg_value(size, index, 0, regs);
-       } else if (src & IMMEDIATE) {
-           value = mpcip->immediate;
-       } else if (src & MEMORY) {
-           index = operand_index(dst);
-           value = get_reg_value(size, index, 0, regs);
-       }
-       diff = (unsigned long) p->u.data & value;
-
-       /*
-        * Sets the SF, ZF, and PF status flags. CF and OF are set to 0
-        */
-       regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
-                         X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
-       set_eflags_ZF(size, diff, regs);
-       set_eflags_SF(size, diff, regs);
-       set_eflags_PF(size, diff, regs);
-       break;
+        if (src & REGISTER) {
+            index = operand_index(src);
+            value = get_reg_value(size, index, 0, regs);
+        } else if (src & IMMEDIATE) {
+            value = mpcip->immediate;
+        } else if (src & MEMORY) {
+            index = operand_index(dst);
+            value = get_reg_value(size, index, 0, regs);
+        }
+        diff = (unsigned long) p->u.data & value;
+
+        /*
+         * Sets the SF, ZF, and PF status flags. CF and OF are set to 0
+         */
+        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
+                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
+        set_eflags_ZF(size, diff, regs);
+        set_eflags_SF(size, diff, regs);
+        set_eflags_PF(size, diff, regs);
+        break;
     }
 
     load_cpu_user_regs(regs);
@@ -644,7 +645,7 @@
     if (vio == 0) {
         VMX_DBG_LOG(DBG_LEVEL_1, 
                     "bad shared page: %lx", (unsigned long) vio);
-       printf("bad shared page: %lx\n", (unsigned long) vio);
+        printf("bad shared page: %lx\n", (unsigned long) vio);
         domain_crash_synchronous();
     }
 
@@ -655,15 +656,15 @@
     /* clear IO wait VMX flag */
     if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags)) {
         if (p->state == STATE_IORESP_READY) {
-           p->state = STATE_INVALID;
+            p->state = STATE_INVALID;
             clear_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
 
-           if (p->type == IOREQ_TYPE_PIO)
-               vmx_pio_assist(regs, p, mpci_p);
-           else
-               vmx_mmio_assist(regs, p, mpci_p);
-       }
-       /* else an interrupt send event raced us */
+            if (p->type == IOREQ_TYPE_PIO)
+                vmx_pio_assist(regs, p, mpci_p);
+            else
+                vmx_mmio_assist(regs, p, mpci_p);
+        }
+        /* else an interrupt send event raced us */
     }
 }
 
@@ -730,7 +731,7 @@
     return word ? bit : -1;
 }
 #else
-#define __fls(x)       generic_fls(x)
+#define __fls(x)  generic_fls(x)
 static __inline__ int generic_fls(u32 x)
 {
     int r = 31;
@@ -839,23 +840,23 @@
     struct vmx_virpit_t *vpit = &(v->domain->arch.vmx_platform.vmx_pit);
     switch(type)
     {
-        case VLAPIC_DELIV_MODE_EXT:
-            if (vpit->pending_intr_nr && vector == vpit->vector)
-                vpit->pending_intr_nr--;
-            else
-                clear_highest_bit(v, vector);
-
-            if (vector == vpit->vector && !vpit->first_injected){
-                vpit->first_injected = 1;
-                vpit->pending_intr_nr = 0;
-            }
-            if (vector == vpit->vector)
-                vpit->inject_point = NOW();
-            break;
-
-        default:
-            printk("Not support interrupt type\n");
-            break;
+    case VLAPIC_DELIV_MODE_EXT:
+        if (vpit->pending_intr_nr && vector == vpit->vector)
+            vpit->pending_intr_nr--;
+        else
+            clear_highest_bit(v, vector);
+
+        if (vector == vpit->vector && !vpit->first_injected){
+            vpit->first_injected = 1;
+            vpit->pending_intr_nr = 0;
+        }
+        if (vector == vpit->vector)
+            vpit->inject_point = NOW();
+        break;
+
+    default:
+        printk("Not support interrupt type\n");
+        break;
     }
 }
 
@@ -897,51 +898,51 @@
         return;
     }
 
-     __vmread(VM_ENTRY_INTR_INFO_FIELD, &intr_fields);
-
-     if (intr_fields & INTR_INFO_VALID_MASK) {
-         VMX_DBG_LOG(DBG_LEVEL_1, "vmx_intr_assist: intr_fields: %lx",
-           intr_fields);
-         return;
-     }
-
-     __vmread(GUEST_INTERRUPTIBILITY_INFO, &interruptibility);
-
-     if (interruptibility) {
-         enable_irq_window(cpu_exec_control);
-         VMX_DBG_LOG(DBG_LEVEL_1, "guesting pending: %x, interruptibility: %lx",
-                     highest_vector, interruptibility);
-         return;
-     }
-
-     __vmread(GUEST_RFLAGS, &eflags);
-
-     switch (intr_type) {
-         case VLAPIC_DELIV_MODE_EXT:
-             if (irq_masked(eflags)) {
-                 enable_irq_window(cpu_exec_control);
-                 VMX_DBG_LOG(DBG_LEVEL_1, "guesting pending: %x, eflags: %lx",
-                             highest_vector, eflags);
-                 return;
-             }
-
-             vmx_inject_extint(v, highest_vector, VMX_INVALID_ERROR_CODE);
-             TRACE_3D(TRC_VMX_INT, v->domain->domain_id, highest_vector, 0);
-             break;
-         case VLAPIC_DELIV_MODE_FIXED:
-         case VLAPIC_DELIV_MODE_LPRI:
-         case VLAPIC_DELIV_MODE_SMI:
-         case VLAPIC_DELIV_MODE_NMI:
-         case VLAPIC_DELIV_MODE_INIT:
-         case VLAPIC_DELIV_MODE_STARTUP:
-         default:
-             printk("Unsupported interrupt type\n");
-             BUG();
-             break;
-     }
-
-     interrupt_post_injection(v, highest_vector, intr_type);
-     return;
+    __vmread(VM_ENTRY_INTR_INFO_FIELD, &intr_fields);
+
+    if (intr_fields & INTR_INFO_VALID_MASK) {
+        VMX_DBG_LOG(DBG_LEVEL_1, "vmx_intr_assist: intr_fields: %lx",
+                    intr_fields);
+        return;
+    }
+
+    __vmread(GUEST_INTERRUPTIBILITY_INFO, &interruptibility);
+
+    if (interruptibility) {
+        enable_irq_window(cpu_exec_control);
+        VMX_DBG_LOG(DBG_LEVEL_1, "guesting pending: %x, interruptibility: %lx",
+                    highest_vector, interruptibility);
+        return;
+    }
+
+    __vmread(GUEST_RFLAGS, &eflags);
+
+    switch (intr_type) {
+    case VLAPIC_DELIV_MODE_EXT:
+        if (irq_masked(eflags)) {
+            enable_irq_window(cpu_exec_control);
+            VMX_DBG_LOG(DBG_LEVEL_1, "guesting pending: %x, eflags: %lx",
+                        highest_vector, eflags);
+            return;
+        }
+
+        vmx_inject_extint(v, highest_vector, VMX_INVALID_ERROR_CODE);
+        TRACE_3D(TRC_VMX_INT, v->domain->domain_id, highest_vector, 0);
+        break;
+    case VLAPIC_DELIV_MODE_FIXED:
+    case VLAPIC_DELIV_MODE_LPRI:
+    case VLAPIC_DELIV_MODE_SMI:
+    case VLAPIC_DELIV_MODE_NMI:
+    case VLAPIC_DELIV_MODE_INIT:
+    case VLAPIC_DELIV_MODE_STARTUP:
+    default:
+        printk("Unsupported interrupt type\n");
+        BUG();
+        break;
+    }
+
+    interrupt_post_injection(v, highest_vector, intr_type);
+    return;
 }
 
 void vmx_do_resume(struct vcpu *d) 
diff -r 4508c22dc458 -r 3feb7fa331ed xen/arch/x86/vmx_platform.c
--- a/xen/arch/x86/vmx_platform.c       Sun Sep 11 16:36:24 2005
+++ b/xen/arch/x86/vmx_platform.c       Sun Sep 11 16:44:23 2005
@@ -55,17 +55,17 @@
 static inline long __get_reg_value(unsigned long reg, int size)
 {
     switch(size) {
-        case BYTE_64:
-            return (char)(reg & 0xFF);
-        case WORD:
-            return (short)(reg & 0xFFFF);
-        case LONG:
-            return (int)(reg & 0xFFFFFFFF);
-        case QUAD:
-            return (long)(reg);
-        default:
-       printf("Error: (__get_reg_value) Invalid reg size\n");
-            domain_crash_synchronous();
+    case BYTE_64:
+        return (char)(reg & 0xFF);
+    case WORD:
+        return (short)(reg & 0xFFFF);
+    case LONG:
+        return (int)(reg & 0xFFFFFFFF);
+    case QUAD:
+        return (long)(reg);
+    default:
+        printf("Error: (__get_reg_value) Invalid reg size\n");
+        domain_crash_synchronous();
     }
 }
 
@@ -73,49 +73,49 @@
 {
     if (size == BYTE) {
         switch (index) { 
-       case 0: /* %al */
-                return (char)(regs->rax & 0xFF);
-       case 1: /* %cl */
-                return (char)(regs->rcx & 0xFF);
-       case 2: /* %dl */
-                return (char)(regs->rdx & 0xFF); 
-       case 3: /* %bl */
-                return (char)(regs->rbx & 0xFF);
-       case 4: /* %ah */
-                return (char)((regs->rax & 0xFF00) >> 8);
-       case 5: /* %ch */
-                return (char)((regs->rcx & 0xFF00) >> 8);
-       case 6: /* %dh */
-                return (char)((regs->rdx & 0xFF00) >> 8);
-       case 7: /* %bh */
-                return (char)((regs->rbx & 0xFF00) >> 8);
-       default:
-           printf("Error: (get_reg_value) Invalid index value\n"); 
-           domain_crash_synchronous();
+        case 0: /* %al */
+            return (char)(regs->rax & 0xFF);
+        case 1: /* %cl */
+            return (char)(regs->rcx & 0xFF);
+        case 2: /* %dl */
+            return (char)(regs->rdx & 0xFF); 
+        case 3: /* %bl */
+            return (char)(regs->rbx & 0xFF);
+        case 4: /* %ah */
+            return (char)((regs->rax & 0xFF00) >> 8);
+        case 5: /* %ch */
+            return (char)((regs->rcx & 0xFF00) >> 8);
+        case 6: /* %dh */
+            return (char)((regs->rdx & 0xFF00) >> 8);
+        case 7: /* %bh */
+            return (char)((regs->rbx & 0xFF00) >> 8);
+        default:
+            printf("Error: (get_reg_value) Invalid index value\n"); 
+            domain_crash_synchronous();
         }
-       /* NOTREACHED */
+        /* NOTREACHED */
     }
 
     switch (index) {
-        case 0: return __get_reg_value(regs->rax, size);
-        case 1: return __get_reg_value(regs->rcx, size);
-        case 2: return __get_reg_value(regs->rdx, size);
-        case 3: return __get_reg_value(regs->rbx, size);
-        case 4: return __get_reg_value(regs->rsp, size);
-        case 5: return __get_reg_value(regs->rbp, size);
-        case 6: return __get_reg_value(regs->rsi, size);
-        case 7: return __get_reg_value(regs->rdi, size);
-        case 8: return __get_reg_value(regs->r8, size);
-        case 9: return __get_reg_value(regs->r9, size);
-        case 10: return __get_reg_value(regs->r10, size);
-        case 11: return __get_reg_value(regs->r11, size);
-        case 12: return __get_reg_value(regs->r12, size);
-        case 13: return __get_reg_value(regs->r13, size);
-        case 14: return __get_reg_value(regs->r14, size);
-        case 15: return __get_reg_value(regs->r15, size);
-        default:
-           printf("Error: (get_reg_value) Invalid index value\n"); 
-           domain_crash_synchronous();
+    case 0: return __get_reg_value(regs->rax, size);
+    case 1: return __get_reg_value(regs->rcx, size);
+    case 2: return __get_reg_value(regs->rdx, size);
+    case 3: return __get_reg_value(regs->rbx, size);
+    case 4: return __get_reg_value(regs->rsp, size);
+    case 5: return __get_reg_value(regs->rbp, size);
+    case 6: return __get_reg_value(regs->rsi, size);
+    case 7: return __get_reg_value(regs->rdi, size);
+    case 8: return __get_reg_value(regs->r8, size);
+    case 9: return __get_reg_value(regs->r9, size);
+    case 10: return __get_reg_value(regs->r10, size);
+    case 11: return __get_reg_value(regs->r11, size);
+    case 12: return __get_reg_value(regs->r12, size);
+    case 13: return __get_reg_value(regs->r13, size);
+    case 14: return __get_reg_value(regs->r14, size);
+    case 15: return __get_reg_value(regs->r15, size);
+    default:
+        printf("Error: (get_reg_value) Invalid index value\n"); 
+        domain_crash_synchronous();
     }
 }
 #elif defined (__i386__)
@@ -134,12 +134,12 @@
 {                    
     switch(size) {
     case WORD:
-       return (short)(reg & 0xFFFF);
+        return (short)(reg & 0xFFFF);
     case LONG:
-       return (int)(reg & 0xFFFFFFFF);
+        return (int)(reg & 0xFFFFFFFF);
     default:
-       printf("Error: (__get_reg_value) Invalid reg size\n");
-       domain_crash_synchronous();
+        printf("Error: (__get_reg_value) Invalid reg size\n");
+        domain_crash_synchronous();
     }
 }
 
@@ -147,29 +147,29 @@
 {                    
     if (size == BYTE) {
         switch (index) { 
-       case 0: /* %al */
+        case 0: /* %al */
             return (char)(regs->eax & 0xFF);
-       case 1: /* %cl */
+        case 1: /* %cl */
             return (char)(regs->ecx & 0xFF);
-       case 2: /* %dl */
+        case 2: /* %dl */
             return (char)(regs->edx & 0xFF); 
-       case 3: /* %bl */
+        case 3: /* %bl */
             return (char)(regs->ebx & 0xFF);
-       case 4: /* %ah */
+        case 4: /* %ah */
             return (char)((regs->eax & 0xFF00) >> 8);
-       case 5: /* %ch */
+        case 5: /* %ch */
             return (char)((regs->ecx & 0xFF00) >> 8);
-       case 6: /* %dh */
+        case 6: /* %dh */
             return (char)((regs->edx & 0xFF00) >> 8);
-       case 7: /* %bh */
+        case 7: /* %bh */
             return (char)((regs->ebx & 0xFF00) >> 8);
         default:
-           printf("Error: (get_reg_value) Invalid index value\n"); 
+            printf("Error: (get_reg_value) Invalid index value\n"); 
             domain_crash_synchronous();
         }
-        }
-
-        switch (index) {
+    }
+
+    switch (index) {
     case 0: return __get_reg_value(regs->eax, size);
     case 1: return __get_reg_value(regs->ecx, size);
     case 2: return __get_reg_value(regs->edx, size);
@@ -179,46 +179,46 @@
     case 6: return __get_reg_value(regs->esi, size);
     case 7: return __get_reg_value(regs->edi, size);
     default:
-       printf("Error: (get_reg_value) Invalid index value\n"); 
+        printf("Error: (get_reg_value) Invalid index value\n"); 
         domain_crash_synchronous();
     }
 }
 #endif
 
 static inline unsigned char *check_prefix(unsigned char *inst,
-               struct instruction *thread_inst, unsigned char *rex_p)
+                                          struct instruction *thread_inst, unsigned char *rex_p)
 {
     while (1) {
         switch (*inst) {
-        /* rex prefix for em64t instructions */
-            case 0x40 ... 0x4e:
-                *rex_p = *inst;
-                break;
+            /* rex prefix for em64t instructions */
+        case 0x40 ... 0x4e:
+            *rex_p = *inst;
+            break;
         case 0xf3: /* REPZ */
-               thread_inst->flags = REPZ;
-               break;
+            thread_inst->flags = REPZ;
+            break;
         case 0xf2: /* REPNZ */
-               thread_inst->flags = REPNZ;
-               break;
+            thread_inst->flags = REPNZ;
+            break;
         case 0xf0: /* LOCK */
-               break;
+            break;
         case 0x2e: /* CS */
         case 0x36: /* SS */
         case 0x3e: /* DS */
         case 0x26: /* ES */
         case 0x64: /* FS */
         case 0x65: /* GS */
-               thread_inst->seg_sel = *inst;
-                break;
+            thread_inst->seg_sel = *inst;
+            break;
         case 0x66: /* 32bit->16bit */
-                thread_inst->op_size = WORD;
-                break;
-            case 0x67:
-               printf("Error: Not handling 0x67 (yet)\n");
-                domain_crash_synchronous();
-                break;
-            default:
-                return inst;
+            thread_inst->op_size = WORD;
+            break;
+        case 0x67:
+            printf("Error: Not handling 0x67 (yet)\n");
+            domain_crash_synchronous();
+            break;
+        default:
+            return inst;
         }
         inst++;
     }
@@ -240,23 +240,23 @@
     }
 
     switch(mod) {
-        case 0:
-            if (rm == 5 || rm == 4) {
-                if (op16)
-                    inst = inst + 2; //disp16, skip 2 bytes
-                else
-                    inst = inst + 4; //disp32, skip 4 bytes
-            }
-            break;
-        case 1:
-            inst++; //disp8, skip 1 byte
-            break;
-        case 2:
+    case 0:
+        if (rm == 5 || rm == 4) {
             if (op16)
                 inst = inst + 2; //disp16, skip 2 bytes
             else
                 inst = inst + 4; //disp32, skip 4 bytes
-            break;
+        }
+        break;
+    case 1:
+        inst++; //disp8, skip 1 byte
+        break;
+    case 2:
+        if (op16)
+            inst = inst + 2; //disp16, skip 2 bytes
+        else
+            inst = inst + 4; //disp32, skip 4 bytes
+        break;
     }
 
     if (op_size == QUAD)
@@ -304,19 +304,19 @@
 }
 
 #define GET_OP_SIZE_FOR_BYTE(op_size)   \
-    do {                               \
-       if (rex)                        \
-           op_size = BYTE_64;          \
-       else                            \
-           op_size = BYTE;             \
+    do {                                \
+        if (rex)                        \
+            op_size = BYTE_64;          \
+        else                            \
+            op_size = BYTE;             \
     } while(0)
 
 #define GET_OP_SIZE_FOR_NONEBYTE(op_size)   \
-    do {                               \
-       if (rex & 0x8)                  \
-           op_size = QUAD;             \
-       else if (op_size != WORD)       \
-           op_size = LONG;             \
+    do {                                \
+        if (rex & 0x8)                  \
+            op_size = QUAD;             \
+        else if (op_size != WORD)       \
+            op_size = LONG;             \
     } while(0)
 
 
@@ -344,7 +344,7 @@
  * Decode mem,reg operands (as in <opcode> r32/16, m32/16)
  */
 static int mem_reg(unsigned char size, unsigned char *opcode,
-                       struct instruction *instr, unsigned char rex)
+                   struct instruction *instr, unsigned char rex)
 {
     int index = get_index(opcode + 1, rex);
 
@@ -357,7 +357,7 @@
  * Decode reg,mem operands (as in <opcode> m32/16, r32/16)
  */
 static int reg_mem(unsigned char size, unsigned char *opcode,
-                       struct instruction *instr, unsigned char rex)
+                   struct instruction *instr, unsigned char rex)
 {
     int index = get_index(opcode + 1, rex);
 
@@ -382,210 +382,210 @@
         vm86 = 1;
 
     if (vm86) { /* meaning is reversed */
-       if (instr->op_size == WORD)
-           instr->op_size = LONG;
-       else if (instr->op_size == LONG)
-           instr->op_size = WORD;
-       else if (instr->op_size == 0)
-           instr->op_size = WORD;
+        if (instr->op_size == WORD)
+            instr->op_size = LONG;
+        else if (instr->op_size == LONG)
+            instr->op_size = WORD;
+        else if (instr->op_size == 0)
+            instr->op_size = WORD;
     }
 
     switch (*opcode) {
     case 0x0B: /* or m32/16, r32/16 */
-       instr->instr = INSTR_OR;
-       GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
-       return mem_reg(instr->op_size, opcode, instr, rex);
+        instr->instr = INSTR_OR;
+        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
+        return mem_reg(instr->op_size, opcode, instr, rex);
 
     case 0x20: /* and r8, m8 */
-       instr->instr = INSTR_AND;
-       GET_OP_SIZE_FOR_BYTE(instr->op_size);
-       return reg_mem(instr->op_size, opcode, instr, rex);
+        instr->instr = INSTR_AND;
+        GET_OP_SIZE_FOR_BYTE(instr->op_size);
+        return reg_mem(instr->op_size, opcode, instr, rex);
 
     case 0x21: /* and r32/16, m32/16 */
-       instr->instr = INSTR_AND;
-       GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
-       return reg_mem(instr->op_size, opcode, instr, rex);
+        instr->instr = INSTR_AND;
+        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
+        return reg_mem(instr->op_size, opcode, instr, rex);
 
     case 0x23: /* and m32/16, r32/16 */
-       instr->instr = INSTR_AND;
-       GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
-       return mem_reg(instr->op_size, opcode, instr, rex);
+        instr->instr = INSTR_AND;
+        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
+        return mem_reg(instr->op_size, opcode, instr, rex);
 
     case 0x30: /* xor r8, m8 */
-       instr->instr = INSTR_XOR;
-       GET_OP_SIZE_FOR_BYTE(instr->op_size);
-       return reg_mem(instr->op_size, opcode, instr, rex);
+        instr->instr = INSTR_XOR;
+        GET_OP_SIZE_FOR_BYTE(instr->op_size);
+        return reg_mem(instr->op_size, opcode, instr, rex);
 
     case 0x31: /* xor r32/16, m32/16 */
-       instr->instr = INSTR_XOR;
-       GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
-       return reg_mem(instr->op_size, opcode, instr, rex);
+        instr->instr = INSTR_XOR;
+        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
+        return reg_mem(instr->op_size, opcode, instr, rex);
 
     case 0x39: /* cmp r32/16, m32/16 */
-       instr->instr = INSTR_CMP;
-       GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
-       return reg_mem(instr->op_size, opcode, instr, rex);
+        instr->instr = INSTR_CMP;
+        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
+        return reg_mem(instr->op_size, opcode, instr, rex);
 
     case 0x80:
     case 0x81:
-       if (((opcode[1] >> 3) & 7) == 7) { /* cmp $imm, m32/16 */
-           instr->instr = INSTR_CMP;
-
-           if (opcode[0] == 0x80)
-               GET_OP_SIZE_FOR_BYTE(instr->op_size);
+        if (((opcode[1] >> 3) & 7) == 7) { /* cmp $imm, m32/16 */
+            instr->instr = INSTR_CMP;
+
+            if (opcode[0] == 0x80)
+                GET_OP_SIZE_FOR_BYTE(instr->op_size);
             else
-               GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
-
-           instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
-           instr->immediate = get_immediate(vm86, opcode+1, BYTE);
-           instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);
+                GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
+
+            instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
+            instr->immediate = get_immediate(vm86, opcode+1, BYTE);
+            instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);
 
             return DECODE_success;
-       } else
-           return DECODE_failure;
+        } else
+            return DECODE_failure;
 
     case 0x84:  /* test m8, r8 */
-       instr->instr = INSTR_TEST;
-       instr->op_size = BYTE;
-       GET_OP_SIZE_FOR_BYTE(tmp_size);
-       return mem_reg(tmp_size, opcode, instr, rex);
+        instr->instr = INSTR_TEST;
+        instr->op_size = BYTE;
+        GET_OP_SIZE_FOR_BYTE(tmp_size);
+        return mem_reg(tmp_size, opcode, instr, rex);
 
     case 0x88: /* mov r8, m8 */
-       instr->instr = INSTR_MOV;
-       instr->op_size = BYTE;
+        instr->instr = INSTR_MOV;
+        instr->op_size = BYTE;
         GET_OP_SIZE_FOR_BYTE(tmp_size);
-       return reg_mem(tmp_size, opcode, instr, rex);
+        return reg_mem(tmp_size, opcode, instr, rex);
 
     case 0x89: /* mov r32/16, m32/16 */
-       instr->instr = INSTR_MOV;
-       GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
-       return reg_mem(instr->op_size, opcode, instr, rex);
+        instr->instr = INSTR_MOV;
+        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
+        return reg_mem(instr->op_size, opcode, instr, rex);
 
     case 0x8A: /* mov m8, r8 */
-       instr->instr = INSTR_MOV;
-       instr->op_size = BYTE;
+        instr->instr = INSTR_MOV;
+        instr->op_size = BYTE;
         GET_OP_SIZE_FOR_BYTE(tmp_size);
-       return mem_reg(tmp_size, opcode, instr, rex);
+        return mem_reg(tmp_size, opcode, instr, rex);
 
     case 0x8B: /* mov m32/16, r32/16 */
-       instr->instr = INSTR_MOV;
-       GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
-       return mem_reg(instr->op_size, opcode, instr, rex);
+        instr->instr = INSTR_MOV;
+        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
+        return mem_reg(instr->op_size, opcode, instr, rex);
 
     case 0xA0: /* mov <addr>, al */
-       instr->instr = INSTR_MOV;
-       instr->op_size = BYTE;
+        instr->instr = INSTR_MOV;
+        instr->op_size = BYTE;
         GET_OP_SIZE_FOR_BYTE(tmp_size);
-       return mem_acc(tmp_size, instr);
+        return mem_acc(tmp_size, instr);
 
     case 0xA1: /* mov <addr>, ax/eax */
-       instr->instr = INSTR_MOV;
-       GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
-       return mem_acc(instr->op_size, instr);
+        instr->instr = INSTR_MOV;
+        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
+        return mem_acc(instr->op_size, instr);
 
     case 0xA2: /* mov al, <addr> */
-       instr->instr = INSTR_MOV;
-       instr->op_size = BYTE;
+        instr->instr = INSTR_MOV;
+        instr->op_size = BYTE;
         GET_OP_SIZE_FOR_BYTE(tmp_size);
-       return acc_mem(tmp_size, instr);
+        return acc_mem(tmp_size, instr);
 
     case 0xA3: /* mov ax/eax, <addr> */
-       instr->instr = INSTR_MOV;
-       GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
-       return acc_mem(instr->op_size, instr);
+        instr->instr = INSTR_MOV;
+        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
+        return acc_mem(instr->op_size, instr);
 
     case 0xA4: /* movsb */
-       instr->instr = INSTR_MOVS;
-       instr->op_size = BYTE;
+        instr->instr = INSTR_MOVS;
+        instr->op_size = BYTE;
         return DECODE_success;
             
     case 0xA5: /* movsw/movsl */
-       instr->instr = INSTR_MOVS;
-       GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
-       return DECODE_success;
+        instr->instr = INSTR_MOVS;
+        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
+        return DECODE_success;
     
     case 0xAA: /* stosb */
-       instr->instr = INSTR_STOS;
-       instr->op_size = BYTE;
+        instr->instr = INSTR_STOS;
+        instr->op_size = BYTE;
         return DECODE_success;
 
     case 0xAB: /* stosw/stosl */
-       instr->instr = INSTR_STOS;
-       GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
-       return DECODE_success;
+        instr->instr = INSTR_STOS;
+        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
+        return DECODE_success;
                     
     case 0xC6:
-       if (((opcode[1] >> 3) & 7) == 0) { /* mov $imm8, m8 */
-           instr->instr = INSTR_MOV;
-           instr->op_size = BYTE;
-
-           instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
-           instr->immediate = get_immediate(vm86, opcode+1, instr->op_size);
-           instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);
+        if (((opcode[1] >> 3) & 7) == 0) { /* mov $imm8, m8 */
+            instr->instr = INSTR_MOV;
+            instr->op_size = BYTE;
+
+            instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
+            instr->immediate = get_immediate(vm86, opcode+1, instr->op_size);
+            instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);
             
             return DECODE_success;
-       } else
-           return DECODE_failure;
+        } else
+            return DECODE_failure;
             
     case 0xC7:
-       if (((opcode[1] >> 3) & 7) == 0) { /* mov $imm16/32, m16/32 */
-           instr->instr = INSTR_MOV;
-           GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
-
-           instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
-           instr->immediate = get_immediate(vm86, opcode+1, instr->op_size);
-           instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);
+        if (((opcode[1] >> 3) & 7) == 0) { /* mov $imm16/32, m16/32 */
+            instr->instr = INSTR_MOV;
+            GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
+
+            instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
+            instr->immediate = get_immediate(vm86, opcode+1, instr->op_size);
+            instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);
             
             return DECODE_success;
-       } else
-           return DECODE_failure;
+        } else
+            return DECODE_failure;
 
     case 0xF6:
-       if (((opcode[1] >> 3) & 7) == 0) { /* testb $imm8, m8 */
-           instr->instr = INSTR_TEST;
-           instr->op_size = BYTE;
-
-           instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
-           instr->immediate = get_immediate(vm86, opcode+1, instr->op_size);
-           instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);
-
-           return DECODE_success;
-       } else
-           return DECODE_failure;
+        if (((opcode[1] >> 3) & 7) == 0) { /* testb $imm8, m8 */
+            instr->instr = INSTR_TEST;
+            instr->op_size = BYTE;
+
+            instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
+            instr->immediate = get_immediate(vm86, opcode+1, instr->op_size);
+            instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);
+
+            return DECODE_success;
+        } else
+            return DECODE_failure;
 
     case 0x0F:
-       break;
+        break;
 
     default:
-       printf("%x, This opcode isn't handled yet!\n", *opcode);
+        printf("%x, This opcode isn't handled yet!\n", *opcode);
         return DECODE_failure;
     }
 
     switch (*++opcode) {
     case 0xB6: /* movz m8, r16/r32 */
-       instr->instr = INSTR_MOVZ;
-       GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
-       index = get_index(opcode + 1, rex);
-       instr->operand[0] = mk_operand(BYTE, 0, 0, MEMORY);
-       instr->operand[1] = mk_operand(instr->op_size, index, 0, REGISTER);
-       return DECODE_success;
+        instr->instr = INSTR_MOVZ;
+        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
+        index = get_index(opcode + 1, rex);
+        instr->operand[0] = mk_operand(BYTE, 0, 0, MEMORY);
+        instr->operand[1] = mk_operand(instr->op_size, index, 0, REGISTER);
+        return DECODE_success;
 
     case 0xB7: /* movz m16, r32 */
-       instr->instr = INSTR_MOVZ;
-       index = get_index(opcode + 1, rex);
-       if (rex & 0x8) {
-          instr->op_size = LONG;
-          instr->operand[1] = mk_operand(QUAD, index, 0, REGISTER);
-       } else {
-          instr->op_size = WORD;
-          instr->operand[1] = mk_operand(LONG, index, 0, REGISTER);
-       }
-       instr->operand[0] = mk_operand(instr->op_size, 0, 0, MEMORY);
-       return DECODE_success;
+        instr->instr = INSTR_MOVZ;
+        index = get_index(opcode + 1, rex);
+        if (rex & 0x8) {
+            instr->op_size = LONG;
+            instr->operand[1] = mk_operand(QUAD, index, 0, REGISTER);
+        } else {
+            instr->op_size = WORD;
+            instr->operand[1] = mk_operand(LONG, index, 0, REGISTER);
+        }
+        instr->operand[0] = mk_operand(instr->op_size, 0, 0, MEMORY);
+        return DECODE_success;
 
     default:
-       printf("0f %x, This opcode isn't handled yet\n", *opcode);
-       return DECODE_failure;
+        printf("0f %x, This opcode isn't handled yet\n", *opcode);
+        return DECODE_failure;
     }
 }
 
@@ -599,7 +599,7 @@
 }
 
 void send_mmio_req(unsigned char type, unsigned long gpa, 
-          unsigned long count, int size, long value, int dir, int pvalid)
+                   unsigned long count, int size, long value, int dir, int pvalid)
 {
     struct vcpu *d = current;
     vcpu_iodata_t *vio;
@@ -636,12 +636,12 @@
     p->df = regs->eflags & EF_DF ? 1 : 0;
 
     if (pvalid) {
-       if (vmx_paging_enabled(current))
-           p->u.pdata = (void *) gva_to_gpa(value);
+        if (vmx_paging_enabled(current))
+            p->u.pdata = (void *) gva_to_gpa(value);
         else
-           p->u.pdata = (void *) value; /* guest VA == guest PA */
+            p->u.pdata = (void *) value; /* guest VA == guest PA */
     } else
-       p->u.data = value;
+        p->u.data = value;
 
     p->state = STATE_IOREQ_READY;
 
@@ -656,7 +656,7 @@
 }
 
 static void mmio_operands(int type, unsigned long gpa, struct instruction *inst,
-               struct mi_per_cpu_info *mpcip, struct cpu_user_regs *regs)
+                          struct mi_per_cpu_info *mpcip, struct cpu_user_regs *regs)
 {
     unsigned long value = 0;
     int index, size;
@@ -669,24 +669,24 @@
     mpcip->operand[1] = inst->operand[1]; /* destination */
 
     if (inst->operand[0] & REGISTER) { /* dest is memory */
-       index = operand_index(inst->operand[0]);
-       value = get_reg_value(size, index, 0, regs);
-       send_mmio_req(type, gpa, 1, size, value, IOREQ_WRITE, 0);
+        index = operand_index(inst->operand[0]);
+        value = get_reg_value(size, index, 0, regs);
+        send_mmio_req(type, gpa, 1, size, value, IOREQ_WRITE, 0);
     } else if (inst->operand[0] & IMMEDIATE) { /* dest is memory */
-       value = inst->immediate;
-       send_mmio_req(type, gpa, 1, size, value, IOREQ_WRITE, 0);
+        value = inst->immediate;
+        send_mmio_req(type, gpa, 1, size, value, IOREQ_WRITE, 0);
     } else if (inst->operand[0] & MEMORY) { /* dest is register */
-       /* send the request and wait for the value */
-       send_mmio_req(type, gpa, 1, size, 0, IOREQ_READ, 0);
+        /* send the request and wait for the value */
+        send_mmio_req(type, gpa, 1, size, 0, IOREQ_READ, 0);
     } else {
-       printf("mmio_operands: invalid operand\n");
-       domain_crash_synchronous();
+        printf("mmio_operands: invalid operand\n");
+        domain_crash_synchronous();
     }
 }
 
 #define GET_REPEAT_COUNT() \
      (mmio_inst.flags & REPZ ? (vm86 ? regs->ecx & 0xFFFF : regs->ecx) : 1)
-       
+ 
 void handle_mmio(unsigned long va, unsigned long gpa)
 {
     unsigned long eip, eflags, cs;
@@ -721,11 +721,11 @@
     init_instruction(&mmio_inst);
     
     if (vmx_decode(inst, &mmio_inst) == DECODE_failure) {
-       printf("mmio opcode: va 0x%lx, gpa 0x%lx, len %ld:",
-               va, gpa, inst_len);
-       for (i = 0; i < inst_len; i++)
-           printf(" %02x", inst[i] & 0xFF);
-       printf("\n");
+        printf("mmio opcode: va 0x%lx, gpa 0x%lx, len %ld:",
+               va, gpa, inst_len);
+        for (i = 0; i < inst_len; i++)
+            printf(" %02x", inst[i] & 0xFF);
+        printf("\n");
         domain_crash_synchronous();
     }
 
@@ -734,116 +734,116 @@
 
     switch (mmio_inst.instr) {
     case INSTR_MOV:
-       mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs);
-       break;
+        mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs);
+        break;
 
     case INSTR_MOVS:
     {
-       unsigned long count = GET_REPEAT_COUNT();
-       unsigned long size = mmio_inst.op_size;
-       int sign = regs->eflags & EF_DF ? -1 : 1;
-       unsigned long addr = 0;
-       int dir;
-
-       /* determine non-MMIO address */
-       if (vm86) {
-           unsigned long seg;
-
-           __vmread(GUEST_ES_SELECTOR, &seg);
-           if (((seg << 4) + (regs->edi & 0xFFFF)) == va) {
-               dir = IOREQ_WRITE;
-               __vmread(GUEST_DS_SELECTOR, &seg);
-               addr = (seg << 4) + (regs->esi & 0xFFFF);
-           } else {
-               dir = IOREQ_READ;
-               addr = (seg << 4) + (regs->edi & 0xFFFF);
-           }
-       } else {
-           if (va == regs->edi) {
-               dir = IOREQ_WRITE;
-               addr = regs->esi;
-           } else {
-               dir = IOREQ_READ;
-               addr = regs->edi;
-           }
-       }
-
-       mpcip->flags = mmio_inst.flags;
-       mpcip->instr = mmio_inst.instr;
-
-       /*
-        * In case of a movs spanning multiple pages, we break the accesses
-        * up into multiple pages (the device model works with non-continguous
-        * physical guest pages). To copy just one page, we adjust %ecx and
-        * do not advance %eip so that the next "rep movs" copies the next page.
-        * Unaligned accesses, for example movsl starting at PGSZ-2, are
-        * turned into a single copy where we handle the overlapping memory
-        * copy ourself. After this copy succeeds, "rep movs" is executed
-        * again.
-        */
-       if ((addr & PAGE_MASK) != ((addr + size - 1) & PAGE_MASK)) {
-           unsigned long value = 0;
-
-           mpcip->flags |= OVERLAP;
-
-           regs->eip -= inst_len; /* do not advance %eip */
-
-           if (dir == IOREQ_WRITE)
-               vmx_copy(&value, addr, size, VMX_COPY_IN);
-           send_mmio_req(IOREQ_TYPE_COPY, gpa, 1, size, value, dir, 0);
-       } else {
-           if ((addr & PAGE_MASK) != ((addr + count * size - 1) & PAGE_MASK)) {
-               regs->eip -= inst_len; /* do not advance %eip */
-
-               if (sign > 0)
-                   count = (PAGE_SIZE - (addr & ~PAGE_MASK)) / size;
-               else
-                   count = (addr & ~PAGE_MASK) / size;
-           }
-
-           send_mmio_req(IOREQ_TYPE_COPY, gpa, count, size, addr, dir, 1);
-       }
+        unsigned long count = GET_REPEAT_COUNT();
+        unsigned long size = mmio_inst.op_size;
+        int sign = regs->eflags & EF_DF ? -1 : 1;
+        unsigned long addr = 0;
+        int dir;
+
+        /* determine non-MMIO address */
+        if (vm86) {
+            unsigned long seg;
+
+            __vmread(GUEST_ES_SELECTOR, &seg);
+            if (((seg << 4) + (regs->edi & 0xFFFF)) == va) {
+                dir = IOREQ_WRITE;
+                __vmread(GUEST_DS_SELECTOR, &seg);
+                addr = (seg << 4) + (regs->esi & 0xFFFF);
+            } else {
+                dir = IOREQ_READ;
+                addr = (seg << 4) + (regs->edi & 0xFFFF);
+            }
+        } else {
+            if (va == regs->edi) {
+                dir = IOREQ_WRITE;
+                addr = regs->esi;
+            } else {
+                dir = IOREQ_READ;
+                addr = regs->edi;
+            }
+        }
+
+        mpcip->flags = mmio_inst.flags;
+        mpcip->instr = mmio_inst.instr;
+
+        /*
+         * In case of a movs spanning multiple pages, we break the accesses
+         * up into multiple pages (the device model works with non-continguous
+         * physical guest pages). To copy just one page, we adjust %ecx and
+         * do not advance %eip so that the next "rep movs" copies the next page.
+         * Unaligned accesses, for example movsl starting at PGSZ-2, are
+         * turned into a single copy where we handle the overlapping memory
+         * copy ourself. After this copy succeeds, "rep movs" is executed
+         * again.
+         */
+        if ((addr & PAGE_MASK) != ((addr + size - 1) & PAGE_MASK)) {
+            unsigned long value = 0;
+
+            mpcip->flags |= OVERLAP;
+
+            regs->eip -= inst_len; /* do not advance %eip */
+
+            if (dir == IOREQ_WRITE)
+                vmx_copy(&value, addr, size, VMX_COPY_IN);
+            send_mmio_req(IOREQ_TYPE_COPY, gpa, 1, size, value, dir, 0);
+        } else {
+            if ((addr & PAGE_MASK) != ((addr + count * size - 1) & PAGE_MASK)) {
+                regs->eip -= inst_len; /* do not advance %eip */
+
+                if (sign > 0)
+                    count = (PAGE_SIZE - (addr & ~PAGE_MASK)) / size;
+                else
+                    count = (addr & ~PAGE_MASK) / size;
+            }
+
+            send_mmio_req(IOREQ_TYPE_COPY, gpa, count, size, addr, dir, 1);
+        }
         break;
     }
 
     case INSTR_MOVZ:
-       mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs);
-       break;
+        mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs);
+        break;
 
     case INSTR_STOS:
-       /*
-        * Since the destination is always in (contiguous) mmio space we don't
-        * need to break it up into pages.
-        */
-       mpcip->flags = mmio_inst.flags;
-       mpcip->instr = mmio_inst.instr;
+        /*
+         * Since the destination is always in (contiguous) mmio space we don't
+         * need to break it up into pages.
+         */
+        mpcip->flags = mmio_inst.flags;
+        mpcip->instr = mmio_inst.instr;
         send_mmio_req(IOREQ_TYPE_COPY, gpa,
-           GET_REPEAT_COUNT(), mmio_inst.op_size, regs->eax, IOREQ_WRITE, 0);
-       break;
+                      GET_REPEAT_COUNT(), mmio_inst.op_size, regs->eax, IOREQ_WRITE, 0);
+        break;
 
     case INSTR_OR:
-       mmio_operands(IOREQ_TYPE_OR, gpa, &mmio_inst, mpcip, regs);
-       break;
+        mmio_operands(IOREQ_TYPE_OR, gpa, &mmio_inst, mpcip, regs);
+        break;
 
     case INSTR_AND:
-       mmio_operands(IOREQ_TYPE_AND, gpa, &mmio_inst, mpcip, regs);
-       break;
+        mmio_operands(IOREQ_TYPE_AND, gpa, &mmio_inst, mpcip, regs);
+        break;
 
     case INSTR_XOR:
-       mmio_operands(IOREQ_TYPE_XOR, gpa, &mmio_inst, mpcip, regs);
-       break;
+        mmio_operands(IOREQ_TYPE_XOR, gpa, &mmio_inst, mpcip, regs);
+        break;
 
     case INSTR_CMP:
-       mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs);
-       break;
+        mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs);
+        break;
 
     case INSTR_TEST:
-       mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs);
-       break;
+        mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs);
+        break;
 
     default:
-       printf("Unhandled MMIO instruction\n");
-       domain_crash_synchronous();
+        printf("Unhandled MMIO instruction\n");
+        domain_crash_synchronous();
     }
 }
 
diff -r 4508c22dc458 -r 3feb7fa331ed xen/arch/x86/vmx_vmcs.c
--- a/xen/arch/x86/vmx_vmcs.c   Sun Sep 11 16:36:24 2005
+++ b/xen/arch/x86/vmx_vmcs.c   Sun Sep 11 16:44:23 2005
@@ -179,10 +179,10 @@
     p = map_domain_page(mpfn);
     d->domain->arch.vmx_platform.shared_page_va = (unsigned long)p;
 
-   VMX_DBG_LOG(DBG_LEVEL_1, "eport: %x\n", iopacket_port(d->domain));
-
-   clear_bit(iopacket_port(d->domain), 
-             &d->domain->shared_info->evtchn_mask[0]);
+    VMX_DBG_LOG(DBG_LEVEL_1, "eport: %x\n", iopacket_port(d->domain));
+
+    clear_bit(iopacket_port(d->domain), 
+              &d->domain->shared_info->evtchn_mask[0]);
 
     return 0;
 }
@@ -497,7 +497,7 @@
     __vmptrst(old_phys_ptr);
     if ((error = load_vmcs(arch_vmx, vmcs_phys_ptr))) {
         printk("modify_vmcs: load_vmcs failed: VMCS = %lx\n",
-                (unsigned long) vmcs_phys_ptr);
+               (unsigned long) vmcs_phys_ptr);
         return -EINVAL; 
     }
     load_cpu_user_regs(regs);
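
As context for the clear_bit() hunk above: a masked event channel never
delivers the device model's I/O-completion notification, so the port has to
be unmasked once the shared page is mapped. A hedged sketch of just that
step, using the fields visible in this patch (the wrapper name is
hypothetical):

    /* Sketch: unmask the ioreq event channel so completions are delivered. */
    static void unmask_iopacket_port(struct domain *d)
    {
        int port = iopacket_port(d);    /* event-channel port for ioreqs */
        clear_bit(port, &d->shared_info->evtchn_mask[0]);
    }
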
diff -r 4508c22dc458 -r 3feb7fa331ed xen/include/asm-x86/shadow_64.h
--- a/xen/include/asm-x86/shadow_64.h   Sun Sep 11 16:36:24 2005
+++ b/xen/include/asm-x86/shadow_64.h   Sun Sep 11 16:44:23 2005
@@ -353,7 +353,7 @@
             entry_remove_flags(sle, _PAGE_PSE);
 
             if ( shadow_mode_log_dirty(d) ||
-                !(entry_get_flags(gle) & _PAGE_DIRTY) )
+                 !(entry_get_flags(gle) & _PAGE_DIRTY) )
             {
                 pgentry_64_t *l1_p;
                 int i;
@@ -365,8 +365,9 @@
                 unmap_domain_page(l1_p);
             }
         } else {
-            sle = entry_from_pfn(smfn,
-                                (entry_get_flags(gle) | _PAGE_RW | _PAGE_ACCESSED) & ~_PAGE_AVAIL);
+            sle = entry_from_pfn(
+                smfn,
+                (entry_get_flags(gle) | _PAGE_RW | _PAGE_ACCESSED) & ~_PAGE_AVAIL);
             entry_add_flags(gle, _PAGE_ACCESSED);
         }
         // XXX mafetter: Hmm...
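
The substance of this hunk is the flag rule for freshly built shadow entries:
inherit the guest flags, force _PAGE_RW and _PAGE_ACCESSED, and strip the
software-available bits; in log-dirty mode, or while the guest entry is still
clean, write access is withheld so the first store faults into the shadow
code. A simplified sketch of that rule under those assumptions
(make_shadow_entry() is a hypothetical name; the real code walks an L1 table
in the write-protect case):

    /* Sketch of the propagation rule, not the literal Xen control flow. */
    static pgentry_64_t make_shadow_entry(struct domain *d, pgentry_64_t gle,
                                          unsigned long smfn)
    {
        unsigned long flags =
            (entry_get_flags(gle) | _PAGE_RW | _PAGE_ACCESSED) & ~_PAGE_AVAIL;

        /* Withhold write access while writes still need to be tracked. */
        if ( shadow_mode_log_dirty(d) ||
             !(entry_get_flags(gle) & _PAGE_DIRTY) )
            flags &= ~_PAGE_RW;

        return entry_from_pfn(smfn, flags);
    }
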
diff -r 4508c22dc458 -r 3feb7fa331ed xen/include/asm-x86/shadow_ops.h
--- a/xen/include/asm-x86/shadow_ops.h  Sun Sep 11 16:36:24 2005
+++ b/xen/include/asm-x86/shadow_ops.h  Sun Sep 11 16:44:23 2005
@@ -127,4 +127,4 @@
 #define guest_va_to_l1mfn       va_to_l1mfn
 #endif
 
-#endif /* _XEN_SHADOW_OPS_H */
+#endif /* _XEN_SHADOW_OPS_H */
diff -r 4508c22dc458 -r 3feb7fa331ed xen/include/asm-x86/vmx.h
--- a/xen/include/asm-x86/vmx.h Sun Sep 11 16:36:24 2005
+++ b/xen/include/asm-x86/vmx.h Sun Sep 11 16:44:23 2005
@@ -150,9 +150,9 @@
 #define TYPE_MOV_TO_CR                  (0 << 4) 
 #define TYPE_MOV_FROM_CR                (1 << 4)
 #define TYPE_CLTS                       (2 << 4)
-#define        TYPE_LMSW                       (3 << 4)
+#define TYPE_LMSW                       (3 << 4)
 #define CONTROL_REG_ACCESS_REG          0xf00   /* 10:8, general purpose register */
-#define        LMSW_SOURCE_DATA                (0xFFFF << 16) /* 16:31 lmsw source */
+#define LMSW_SOURCE_DATA                (0xFFFF << 16) /* 16:31 lmsw source */
 #define REG_EAX                         (0 << 8) 
 #define REG_ECX                         (1 << 8) 
 #define REG_EDX                         (2 << 8) 
diff -r 4508c22dc458 -r 3feb7fa331ed xen/include/asm-x86/vmx_platform.h
--- a/xen/include/asm-x86/vmx_platform.h        Sun Sep 11 16:36:24 2005
+++ b/xen/include/asm-x86/vmx_platform.h        Sun Sep 11 16:44:23 2005
@@ -16,6 +16,7 @@
  * Place - Suite 330, Boston, MA 02111-1307 USA.
  *
  */
+
 #ifndef __ASM_X86_VMX_PLATFORM_H__
 #define __ASM_X86_VMX_PLATFORM_H__
 
@@ -52,19 +53,19 @@
 #define REPNZ   0x2
 #define OVERLAP 0x4
 
-#define        INSTR_PIO       1
-#define INSTR_OR       2
-#define INSTR_AND      3
-#define INSTR_XOR      4
-#define INSTR_CMP      5
-#define INSTR_MOV      6
-#define INSTR_MOVS     7
-#define INSTR_MOVZ     8
-#define INSTR_STOS     9
-#define INSTR_TEST     10
+#define INSTR_PIO      1
+#define INSTR_OR       2
+#define INSTR_AND      3
+#define INSTR_XOR      4
+#define INSTR_CMP      5
+#define INSTR_MOV      6
+#define INSTR_MOVS     7
+#define INSTR_MOVZ     8
+#define INSTR_STOS     9
+#define INSTR_TEST     10
 
 struct instruction {
-    __s8    instr;     /* instruction type */
+    __s8    instr;      /* instruction type */
     __s16   op_size;    /* the operand's bit size, e.g. 16-bit or 32-bit */
     __u64   immediate;
     __u16   seg_sel;    /* segmentation selector */
@@ -76,18 +77,18 @@
 
 struct mi_per_cpu_info {
     int                    flags;
-    int                           instr;               /* instruction */
-    unsigned long          operand[2];         /* operands */
-    unsigned long          immediate;          /* immediate portion */
-    struct cpu_user_regs   *inst_decoder_regs; /* current context */
+    int                    instr;               /* instruction */
+    unsigned long          operand[2];          /* operands */
+    unsigned long          immediate;           /* immediate portion */
+    struct cpu_user_regs   *inst_decoder_regs;  /* current context */
 };
 
 struct virtual_platform_def {
-    unsigned long          *real_mode_data;    /* E820, etc. */
+    unsigned long          *real_mode_data;    /* E820, etc. */
     unsigned long          shared_page_va;
     struct vmx_virpit_t    vmx_pit;
     struct vmx_handler_t   vmx_handler;
-    struct mi_per_cpu_info mpci;               /* MMIO */
+    struct mi_per_cpu_info mpci;               /* MMIO */
 };
 
 extern void handle_mmio(unsigned long, unsigned long);
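
These structures are the contract between the instruction decoder and
handle_mmio(): one decoded operation plus the per-CPU state needed to finish
it when the device model replies. A hedged illustration of how a decoded
record might look for a "rep movsl" (values are illustrative, not real
decoder output; REPZ is assumed to be defined alongside REPNZ above):

    /* Sketch: a decoded "rep movsl" as struct instruction would carry it. */
    struct instruction insn = {
        .instr     = INSTR_MOVS,    /* string move, split at page boundaries */
        .op_size   = 32,            /* operand bit size, per the field comment */
        .flags     = REPZ,          /* rep prefix present */
        .immediate = 0,             /* no immediate operand */
    };
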
diff -r 4508c22dc458 -r 3feb7fa331ed xen/include/asm-x86/vmx_virpit.h
--- a/xen/include/asm-x86/vmx_virpit.h  Sun Sep 11 16:36:24 2005
+++ b/xen/include/asm-x86/vmx_virpit.h  Sun Sep 11 16:44:23 2005
@@ -1,5 +1,6 @@
 #ifndef _VMX_VIRPIT_H
 #define _VMX_VIRPIT_H
+
 #include <xen/config.h>
 #include <xen/init.h>
 #include <xen/lib.h>
@@ -17,14 +18,14 @@
 
 struct vmx_virpit_t {
     /* for simulation of counter 0 in mode 2*/
-    int vector;                                /* the pit irq vector */
-    unsigned int period;               /* the frequency. e.g. 10ms*/
+    int vector;                         /* the pit irq vector */
+    unsigned int period;                /* the interrupt period, e.g. 10ms */
     s_time_t scheduled;                 /* scheduled timer interrupt */
-    unsigned int channel;              /* the pit channel, counter 0~2 */
+    unsigned int channel;               /* the pit channel, counter 0~2 */
     u64  *intr_bitmap;
-    unsigned int pending_intr_nr;      /* the couner for pending timer interrupts */
-    unsigned long long inject_point;   /* the time inject virt intr */
-    struct ac_timer pit_timer;         /* periodic timer for mode 2*/
+    unsigned int pending_intr_nr;       /* the counter for pending timer interrupts */
+    unsigned long long inject_point;    /* the time to inject virt intr */
+    struct ac_timer pit_timer;          /* periodic timer for mode 2 */
     int first_injected;                 /* flag to prevent shadow window */
 
     /* virtual PIT state for handle related I/O */
@@ -32,8 +33,8 @@
     int count_LSB_latched;
     int count_MSB_latched;
 
-    unsigned int count;                /* the 16 bit channel count */
-    unsigned int init_val;     /* the init value for the counter */
+    unsigned int count;                 /* the 16 bit channel count */
+    unsigned int init_val;              /* the init value for the counter */
 
 } ;
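
The PIT state above follows the i8254 programming model: counter 0 in mode 2
(rate generator) decrements at 1.193182 MHz, so the init_val the guest
programs fixes the interrupt period. A hedged sketch of that conversion
(pit_period_ns() is a hypothetical helper; the clock rate is the standard
PIT input clock):

    /* Sketch: interrupt period implied by the programmed 16-bit count. */
    #define PIT_INPUT_HZ 1193182ULL         /* i8254 input clock */

    static unsigned long long pit_period_ns(unsigned int init_val)
    {
        /* On real hardware a programmed count of 0 means 65536. */
        unsigned long long count = init_val ? init_val : 0x10000;
        return count * 1000000000ULL / PIT_INPUT_HZ;
    }

An init_val of 11932 yields roughly 10,000,000 ns, matching the 10ms example
in the period comment above.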
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

