
[Xen-changelog] Move mmio operation structure from domain to vcpu.



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 1b4ad6eb6968ce2258a7c2338d5ff9dab37388e4
# Parent  62d815160f01020359ef78705c5ce13614f766b2
Move mmio operation structure from domain to vcpu.
Also do some cleanup.

Signed-off-by: Xin Li <xin.b.li@xxxxxxxxx>
Signed-off-by: Nakajima Jun <nakajima.jun@xxxxxxxxx>
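
For readers skimming the patch: the change moves the in-flight MMIO/PIO decode
state from a single per-domain record (vmx_platform.mpci, a struct
mi_per_cpu_info shared by every VCPU of a VMX domain) into each VCPU's own VMX
state (arch.arch_vmx.mmio_op, the new struct mmio_op), presumably so that
several VCPUs can each have an I/O instruction in flight without clobbering one
another's decode state. Below is a minimal, self-contained C sketch of that
move. It is not Xen code: the structures are reduced to skeletons and the
vcpu_mmio_op() helper is purely hypothetical; only the mmio_op field names and
the new access path come from the patch itself.

/*
 * Illustration only: simplified stand-ins for the Xen structures touched
 * by this patch.  The mmio_op fields and the per-VCPU access path mirror
 * the patch; everything else is a skeleton, and vcpu_mmio_op() is a
 * hypothetical helper, not part of Xen.
 */
#include <stdio.h>

struct cpu_user_regs;                        /* opaque for this sketch */

struct mmio_op {                             /* was: struct mi_per_cpu_info */
    int                   flags;
    int                   instr;             /* instruction */
    unsigned long         operand[2];        /* operands */
    unsigned long         immediate;         /* immediate portion */
    struct cpu_user_regs *inst_decoder_regs; /* current context */
};

/* New home: one mmio_op per VCPU, inside its VMX-specific state. */
struct arch_vmx_struct {
    struct mmio_op        mmio_op;           /* MMIO */
    /* ... VMCS, MSR state, I/O bitmaps, etc. elided ... */
};

struct vcpu_arch { struct arch_vmx_struct arch_vmx; };
struct vcpu      { int vcpu_id; struct vcpu_arch arch; };

/*
 * Old access path (per domain, shared by all its VCPUs):
 *     current->domain->arch.vmx_platform.mpci
 * New access path (per VCPU), wrapped in a hypothetical helper:
 */
static struct mmio_op *vcpu_mmio_op(struct vcpu *v)
{
    return &v->arch.arch_vmx.mmio_op;
}

int main(void)
{
    struct vcpu v0 = { .vcpu_id = 0 }, v1 = { .vcpu_id = 1 };

    /* Each VCPU now records its own in-flight I/O decode state. */
    vcpu_mmio_op(&v0)->instr = 1;            /* e.g. INSTR_PIO */
    vcpu_mmio_op(&v1)->instr = 2;            /* independent of vcpu0 */

    printf("vcpu%d instr=%d, vcpu%d instr=%d\n",
           v0.vcpu_id, vcpu_mmio_op(&v0)->instr,
           v1.vcpu_id, vcpu_mmio_op(&v1)->instr);
    return 0;
}

The rest of the diff is the mechanical rename from mpcip/mpci_p to mmio_opp,
switching the access path at each call site, plus trailing-whitespace cleanup.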

diff -r 62d815160f01 -r 1b4ad6eb6968 xen/arch/x86/vmx.c
--- a/xen/arch/x86/vmx.c        Fri Oct  7 15:49:29 2005
+++ b/xen/arch/x86/vmx.c        Fri Oct  7 22:17:24 2005
@@ -659,14 +659,14 @@
 static void vmx_io_instruction(struct cpu_user_regs *regs,
                                unsigned long exit_qualification, unsigned long inst_len)
 {
-    struct mi_per_cpu_info *mpcip;
+    struct mmio_op *mmio_opp;
     unsigned long eip, cs, eflags;
     unsigned long port, size, dir;
     int vm86;
 
-    mpcip = &current->domain->arch.vmx_platform.mpci;
-    mpcip->instr = INSTR_PIO;
-    mpcip->flags = 0;
+    mmio_opp = &current->arch.arch_vmx.mmio_op;
+    mmio_opp->instr = INSTR_PIO;
+    mmio_opp->flags = 0;
 
     __vmread(GUEST_RIP, &eip);
     __vmread(GUEST_CS_SELECTOR, &cs);
@@ -700,7 +700,7 @@
             addr = dir == IOREQ_WRITE ? regs->esi : regs->edi;
 
         if (test_bit(5, &exit_qualification)) { /* "rep" prefix */
-            mpcip->flags |= REPZ;
+            mmio_opp->flags |= REPZ;
             count = vm86 ? regs->ecx & 0xFFFF : regs->ecx;
         }
 
@@ -711,7 +711,7 @@
         if ((addr & PAGE_MASK) != ((addr + size - 1) & PAGE_MASK)) {
             unsigned long value = 0;
 
-            mpcip->flags |= OVERLAP;
+            mmio_opp->flags |= OVERLAP;
             if (dir == IOREQ_WRITE)
                 vmx_copy(&value, addr, size, VMX_COPY_IN);
             send_pio_req(regs, port, 1, size, value, dir, 0);
@@ -1695,7 +1695,7 @@
                         (unsigned long)regs.eax, (unsigned long)regs.ebx,
                         (unsigned long)regs.ecx, (unsigned long)regs.edx,
                         (unsigned long)regs.esi, (unsigned long)regs.edi);
-            v->domain->arch.vmx_platform.mpci.inst_decoder_regs = &regs;
+            v->arch.arch_vmx.mmio_op.inst_decoder_regs = &regs;
 
             if (!(error = vmx_do_page_fault(va, &regs))) {
                 /*
diff -r 62d815160f01 -r 1b4ad6eb6968 xen/arch/x86/vmx_io.c
--- a/xen/arch/x86/vmx_io.c     Fri Oct  7 15:49:29 2005
+++ b/xen/arch/x86/vmx_io.c     Fri Oct  7 22:17:24 2005
@@ -1,5 +1,5 @@
 /*
- * vmx_io.c: handling I/O, interrupts related VMX entry/exit 
+ * vmx_io.c: handling I/O, interrupts related VMX entry/exit
  * Copyright (c) 2004, Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
@@ -42,7 +42,7 @@
 #ifdef CONFIG_VMX
 #if defined (__i386__)
 void load_cpu_user_regs(struct cpu_user_regs *regs)
-{ 
+{
     /*
      * Write the guest register value into VMCS
      */
@@ -52,7 +52,7 @@
     __vmwrite(GUEST_RFLAGS, regs->eflags);
     if (regs->eflags & EF_TF)
         __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
-    else 
+    else
         __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
 
     __vmwrite(GUEST_CS_SELECTOR, regs->cs);
@@ -189,7 +189,7 @@
     __vmwrite(GUEST_RFLAGS, regs->rflags);
     if (regs->rflags & EF_TF)
         __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
-    else 
+    else
         __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
 
     __vmwrite(GUEST_CS_SELECTOR, regs->cs);
@@ -265,52 +265,52 @@
     }
 
     switch (index) {
-    case 0: 
+    case 0:
         __set_reg_value(&regs->rax, size, value);
         break;
-    case 1: 
+    case 1:
         __set_reg_value(&regs->rcx, size, value);
         break;
-    case 2: 
+    case 2:
         __set_reg_value(&regs->rdx, size, value);
         break;
-    case 3: 
+    case 3:
         __set_reg_value(&regs->rbx, size, value);
         break;
-    case 4: 
+    case 4:
         __set_reg_value(&regs->rsp, size, value);
         break;
-    case 5: 
+    case 5:
         __set_reg_value(&regs->rbp, size, value);
         break;
-    case 6: 
+    case 6:
         __set_reg_value(&regs->rsi, size, value);
         break;
-    case 7: 
+    case 7:
         __set_reg_value(&regs->rdi, size, value);
         break;
-    case 8: 
+    case 8:
         __set_reg_value(&regs->r8, size, value);
         break;
-    case 9: 
+    case 9:
         __set_reg_value(&regs->r9, size, value);
         break;
-    case 10: 
+    case 10:
         __set_reg_value(&regs->r10, size, value);
         break;
-    case 11: 
+    case 11:
         __set_reg_value(&regs->r11, size, value);
         break;
-    case 12: 
+    case 12:
         __set_reg_value(&regs->r12, size, value);
         break;
-    case 13: 
+    case 13:
         __set_reg_value(&regs->r13, size, value);
         break;
-    case 14: 
+    case 14:
         __set_reg_value(&regs->r14, size, value);
         break;
-    case 15: 
+    case 15:
         __set_reg_value(&regs->r15, size, value);
         break;
     default:
@@ -391,7 +391,7 @@
 }
 
 static void vmx_pio_assist(struct cpu_user_regs *regs, ioreq_t *p,
-                           struct mi_per_cpu_info *mpcip)
+                           struct mmio_op *mmio_opp)
 {
     unsigned long old_eax;
     int sign = p->df ? -1 : 1;
@@ -399,15 +399,15 @@
     if (p->dir == IOREQ_WRITE) {
         if (p->pdata_valid) {
             regs->esi += sign * p->count * p->size;
-            if (mpcip->flags & REPZ)
+            if (mmio_opp->flags & REPZ)
                 regs->ecx -= p->count;
         }
     } else {
-        if (mpcip->flags & OVERLAP) {
+        if (mmio_opp->flags & OVERLAP) {
             unsigned long addr;
 
             regs->edi += sign * p->count * p->size;
-            if (mpcip->flags & REPZ)
+            if (mmio_opp->flags & REPZ)
                 regs->ecx -= p->count;
 
             addr = regs->edi;
@@ -416,7 +416,7 @@
             vmx_copy(&p->u.data, addr, p->size, VMX_COPY_OUT);
         } else if (p->pdata_valid) {
             regs->edi += sign * p->count * p->size;
-            if (mpcip->flags & REPZ)
+            if (mmio_opp->flags & REPZ)
                 regs->ecx -= p->count;
         } else {
             old_eax = regs->eax;
@@ -439,18 +439,18 @@
 }
 
 static void vmx_mmio_assist(struct cpu_user_regs *regs, ioreq_t *p,
-                            struct mi_per_cpu_info *mpcip)
+                            struct mmio_op *mmio_opp)
 {
     int sign = p->df ? -1 : 1;
     int size = -1, index = -1;
     unsigned long value = 0, diff = 0;
     unsigned long src, dst;
 
-    src = mpcip->operand[0];
-    dst = mpcip->operand[1];
+    src = mmio_opp->operand[0];
+    dst = mmio_opp->operand[1];
     size = operand_size(src);
 
-    switch (mpcip->instr) {
+    switch (mmio_opp->instr) {
     case INSTR_MOV:
         if (dst & REGISTER) {
             index = operand_index(dst);
@@ -475,7 +475,7 @@
         regs->esi += sign * p->count * p->size;
         regs->edi += sign * p->count * p->size;
 
-        if ((mpcip->flags & OVERLAP) && p->dir == IOREQ_READ) {
+        if ((mmio_opp->flags & OVERLAP) && p->dir == IOREQ_READ) {
             unsigned long addr = regs->edi;
 
             if (sign > 0)
@@ -483,14 +483,14 @@
             vmx_copy(&p->u.data, addr, p->size, VMX_COPY_OUT);
         }
 
-        if (mpcip->flags & REPZ)
+        if (mmio_opp->flags & REPZ)
             regs->ecx -= p->count;
         break;
 
     case INSTR_STOS:
         sign = p->df ? -1 : 1;
         regs->edi += sign * p->count * p->size;
-        if (mpcip->flags & REPZ)
+        if (mmio_opp->flags & REPZ)
             regs->ecx -= p->count;
         break;
 
@@ -500,7 +500,7 @@
             value = get_reg_value(size, index, 0, regs);
             diff = (unsigned long) p->u.data & value;
         } else if (src & IMMEDIATE) {
-            value = mpcip->immediate;
+            value = mmio_opp->immediate;
             diff = (unsigned long) p->u.data & value;
         } else if (src & MEMORY) {
             index = operand_index(dst);
@@ -527,7 +527,7 @@
             value = get_reg_value(size, index, 0, regs);
             diff = (unsigned long) p->u.data | value;
         } else if (src & IMMEDIATE) {
-            value = mpcip->immediate;
+            value = mmio_opp->immediate;
             diff = (unsigned long) p->u.data | value;
         } else if (src & MEMORY) {
             index = operand_index(dst);
@@ -554,7 +554,7 @@
             value = get_reg_value(size, index, 0, regs);
             diff = (unsigned long) p->u.data ^ value;
         } else if (src & IMMEDIATE) {
-            value = mpcip->immediate;
+            value = mmio_opp->immediate;
             diff = (unsigned long) p->u.data ^ value;
         } else if (src & MEMORY) {
             index = operand_index(dst);
@@ -581,7 +581,7 @@
             value = get_reg_value(size, index, 0, regs);
             diff = (unsigned long) p->u.data - value;
         } else if (src & IMMEDIATE) {
-            value = mpcip->immediate;
+            value = mmio_opp->immediate;
             diff = (unsigned long) p->u.data - value;
         } else if (src & MEMORY) {
             index = operand_index(dst);
@@ -608,7 +608,7 @@
             index = operand_index(src);
             value = get_reg_value(size, index, 0, regs);
         } else if (src & IMMEDIATE) {
-            value = mpcip->immediate;
+            value = mmio_opp->immediate;
         } else if (src & MEMORY) {
             index = operand_index(dst);
             value = get_reg_value(size, index, 0, regs);
@@ -629,21 +629,21 @@
     load_cpu_user_regs(regs);
 }
 
-void vmx_io_assist(struct vcpu *v) 
+void vmx_io_assist(struct vcpu *v)
 {
     vcpu_iodata_t *vio;
     ioreq_t *p;
     struct cpu_user_regs *regs = guest_cpu_user_regs();
-    struct mi_per_cpu_info *mpci_p;
+    struct mmio_op *mmio_opp;
     struct cpu_user_regs *inst_decoder_regs;
 
-    mpci_p = &v->domain->arch.vmx_platform.mpci;
-    inst_decoder_regs = mpci_p->inst_decoder_regs;
+    mmio_opp = &v->arch.arch_vmx.mmio_op;
+    inst_decoder_regs = mmio_opp->inst_decoder_regs;
 
     vio = get_vio(v->domain, v->vcpu_id);
 
     if (vio == 0) {
-        VMX_DBG_LOG(DBG_LEVEL_1, 
+        VMX_DBG_LOG(DBG_LEVEL_1,
                     "bad shared page: %lx", (unsigned long) vio);
         printf("bad shared page: %lx\n", (unsigned long) vio);
         domain_crash_synchronous();
@@ -660,15 +660,15 @@
             clear_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
 
             if (p->type == IOREQ_TYPE_PIO)
-                vmx_pio_assist(regs, p, mpci_p);
+                vmx_pio_assist(regs, p, mmio_opp);
             else
-                vmx_mmio_assist(regs, p, mpci_p);
+                vmx_mmio_assist(regs, p, mmio_opp);
         }
         /* else an interrupt send event raced us */
     }
 }
 
-int vmx_clear_pending_io_event(struct vcpu *v) 
+int vmx_clear_pending_io_event(struct vcpu *v)
 {
     struct domain *d = v->domain;
     int port = iopacket_port(d);
@@ -678,7 +678,7 @@
         clear_bit(port>>5, &v->vcpu_info->evtchn_pending_sel);
 
     /* Note: VMX domains may need upcalls as well */
-    if (!v->vcpu_info->evtchn_pending_sel) 
+    if (!v->vcpu_info->evtchn_pending_sel)
         clear_bit(0, &v->vcpu_info->evtchn_upcall_pending);
 
     /* clear the pending bit for port */
@@ -688,18 +688,18 @@
 /* Because we've cleared the pending events first, we need to guarantee that
  * all events to be handled by xen for VMX domains are taken care of here.
  *
- * interrupts are guaranteed to be checked before resuming guest. 
- * VMX upcalls have been already arranged for if necessary. 
+ * interrupts are guaranteed to be checked before resuming guest.
+ * VMX upcalls have been already arranged for if necessary.
  */
-void vmx_check_events(struct vcpu *d) 
-{
-    /* clear the event *before* checking for work. This should avoid 
+void vmx_check_events(struct vcpu *v)
+{
+    /* clear the event *before* checking for work. This should avoid
        the set-and-check races */
     if (vmx_clear_pending_io_event(current))
-        vmx_io_assist(d);
-}
-
-/* On exit from vmx_wait_io, we're guaranteed to have a I/O response from 
+        vmx_io_assist(v);
+}
+
+/* On exit from vmx_wait_io, we're guaranteed to have a I/O response from
    the device model */
 void vmx_wait_io()
 {
@@ -782,7 +782,7 @@
     return __fls(pintr[0]);
 }
 
-#define BSP_CPU(d)    (!(d->vcpu_id))
+#define BSP_CPU(v)    (!(v->vcpu_id))
 static inline void clear_extint(struct vcpu *v)
 {
     global_iodata_t *spg;
@@ -883,7 +883,7 @@
     return ((eflags & X86_EFLAGS_IF) == 0);
 }
 
-asmlinkage void vmx_intr_assist(void) 
+asmlinkage void vmx_intr_assist(void)
 {
     int intr_type = 0;
     int highest_vector;
@@ -945,19 +945,19 @@
     return;
 }
 
-void vmx_do_resume(struct vcpu *d) 
+void vmx_do_resume(struct vcpu *v)
 {
     vmx_stts();
 
-    if (event_pending(d)) {
-        vmx_check_events(d);
-
-        if (test_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags))
+    if (event_pending(v)) {
+        vmx_check_events(v);
+
+        if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags))
             vmx_wait_io();
     }
 
     /* We can't resume the guest if we're waiting on I/O */
-    ASSERT(!test_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags));
+    ASSERT(!test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags));
 }
 
 #endif /* CONFIG_VMX */
diff -r 62d815160f01 -r 1b4ad6eb6968 xen/arch/x86/vmx_platform.c
--- a/xen/arch/x86/vmx_platform.c       Fri Oct  7 15:49:29 2005
+++ b/xen/arch/x86/vmx_platform.c       Fri Oct  7 22:17:24 2005
@@ -22,8 +22,8 @@
 #include <xen/mm.h>
 #include <asm/shadow.h>
 #include <xen/domain_page.h>
-#include <asm/page.h> 
-#include <xen/event.h> 
+#include <asm/page.h>
+#include <xen/event.h>
 #include <xen/trace.h>
 #include <asm/vmx.h>
 #include <asm/vmx_platform.h>
@@ -69,16 +69,16 @@
     }
 }
 
-long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs) 
+long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
 {
     if (size == BYTE) {
-        switch (index) { 
+        switch (index) {
         case 0: /* %al */
             return (char)(regs->rax & 0xFF);
         case 1: /* %cl */
             return (char)(regs->rcx & 0xFF);
         case 2: /* %dl */
-            return (char)(regs->rdx & 0xFF); 
+            return (char)(regs->rdx & 0xFF);
         case 3: /* %bl */
             return (char)(regs->rbx & 0xFF);
         case 4: /* %ah */
@@ -90,7 +90,7 @@
         case 7: /* %bh */
             return (char)((regs->rbx & 0xFF00) >> 8);
         default:
-            printf("Error: (get_reg_value) Invalid index value\n"); 
+            printf("Error: (get_reg_value) Invalid index value\n");
             domain_crash_synchronous();
         }
         /* NOTREACHED */
@@ -114,7 +114,7 @@
     case 14: return __get_reg_value(regs->r14, size);
     case 15: return __get_reg_value(regs->r15, size);
     default:
-        printf("Error: (get_reg_value) Invalid index value\n"); 
+        printf("Error: (get_reg_value) Invalid index value\n");
         domain_crash_synchronous();
     }
 }
@@ -131,7 +131,7 @@
 }
 
 static inline long __get_reg_value(unsigned long reg, int size)
-{                    
+{
     switch(size) {
     case WORD:
         return (short)(reg & 0xFFFF);
@@ -144,15 +144,15 @@
 }
 
 long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
-{                    
+{
     if (size == BYTE) {
-        switch (index) { 
+        switch (index) {
         case 0: /* %al */
             return (char)(regs->eax & 0xFF);
         case 1: /* %cl */
             return (char)(regs->ecx & 0xFF);
         case 2: /* %dl */
-            return (char)(regs->edx & 0xFF); 
+            return (char)(regs->edx & 0xFF);
         case 3: /* %bl */
             return (char)(regs->ebx & 0xFF);
         case 4: /* %ah */
@@ -164,7 +164,7 @@
         case 7: /* %bh */
             return (char)((regs->ebx & 0xFF00) >> 8);
         default:
-            printf("Error: (get_reg_value) Invalid index value\n"); 
+            printf("Error: (get_reg_value) Invalid index value\n");
             domain_crash_synchronous();
         }
     }
@@ -179,7 +179,7 @@
     case 6: return __get_reg_value(regs->esi, size);
     case 7: return __get_reg_value(regs->edi, size);
     default:
-        printf("Error: (get_reg_value) Invalid index value\n"); 
+        printf("Error: (get_reg_value) Invalid index value\n");
         domain_crash_synchronous();
     }
 }
@@ -283,9 +283,9 @@
 
     //Only one operand in the instruction is register
     if (mod == 3) {
-        return (rm + (rex_b << 3)); 
+        return (rm + (rex_b << 3));
     } else {
-        return (reg + (rex_r << 3)); 
+        return (reg + (rex_r << 3));
     }
     return 0;
 }
@@ -299,7 +299,7 @@
 
     mmio_inst->operand[0] = 0;
     mmio_inst->operand[1] = 0;
-        
+
     mmio_inst->flags = 0;
 }
 
@@ -498,12 +498,12 @@
         instr->instr = INSTR_MOVS;
         instr->op_size = BYTE;
         return DECODE_success;
-            
+
     case 0xA5: /* movsw/movsl */
         instr->instr = INSTR_MOVS;
         GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
         return DECODE_success;
-    
+
     case 0xAA: /* stosb */
         instr->instr = INSTR_STOS;
         instr->op_size = BYTE;
@@ -513,7 +513,7 @@
         instr->instr = INSTR_STOS;
         GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
         return DECODE_success;
-                    
+
     case 0xC6:
         if (((opcode[1] >> 3) & 7) == 0) { /* mov $imm8, m8 */
             instr->instr = INSTR_MOV;
@@ -522,11 +522,11 @@
             instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
             instr->immediate = get_immediate(vm86, opcode+1, instr->op_size);
             instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);
-            
+
             return DECODE_success;
         } else
             return DECODE_failure;
-            
+
     case 0xC7:
         if (((opcode[1] >> 3) & 7) == 0) { /* mov $imm16/32, m16/32 */
             instr->instr = INSTR_MOV;
@@ -535,7 +535,7 @@
             instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
             instr->immediate = get_immediate(vm86, opcode+1, instr->op_size);
             instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);
-            
+
             return DECODE_success;
         } else
             return DECODE_failure;
@@ -598,34 +598,34 @@
     return inst_len;
 }
 
-void send_mmio_req(unsigned char type, unsigned long gpa, 
+void send_mmio_req(unsigned char type, unsigned long gpa,
                    unsigned long count, int size, long value, int dir, int pvalid)
 {
-    struct vcpu *d = current;
+    struct vcpu *v = current;
     vcpu_iodata_t *vio;
     ioreq_t *p;
     int vm86;
     struct cpu_user_regs *regs;
     extern long evtchn_send(int lport);
 
-    regs = current->domain->arch.vmx_platform.mpci.inst_decoder_regs;
-
-    vio = get_vio(d->domain, d->vcpu_id);
+    regs = current->arch.arch_vmx.mmio_op.inst_decoder_regs;
+
+    vio = get_vio(v->domain, v->vcpu_id);
     if (vio == NULL) {
         printf("bad shared page\n");
-        domain_crash_synchronous(); 
+        domain_crash_synchronous();
     }
 
     p = &vio->vp_ioreq;
 
     vm86 = regs->eflags & X86_EFLAGS_VM;
 
-    if (test_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags)) {
+    if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags)) {
         printf("VMX I/O has not yet completed\n");
         domain_crash_synchronous();
     }
 
-    set_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags);
+    set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
     p->dir = dir;
     p->pdata_valid = pvalid;
 
@@ -647,27 +647,27 @@
 
     if (vmx_mmio_intercept(p)){
         p->state = STATE_IORESP_READY;
-        vmx_io_assist(d);
+        vmx_io_assist(v);
         return;
     }
 
-    evtchn_send(iopacket_port(d->domain));
+    evtchn_send(iopacket_port(v->domain));
     vmx_wait_io();
 }
 
 static void mmio_operands(int type, unsigned long gpa, struct instruction *inst,
-                          struct mi_per_cpu_info *mpcip, struct cpu_user_regs *regs)
+                          struct mmio_op *mmio_opp, struct cpu_user_regs *regs)
 {
     unsigned long value = 0;
     int index, size;
-    
+
     size = operand_size(inst->operand[0]);
 
-    mpcip->flags = inst->flags;
-    mpcip->instr = inst->instr;
-    mpcip->operand[0] = inst->operand[0]; /* source */
-    mpcip->operand[1] = inst->operand[1]; /* destination */
-    mpcip->immediate = inst->immediate;
+    mmio_opp->flags = inst->flags;
+    mmio_opp->instr = inst->instr;
+    mmio_opp->operand[0] = inst->operand[0]; /* source */
+    mmio_opp->operand[1] = inst->operand[1]; /* destination */
+    mmio_opp->immediate = inst->immediate;
 
     if (inst->operand[0] & REGISTER) { /* dest is memory */
         index = operand_index(inst->operand[0]);
@@ -687,19 +687,19 @@
 
 #define GET_REPEAT_COUNT() \
      (mmio_inst.flags & REPZ ? (vm86 ? regs->ecx & 0xFFFF : regs->ecx) : 1)
- 
+
 void handle_mmio(unsigned long va, unsigned long gpa)
 {
     unsigned long eip, eflags, cs;
     unsigned long inst_len, inst_addr;
-    struct mi_per_cpu_info *mpcip;
+    struct mmio_op *mmio_opp;
     struct cpu_user_regs *regs;
     struct instruction mmio_inst;
     unsigned char inst[MAX_INST_LEN];
     int i, vm86, ret;
-     
-    mpcip = &current->domain->arch.vmx_platform.mpci;
-    regs = mpcip->inst_decoder_regs;
+
+    mmio_opp = &current->arch.arch_vmx.mmio_op;
+    regs = mmio_opp->inst_decoder_regs;
 
     __vmread(GUEST_RIP, &eip);
     __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len);
@@ -720,7 +720,7 @@
     }
 
     init_instruction(&mmio_inst);
-    
+
     if (vmx_decode(inst, &mmio_inst) == DECODE_failure) {
         printf("mmio opcode: va 0x%lx, gpa 0x%lx, len %ld:",
                va, gpa, inst_len);
@@ -735,7 +735,7 @@
 
     switch (mmio_inst.instr) {
     case INSTR_MOV:
-        mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs);
+        mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mmio_opp, regs);
         break;
 
     case INSTR_MOVS:
@@ -769,8 +769,8 @@
             }
         }
 
-        mpcip->flags = mmio_inst.flags;
-        mpcip->instr = mmio_inst.instr;
+        mmio_opp->flags = mmio_inst.flags;
+        mmio_opp->instr = mmio_inst.instr;
 
         /*
          * In case of a movs spanning multiple pages, we break the accesses
@@ -785,7 +785,7 @@
         if ((addr & PAGE_MASK) != ((addr + size - 1) & PAGE_MASK)) {
             unsigned long value = 0;
 
-            mpcip->flags |= OVERLAP;
+            mmio_opp->flags |= OVERLAP;
 
             regs->eip -= inst_len; /* do not advance %eip */
 
@@ -808,7 +808,7 @@
     }
 
     case INSTR_MOVZ:
-        mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs);
+        mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mmio_opp, regs);
         break;
 
     case INSTR_STOS:
@@ -816,31 +816,31 @@
          * Since the destination is always in (contiguous) mmio space we don't
          * need to break it up into pages.
          */
-        mpcip->flags = mmio_inst.flags;
-        mpcip->instr = mmio_inst.instr;
+        mmio_opp->flags = mmio_inst.flags;
+        mmio_opp->instr = mmio_inst.instr;
         send_mmio_req(IOREQ_TYPE_COPY, gpa,
                       GET_REPEAT_COUNT(), mmio_inst.op_size, regs->eax, IOREQ_WRITE, 0);
         break;
 
     case INSTR_OR:
-        mmio_operands(IOREQ_TYPE_OR, gpa, &mmio_inst, mpcip, regs);
+        mmio_operands(IOREQ_TYPE_OR, gpa, &mmio_inst, mmio_opp, regs);
         break;
 
     case INSTR_AND:
-        mmio_operands(IOREQ_TYPE_AND, gpa, &mmio_inst, mpcip, regs);
+        mmio_operands(IOREQ_TYPE_AND, gpa, &mmio_inst, mmio_opp, regs);
         break;
 
     case INSTR_XOR:
-        mmio_operands(IOREQ_TYPE_XOR, gpa, &mmio_inst, mpcip, regs);
+        mmio_operands(IOREQ_TYPE_XOR, gpa, &mmio_inst, mmio_opp, regs);
         break;
 
     case INSTR_CMP:        /* Pass through */
     case INSTR_TEST:
-        mpcip->flags = mmio_inst.flags;
-        mpcip->instr = mmio_inst.instr;
-        mpcip->operand[0] = mmio_inst.operand[0]; /* source */
-        mpcip->operand[1] = mmio_inst.operand[1]; /* destination */
-        mpcip->immediate = mmio_inst.immediate;
+        mmio_opp->flags = mmio_inst.flags;
+        mmio_opp->instr = mmio_inst.instr;
+        mmio_opp->operand[0] = mmio_inst.operand[0]; /* source */
+        mmio_opp->operand[1] = mmio_inst.operand[1]; /* destination */
+        mmio_opp->immediate = mmio_inst.immediate;
 
         /* send the request and wait for the value */
        send_mmio_req(IOREQ_TYPE_COPY, gpa, 1, mmio_inst.op_size, 0, IOREQ_READ, 0);
diff -r 62d815160f01 -r 1b4ad6eb6968 xen/include/asm-x86/vmx_platform.h
--- a/xen/include/asm-x86/vmx_platform.h        Fri Oct  7 15:49:29 2005
+++ b/xen/include/asm-x86/vmx_platform.h        Fri Oct  7 22:17:24 2005
@@ -75,20 +75,11 @@
 
 #define MAX_INST_LEN      32
 
-struct mi_per_cpu_info {
-    int                    flags;
-    int      instr;  /* instruction */
-    unsigned long          operand[2];  /* operands */
-    unsigned long          immediate;  /* immediate portion */
-    struct cpu_user_regs   *inst_decoder_regs; /* current context */
-};
-
 struct virtual_platform_def {
     unsigned long          *real_mode_data; /* E820, etc. */
     unsigned long          shared_page_va;
     struct vmx_virpit_t    vmx_pit;
     struct vmx_handler_t   vmx_handler;
-    struct mi_per_cpu_info mpci;  /* MMIO */
 };
 
 extern void handle_mmio(unsigned long, unsigned long);
diff -r 62d815160f01 -r 1b4ad6eb6968 xen/include/asm-x86/vmx_vmcs.h
--- a/xen/include/asm-x86/vmx_vmcs.h    Fri Oct  7 15:49:29 2005
+++ b/xen/include/asm-x86/vmx_vmcs.h    Fri Oct  7 22:17:24 2005
@@ -71,6 +71,14 @@
     unsigned long shadow_gs;
 };
 
+struct mmio_op {
+    int                    flags;
+    int                    instr;       /* instruction */
+    unsigned long          operand[2];  /* operands */
+    unsigned long          immediate;   /* immediate portion */
+    struct cpu_user_regs   *inst_decoder_regs; /* current context */
+};
+
 #define PC_DEBUG_PORT   0x80 
 
 struct arch_vmx_struct {
@@ -83,7 +91,8 @@
     unsigned long           cpu_state;
     unsigned long           cpu_based_exec_control;
     struct msr_state        msr_content;
-    void                   *io_bitmap_a, *io_bitmap_b;
+    struct mmio_op          mmio_op;  /* MMIO */
+    void                    *io_bitmap_a, *io_bitmap_b;
 };
 
 #define vmx_schedule_tail(next)         \

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog