
[Xen-changelog] [PATCH] VMX support for MMIO/PIO in VM8086 mode



ChangeSet 1.1434, 2005/04/02 08:32:25+01:00, leendert@xxxxxxxxxxxxxx

        [PATCH] VMX support for MMIO/PIO in VM8086 mode
        
        Memory-mapped and port I/O are currently broken under VMX when the
        partition is running in VM8086 mode. The reason is that the instruction
        decoding support uses 32-bit opcode/address decodes rather than 16-bit
        decodes. This patch fixes that. In addition, the patch adds decoding
        support for the "stos" instruction, because it is a frequently used
        way to clear MMIO areas such as the screen.
        
        As an aside, vmx_platform.c should really reuse x86_emulate.c as much
        as possible.
        
        Signed-off-by: Leendert van Doorn <leendert@xxxxxxxxxxxxxx>
        



 tools/ioemu/iodev/cpu.cc    |   25 +++++-
 xen/arch/x86/vmx.c          |   76 +++++++++++++++-----
 xen/arch/x86/vmx_platform.c |  166 ++++++++++++++++++++++++++++++++++----------
 3 files changed, 205 insertions(+), 62 deletions(-)
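
As a quick, hedged illustration of what the vmx.c hunk below does for string
I/O in VM8086 mode (these helpers are not part of the patch; real mode simply
forms a linear address from a 16-bit segment and a 16-bit offset, and the
repeat count lives in %cx):

    /* Sketch only: VM8086/real-mode address and count handling. */
    static unsigned long vm86_linear_addr(unsigned long seg, unsigned long off)
    {
        return (seg << 4) + (off & 0xFFFF);   /* segment * 16 + 16-bit offset */
    }

    static unsigned long vm86_rep_count(unsigned long ecx)
    {
        return ecx & 0xFFFF;                  /* only %cx holds the count */
    }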


diff -Nru a/tools/ioemu/iodev/cpu.cc b/tools/ioemu/iodev/cpu.cc
--- a/tools/ioemu/iodev/cpu.cc  2005-04-02 03:03:25 -05:00
+++ b/tools/ioemu/iodev/cpu.cc  2005-04-02 03:03:25 -05:00
@@ -51,7 +51,7 @@
        if (req->state == STATE_IOREQ_READY) {
                req->state = STATE_IOREQ_INPROCESS;
        } else {
-               BX_INFO(("False I/O requrest ... in-service already: %lx, pvalid: %lx,port: %lx, data: %lx, count: %lx, size: %lx\n", req->state, req->pdata_valid, req->addr, req->u.data, req->count, req->size));
+               BX_INFO(("False I/O request ... in-service already: %lx, pvalid: %lx,port: %lx, data: %lx, count: %lx, size: %lx\n", req->state, req->pdata_valid, req->addr, req->u.data, req->count, req->size));
                req = NULL;
        }
 
@@ -95,6 +95,8 @@
        }
        if (req->port_mm == 0){//port io
                if(req->dir == IOREQ_READ){//read
+                       //BX_INFO(("pio: <READ>addr:%llx, value:%llx, size: %llx, count: %llx\n", req->addr, req->u.data, req->size, req->count));
+
                        if (!req->pdata_valid)
                                req->u.data = BX_INP(req->addr, req->size);
                        else {
@@ -107,6 +109,8 @@
                                }
                        }
                } else if(req->dir == IOREQ_WRITE) {
+                       //BX_INFO(("pio: <WRITE>addr:%llx, value:%llx, size: %llx, count: %llx\n", req->addr, req->u.data, req->size, req->count));
+
                        if (!req->pdata_valid) {
                                BX_OUTP(req->addr, (dma_addr_t) req->u.data, req->size);
                        } else {
@@ -123,20 +127,29 @@
        } else if (req->port_mm == 1){//memory map io
                if (!req->pdata_valid) {
                        if(req->dir == IOREQ_READ){//read
-                               BX_MEM_READ_PHYSICAL(req->addr, req->size, &req->u.data);
-                       } else if(req->dir == IOREQ_WRITE)//write
-                               BX_MEM_WRITE_PHYSICAL(req->addr, req->size, &req->u.data);
+                               //BX_INFO(("mmio[value]: <READ> addr:%llx, value:%llx, size: %llx, count: %llx\n", req->addr, req->u.data, req->size, req->count));
+
+                               for (i = 0; i < req->count; i++) {
+                                       BX_MEM_READ_PHYSICAL(req->addr, req->size, &req->u.data);
+                               }
+                       } else if(req->dir == IOREQ_WRITE) {//write
+                               //BX_INFO(("mmio[value]: <WRITE> addr:%llx, value:%llx, size: %llx, count: %llx\n", req->addr, req->u.data, req->size, req->count));
+
+                               for (i = 0; i < req->count; i++) {
+                                       BX_MEM_WRITE_PHYSICAL(req->addr, req->size, &req->u.data);
+                               }
+                       }
                } else {
                        //handle movs
                        unsigned long tmp;
                        if (req->dir == IOREQ_READ) {
-                               //BX_INFO(("<READ>addr:%llx, pdata:%llx, size: %x, count: %x\n", req->addr, req->u.pdata, req->size, req->count));
+                               //BX_INFO(("mmio[pdata]: <READ>addr:%llx, pdata:%llx, size: %x, count: %x\n", req->addr, req->u.pdata, req->size, req->count));
                                for (i = 0; i < req->count; i++) {
                                        BX_MEM_READ_PHYSICAL(req->addr + (sign * i * req->size), req->size, &tmp);
                                        BX_MEM_WRITE_PHYSICAL((dma_addr_t) req->u.pdata + (sign * i * req->size), req->size, &tmp);
                                }
                        } else if (req->dir == IOREQ_WRITE) {
-                               //BX_INFO(("<WRITE>addr:%llx, pdata:%llx, size: %x, count: %x\n", req->addr, req->u.pdata, req->size, req->count));
+                               //BX_INFO(("mmio[pdata]: <WRITE>addr:%llx, pdata:%llx, size: %x, count: %x\n", req->addr, req->u.pdata, req->size, req->count));
                                for (i = 0; i < req->count; i++) {
                                        BX_MEM_READ_PHYSICAL((dma_addr_t)req->u.pdata + (sign * i * req->size), req->size, &tmp);
                                        BX_MEM_WRITE_PHYSICAL(req->addr + (sign * i * req->size), req->size, &tmp);
diff -Nru a/xen/arch/x86/vmx.c b/xen/arch/x86/vmx.c
--- a/xen/arch/x86/vmx.c        2005-04-02 03:03:25 -05:00
+++ b/xen/arch/x86/vmx.c        2005-04-02 03:03:25 -05:00
@@ -294,13 +294,17 @@
     vcpu_iodata_t *vio;
     ioreq_t *p;
     unsigned long addr;
-    unsigned long eip;
+    unsigned long eip, cs, eflags;
+    int vm86;
 
     __vmread(GUEST_EIP, &eip);
+    __vmread(GUEST_CS_SELECTOR, &cs);
+    __vmread(GUEST_EFLAGS, &eflags);
+    vm86 = eflags & X86_EFLAGS_VM ? 1 : 0;
 
     VMX_DBG_LOG(DBG_LEVEL_1, 
-            "vmx_io_instruction: eip=%p, exit_qualification = %lx",
-            eip, exit_qualification);
+            "vmx_io_instruction: vm86 %d, eip=%p:%p, exit_qualification = %lx",
+            vm86, cs, eip, exit_qualification);
 
     if (test_bit(6, &exit_qualification))
         addr = (exit_qualification >> 16) & (0xffff);
@@ -325,17 +329,29 @@
     p->size = (exit_qualification & 7) + 1;
 
     if (test_bit(4, &exit_qualification)) {
-        unsigned long eflags;
-
-        __vmread(GUEST_EFLAGS, &eflags);
         p->df = (eflags & X86_EFLAGS_DF) ? 1 : 0;
         p->pdata_valid = 1;
-        p->u.pdata = (void *) ((p->dir == IOREQ_WRITE) ?
-            regs->esi
-            : regs->edi);
+
+        if (vm86) {
+            unsigned long seg;
+            if (p->dir == IOREQ_WRITE) {
+                __vmread(GUEST_DS_SELECTOR, &seg);
+                p->u.pdata = (void *)
+                        ((seg << 4) | (regs->esi & 0xFFFF));
+            } else {
+                __vmread(GUEST_ES_SELECTOR, &seg);
+                p->u.pdata = (void *)
+                        ((seg << 4) | (regs->edi & 0xFFFF));
+            }
+        } else {
+               p->u.pdata = (void *) ((p->dir == IOREQ_WRITE) ?
+                   regs->esi : regs->edi);
+        }
         p->u.pdata = (void *) gva_to_gpa(p->u.data);
+
+
         if (test_bit(5, &exit_qualification))
-            p->count = regs->ecx;
+           p->count = vm86 ? regs->ecx & 0xFFFF : regs->ecx;
         if ((p->u.data & PAGE_MASK) != 
             ((p->u.data + p->count * p->size - 1) & PAGE_MASK)) {
             printk("stringio crosses page boundary!\n");
@@ -368,13 +384,20 @@
     do_block();
 }
 
+static int
+vm86assist(struct exec_domain *d)
+{
+    /* stay tuned ... */
+    return 0;
+}
+
 #define CASE_GET_REG(REG, reg)  \
     case REG_ ## REG: value = regs->reg; break
 
 /*
  * Write to control registers
  */
-static void mov_to_cr(int gp, int cr, struct xen_regs *regs)
+static int mov_to_cr(int gp, int cr, struct xen_regs *regs)
 {
     unsigned long value;
     unsigned long old_cr;
@@ -454,8 +477,21 @@
                     d->arch.arch_vmx.cpu_cr3, mfn);
             /* undo the get_page done in the para virt case */
             put_page_and_type(&frame_table[old_base_mfn]);
+        } else {
+            if ((value & X86_CR0_PE) == 0) {
+               unsigned long eip;
 
-        }
+               __vmread(GUEST_EIP, &eip);
+                VMX_DBG_LOG(DBG_LEVEL_1,
+                       "Disabling CR0.PE at %%eip 0x%lx", eip);
+               if (vm86assist(d)) {
+                   __vmread(GUEST_EIP, &eip);
+                   VMX_DBG_LOG(DBG_LEVEL_1,
+                       "Transfering control to vm86assist %%eip 0x%lx", eip);
+                   return 0; /* do not update eip! */
+               }
+           }
+       }
         break;
     }
     case 3: 
@@ -534,7 +570,9 @@
         printk("invalid cr: %d\n", gp);
         __vmx_bug(regs);
     }
-}   
+
+    return 1;
+}
 
 #define CASE_SET_REG(REG, reg)      \
     case REG_ ## REG:       \
@@ -575,7 +613,7 @@
     VMX_DBG_LOG(DBG_LEVEL_VMMU, "mov_from_cr: CR%d, value = %lx,", cr, value);
 }
 
-static void vmx_cr_access (unsigned long exit_qualification, struct xen_regs *regs)
+static int vmx_cr_access(unsigned long exit_qualification, struct xen_regs *regs)
 {
     unsigned int gp, cr;
     unsigned long value;
@@ -584,8 +622,7 @@
     case TYPE_MOV_TO_CR:
         gp = exit_qualification & CONTROL_REG_ACCESS_REG;
         cr = exit_qualification & CONTROL_REG_ACCESS_NUM;
-        mov_to_cr(gp, cr, regs);
-        break;
+        return mov_to_cr(gp, cr, regs);
     case TYPE_MOV_FROM_CR:
         gp = exit_qualification & CONTROL_REG_ACCESS_REG;
         cr = exit_qualification & CONTROL_REG_ACCESS_NUM;
@@ -604,6 +641,7 @@
         __vmx_bug(regs);
         break;
     }
+    return 1;
 }
 
 static inline void vmx_do_msr_read(struct xen_regs *regs)
@@ -619,7 +657,7 @@
 }
 
 /*
- * Need to use this exit to rescheule
+ * Need to use this exit to reschedule
  */
 static inline void vmx_vmexit_do_hlt(void)
 {
@@ -891,8 +929,8 @@
 
         VMX_DBG_LOG(DBG_LEVEL_1, "eip = %lx, inst_len =%lx, exit_qualification = %lx", 
                 eip, inst_len, exit_qualification);
-        vmx_cr_access(exit_qualification, &regs);
-        __update_guest_eip(inst_len);
+        if (vmx_cr_access(exit_qualification, &regs))
+           __update_guest_eip(inst_len);
         break;
     }
     case EXIT_REASON_DR_ACCESS:
diff -Nru a/xen/arch/x86/vmx_platform.c b/xen/arch/x86/vmx_platform.c
--- a/xen/arch/x86/vmx_platform.c       2005-04-02 03:03:25 -05:00
+++ b/xen/arch/x86/vmx_platform.c       2005-04-02 03:03:25 -05:00
@@ -55,6 +55,8 @@
     __vmread(GUEST_ESP, &regs->esp);
     __vmread(GUEST_EFLAGS, &regs->eflags);
     __vmread(GUEST_CS_SELECTOR, &regs->cs);
+    __vmread(GUEST_DS_SELECTOR, &regs->ds);
+    __vmread(GUEST_ES_SELECTOR, &regs->es);
     __vmread(GUEST_EIP, &regs->eip);
 }
 
@@ -144,19 +146,27 @@
     while (1) {
         switch (*inst) {
             case 0xf3: //REPZ
+               thread_inst->flags = REPZ;
+               break;
             case 0xf2: //REPNZ
+               thread_inst->flags = REPNZ;
+               break;
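
As a purely illustrative sketch (not code from this changeset; "mmio_req" and
"build_rep_stosw_req" are invented names), the "stos" decoding mentioned in the
description boils a 16-bit "rep stosw" aimed at MMIO, such as the screen, down
to one counted request: element size from the opcode, repeat count from %cx,
target from ES:DI, direction from EFLAGS.DF:

    /* Hypothetical helper, for illustration only. */
    struct mmio_req {
        unsigned long addr;   /* physical start address (ES:DI)          */
        unsigned long value;  /* value replicated into each element (AX) */
        int size;             /* element size in bytes                   */
        int count;            /* number of repetitions (CX)              */
        int df;               /* 1 if EFLAGS.DF is set (store downwards) */
    };

    static void build_rep_stosw_req(unsigned short es, unsigned short di,
                                    unsigned short ax, unsigned short cx,
                                    int df, struct mmio_req *req)
    {
        req->addr  = ((unsigned long)es << 4) + di;  /* real-mode address    */
        req->value = ax;                             /* stosw stores AX      */
        req->size  = 2;                              /* two bytes per store  */
        req->count = cx;                             /* rep repeats CX times */
        req->df    = df;                             /* direction flag       */
    }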

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

