
[Xen-devel] [RFC][PATCH v2x prototype 1/1] Add IOREQ_TYPE_VMWARE_PORT



This adds synchronisation of the six vcpu registers (only the low
32 bits of each) that vmport.c needs, between Xen and QEMU.

This avoids a 2nd and 3rd exchange between QEMU and Xen to fetch
and put the six vcpu registers used by the code in vmport.c and
vmmouse.c.
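
For reference, a guest-side backdoor call looks roughly like the
sketch below (a minimal illustration, not part of this patch,
assuming the usual BDOOR_MAGIC/BDOOR_PORT convention from vmport.c).
All six GPRs carry arguments or results, which is why a plain ioreq
(addr/data only) cannot describe the exchange:

    #include <stdint.h>

    /* Hypothetical guest-side sketch; regs[] holds
     * { eax, ebx, ecx, edx, esi, edi } on entry and exit.
     * The caller sets regs[0] = 0x564D5868 (BDOOR_MAGIC),
     * regs[2] = command, regs[3] = 0x5658 (BDOOR_PORT). */
    static inline void backdoor_call(uint32_t regs[6])
    {
        asm volatile ("inl %%dx, %%eax"
                      : "+a" (regs[0]), "+b" (regs[1]), "+c" (regs[2]),
                        "+d" (regs[3]), "+S" (regs[4]), "+D" (regs[5])
                      :
                      : "memory");
    }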

The matching QEMU patch is named "xen-hvm.c: Add support for Xen access to vmport".

Signed-off-by: Don Slutz <dslutz@xxxxxxxxxxx>
---
As requested by Paul Durrant <Paul.Durrant@xxxxxxxxxx>

Here is a prototype of the QEMU interface change using a 2nd shared
page. I picked adding HVM_PARAM_VMPORT_IOREQ_PFN as the simplest and
fastest way to keep QEMU building against older Xen versions.

There is xentrace and debug logging in here that is still TBD for
the Xen 4.6 submission of this change.
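
For illustration only, the device model side could pick up the extra
page along these lines (a hedged sketch, not the actual QEMU patch;
xc_hvm_param_get() and xc_map_foreign_range() are existing libxc
calls, but the helper names and flow here are hypothetical):

    #include <sys/mman.h>
    #include <xenctrl.h>
    #include <xen/hvm/ioreq.h>
    #include <xen/hvm/params.h>

    /* Map the vmport page advertised via HVM_PARAM_VMPORT_IOREQ_PFN;
     * returns NULL on older Xen, so callers can fall back to the
     * multi-exchange path. */
    static shared_vmport_iopage_t *map_vmport_page(xc_interface *xch,
                                                   domid_t dom)
    {
        uint64_t pfn;

        if ( xc_hvm_param_get(xch, dom, HVM_PARAM_VMPORT_IOREQ_PFN,
                              &pfn) )
            return NULL;

        return xc_map_foreign_range(xch, dom, XC_PAGE_SIZE,
                                    PROT_READ | PROT_WRITE, pfn);
    }

    /* Per-vcpu register block, indexed like vcpu_ioreq[]. */
    static vmware_ioreq_t *vmport_ioreq(shared_vmport_iopage_t *p,
                                        int vcpu)
    {
        return &p->vcpu_vmport_ioreq[vcpu];
    }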

 tools/libxc/xc_hvm_build_x86.c         |  5 ++-
 tools/xentrace/formats                 |  1 +
 xen/arch/x86/hvm/emulate.c             | 62 ++++++++++++++++++++++----
 xen/arch/x86/hvm/hvm.c                 | 81 ++++++++++++++++++++++++++++------
 xen/arch/x86/hvm/io.c                  | 40 ++++++++++++++++-
 xen/arch/x86/hvm/svm/svm.c             | 21 +++++++--
 xen/arch/x86/hvm/vmware/vmport.c       | 30 ++++++++++++-
 xen/arch/x86/hvm/vmx/vmx.c             | 23 ++++++++--
 xen/arch/x86/x86_emulate/x86_emulate.h |  2 +
 xen/include/asm-x86/hvm/domain.h       |  1 +
 xen/include/asm-x86/hvm/emulate.h      |  3 ++
 xen/include/asm-x86/hvm/hvm.h          |  2 +-
 xen/include/asm-x86/hvm/io.h           |  2 +-
 xen/include/asm-x86/hvm/trace.h        |  1 +
 xen/include/asm-x86/hvm/vcpu.h         |  1 +
 xen/include/public/hvm/ioreq.h         | 17 +++++++
 xen/include/public/hvm/params.h        |  4 +-
 xen/include/public/trace.h             |  1 +
 18 files changed, 261 insertions(+), 36 deletions(-)

diff --git a/tools/libxc/xc_hvm_build_x86.c b/tools/libxc/xc_hvm_build_x86.c
index c81a25b..e45fa62 100644
--- a/tools/libxc/xc_hvm_build_x86.c
+++ b/tools/libxc/xc_hvm_build_x86.c
@@ -46,7 +46,8 @@
 #define SPECIALPAGE_IOREQ    5
 #define SPECIALPAGE_IDENT_PT 6
 #define SPECIALPAGE_CONSOLE  7
-#define NR_SPECIAL_PAGES     8
+#define SPECIALPAGE_VMPORT_IOREQ 8
+#define NR_SPECIAL_PAGES     9
 #define special_pfn(x) (0xff000u - NR_SPECIAL_PAGES + (x))
 
 #define NR_IOREQ_SERVER_PAGES 8
@@ -493,6 +494,8 @@ static int setup_guest(xc_interface *xch,
                      special_pfn(SPECIALPAGE_BUFIOREQ));
     xc_hvm_param_set(xch, dom, HVM_PARAM_IOREQ_PFN,
                      special_pfn(SPECIALPAGE_IOREQ));
+    xc_hvm_param_set(xch, dom, HVM_PARAM_VMPORT_IOREQ_PFN,
+                     special_pfn(SPECIALPAGE_VMPORT_IOREQ));
     xc_hvm_param_set(xch, dom, HVM_PARAM_CONSOLE_PFN,
                      special_pfn(SPECIALPAGE_CONSOLE));
     xc_hvm_param_set(xch, dom, HVM_PARAM_PAGING_RING_PFN,
diff --git a/tools/xentrace/formats b/tools/xentrace/formats
index 7b21b22..26e128e 100644
--- a/tools/xentrace/formats
+++ b/tools/xentrace/formats
@@ -92,6 +92,7 @@
 0x0008202a  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  VMPORT_BAD [ dir = %(1)d bytes = %(2)d eax = 0x%(3)08x eip = 0x%(4)08x ]
 0x0008212a  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  VMPORT_BAD [ dir = %(1)d bytes = %(2)d eax = 0x%(3)08x rip = 0x%(5)08x%(4)08x ]
 0x0008202b  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  VMPORT_UNKNOWN [ bytes << 8 + dir = 0x%(1)03x cmd = 0x%(2)x cmd = %(2)d ebx = 0x%(3)08x ecx = 0x%(4)08x esi = 0x%(5)08x edi = 0x%(6)08x ]
+0x0008202c  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  VMPORT_QEMU [ eax = 0x%(1)08x ebx = 0x%(2)08x ecx = 0x%(3)08x edx = 0x%(4)08x esi = 0x%(5)08x edi = 0x%(6)08x ]

 0x0010f001  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  page_grant_map      [ domid = %(1)d ]
 0x0010f002  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  page_grant_unmap    [ domid = %(1)d ]
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index c0f47d2..4b8ea8f 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -52,12 +52,14 @@ static void hvmtrace_io_assist(int is_mmio, ioreq_t *p)
 
 static int hvmemul_do_io(
     int is_mmio, paddr_t addr, unsigned long *reps, int size,
-    paddr_t ram_gpa, int dir, int df, void *p_data)
+    paddr_t ram_gpa, int dir, int df, void *p_data,
+    struct cpu_user_regs *regs)
 {
     struct vcpu *curr = current;
     struct hvm_vcpu_io *vio;
     ioreq_t p = {
-        .type = is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO,
+        .type = regs ? IOREQ_TYPE_VMWARE_PORT :
+                is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO,
         .addr = addr,
         .size = size,
         .dir = dir,
@@ -65,11 +67,15 @@ static int hvmemul_do_io(
         .data = ram_gpa,
         .data_is_ptr = (p_data == NULL),
     };
+    vmware_ioreq_t vp;
+    vmware_ioreq_t *vpp = NULL;
     unsigned long ram_gfn = paddr_to_pfn(ram_gpa);
     p2m_type_t p2mt;
     struct page_info *ram_page;
     int rc;
 
+    BUILD_BUG_ON(sizeof(ioreq_t) < sizeof(vmware_ioreq_t));
+
     /* Check for paged out page */
     ram_page = get_page_from_gfn(curr->domain, ram_gfn, &p2mt, P2M_UNSHARE);
     if ( p2m_is_paging(p2mt) )
@@ -101,7 +107,17 @@ static int hvmemul_do_io(
         return X86EMUL_UNHANDLEABLE;
     }
 
-    if ( !p.data_is_ptr && (dir == IOREQ_WRITE) )
+    if ( regs )
+    {
+        vpp = &vp;
+        p.data = regs->rax;
+        vp.ebx = regs->rbx;
+        vp.ecx = regs->rcx;
+        vp.edx = regs->rdx;
+        vp.esi = regs->rsi;
+        vp.edi = regs->rdi;
+    }
+    else if ( !p.data_is_ptr && (dir == IOREQ_WRITE) )
     {
         memcpy(&p.data, p_data, size);
         p_data = NULL;
@@ -161,7 +177,19 @@ static int hvmemul_do_io(
                 put_page(ram_page);
             return X86EMUL_RETRY;
         }
+    case HVMIO_awaiting_completion:
+        if ( regs )
+        {
+            /* May have to wait for previous cycle of a multi-write to complete. */
+            if ( vio->mmio_retry ) {
+                gdprintk(XENLOG_WARNING, "WARNING: mmio_retry io_state=%d?\n", vio->io_state);
+                return X86EMUL_RETRY;
+            }
+            /* These are expected if we get here via hvmemul_do_io() */
+            break;
+        }
     default:
+        gdprintk(XENLOG_WARNING, "WARNING: io_state=%d?\n", vio->io_state);
         if ( ram_page )
             put_page(ram_page);
         return X86EMUL_UNHANDLEABLE;
@@ -175,7 +203,7 @@ static int hvmemul_do_io(
         return X86EMUL_UNHANDLEABLE;
     }
 
-    vio->io_state =
+    vio->io_state = regs ? HVMIO_handle_vmport_awaiting_completion :
         (p_data == NULL) ? HVMIO_dispatched : HVMIO_awaiting_completion;
     vio->io_size = size;
 
@@ -197,6 +225,9 @@ static int hvmemul_do_io(
         if ( rc == X86EMUL_UNHANDLEABLE )
             rc = hvm_buffered_io_intercept(&p);
     }
+    else if ( regs ) {
+        rc = X86EMUL_UNHANDLEABLE;
+    }
     else
     {
         rc = hvm_portio_intercept(&p);
@@ -210,7 +241,7 @@ static int hvmemul_do_io(
         p.state = STATE_IORESP_READY;
         if ( !vio->mmio_retry )
         {
-            hvm_io_assist(&p);
+            hvm_io_assist(&p, vpp);
             vio->io_state = HVMIO_none;
         }
         else
@@ -226,13 +257,19 @@ static int hvmemul_do_io(
         }
         else
         {
-            rc = X86EMUL_RETRY;
-            if ( !hvm_send_assist_req(&p) )
+            if ( regs )
+                rc = X86EMUL_VMPORT_RETRY;
+            else
+                rc = X86EMUL_RETRY;
+            if ( !hvm_send_assist_req(&p, vpp) )
                 vio->io_state = HVMIO_none;
             else if ( p_data == NULL )
                 rc = X86EMUL_OKAY;
         }
         break;
+    case X86EMUL_VMPORT_RETRY:
+        rc = X86EMUL_RETRY;
+        break;
     default:
         BUG();
     }
@@ -287,14 +324,21 @@ int hvmemul_do_pio(
     unsigned long port, unsigned long *reps, int size,
     paddr_t ram_gpa, int dir, int df, void *p_data)
 {
-    return hvmemul_do_io(0, port, reps, size, ram_gpa, dir, df, p_data);
+    return hvmemul_do_io(0, port, reps, size, ram_gpa, dir, df, p_data, NULL);
+}
+
+int hvmemul_do_vmport(
+    unsigned long port, unsigned long *reps, int size,
+    int dir, void *p_data, struct cpu_user_regs *regs)
+{
+    return hvmemul_do_io(0, port, reps, size, 0, dir, 0, p_data, regs);
 }
 
 static int hvmemul_do_mmio(
     paddr_t gpa, unsigned long *reps, int size,
     paddr_t ram_gpa, int dir, int df, void *p_data)
 {
-    return hvmemul_do_io(1, gpa, reps, size, ram_gpa, dir, df, p_data);
+    return hvmemul_do_io(1, gpa, reps, size, ram_gpa, dir, df, p_data, NULL);
 }
 
 /*
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 8d0a3a0..fd05a85 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -382,6 +382,16 @@ static ioreq_t *get_ioreq(struct hvm_ioreq_server *s, struct vcpu *v)
     return &p->vcpu_ioreq[v->vcpu_id];
 }
 
+static vmware_ioreq_t *get_vmport_ioreq(struct hvm_ioreq_server *s, struct vcpu *v)
+{
+    shared_vmport_iopage_t *p = s->vmport_ioreq.va;
+
+    ASSERT((v == current) || !vcpu_runnable(v));
+    ASSERT(p != NULL);
+
+    return &p->vcpu_vmport_ioreq[v->vcpu_id];
+}
+
 bool_t hvm_io_pending(struct vcpu *v)
 {
     struct domain *d = v->domain;
@@ -400,7 +410,8 @@ bool_t hvm_io_pending(struct vcpu *v)
     return 0;
 }
 
-static bool_t hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
+static bool_t hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p,
+                              vmware_ioreq_t *vp)
 {
     /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
     while ( p->state != STATE_IOREQ_NONE )
@@ -409,7 +420,7 @@ static bool_t hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
         {
         case STATE_IORESP_READY: /* IORESP_READY -> NONE */
             rmb(); /* see IORESP_READY /then/ read contents of ioreq */
-            hvm_io_assist(p);
+            hvm_io_assist(p, vp);
             break;
         case STATE_IOREQ_READY:  /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
         case STATE_IOREQ_INPROCESS:
@@ -449,7 +460,8 @@ void hvm_do_resume(struct vcpu *v)
         {
             if ( sv->vcpu == v )
             {
-                if ( !hvm_wait_for_io(sv, get_ioreq(s, v)) )
+                if ( !hvm_wait_for_io(sv, get_ioreq(s, v),
+                                      get_vmport_ioreq(s, v)) )
                     return;
 
                 break;
@@ -491,22 +503,50 @@ static void hvm_free_ioreq_gmfn(struct domain *d, unsigned long gmfn)
     clear_bit(i, &d->arch.hvm_domain.ioreq_gmfn.mask);
 }
 
-static void hvm_unmap_ioreq_page(struct hvm_ioreq_server *s, bool_t buf)
+static void hvm_unmap_ioreq_page(struct hvm_ioreq_server *s, int buf)
 {
-    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
+    struct hvm_ioreq_page *iorp = NULL;
+
+    switch ( buf )
+    {
+    case 0:
+        iorp = &s->ioreq;
+        break;
+    case 1:
+        iorp = &s->bufioreq;
+        break;
+    case 2:
+        iorp = &s->vmport_ioreq;
+        break;
+    }
+    ASSERT(iorp);
 
     destroy_ring_for_helper(&iorp->va, iorp->page);
 }
 
 static int hvm_map_ioreq_page(
-    struct hvm_ioreq_server *s, bool_t buf, unsigned long gmfn)
+    struct hvm_ioreq_server *s, int buf, unsigned long gmfn)
 {
     struct domain *d = s->domain;
-    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
+    struct hvm_ioreq_page *iorp = NULL;
     struct page_info *page;
     void *va;
     int rc;
 
+    switch ( buf )
+    {
+    case 0:
+        iorp = &s->ioreq;
+        break;
+    case 1:
+        iorp = &s->bufioreq;
+        break;
+    case 2:
+        iorp = &s->vmport_ioreq;
+        break;
+    }
+    ASSERT(iorp);
+
     if ( (rc = prepare_ring_for_helper(d, gmfn, &page, &va)) )
         return rc;
 
@@ -717,7 +757,7 @@ static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s,
                                       bool_t is_default, bool_t handle_bufioreq)
 {
     struct domain *d = s->domain;
-    unsigned long ioreq_pfn, bufioreq_pfn;
+    unsigned long ioreq_pfn, bufioreq_pfn, vmport_ioreq_pfn = 0;
     int rc;
 
     if ( is_default )
@@ -730,6 +770,7 @@ static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s,
          */
         ASSERT(handle_bufioreq);
         bufioreq_pfn = d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN];
+        vmport_ioreq_pfn = d->arch.hvm_domain.params[HVM_PARAM_VMPORT_IOREQ_PFN];
     }
     else
     {
@@ -754,10 +795,16 @@ static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s,
         rc = hvm_map_ioreq_page(s, 1, bufioreq_pfn);
         if ( rc )
             goto fail4;
+        rc = hvm_map_ioreq_page(s, 2, vmport_ioreq_pfn);
+        if ( rc )
+            goto fail5;
     }
 
     return 0;
 
+fail5:
+    hvm_unmap_ioreq_page(s, 1);
+
 fail4:
     hvm_unmap_ioreq_page(s, 0);
 
@@ -2510,7 +2557,8 @@ bool_t hvm_has_dm(struct domain *d)
 }
 
 bool_t hvm_send_assist_req_to_ioreq_server(struct hvm_ioreq_server *s,
-                                           ioreq_t *proto_p)
+                                           ioreq_t *proto_p,
+                                           vmware_ioreq_t *proto_vp)
 {
     struct vcpu *curr = current;
     struct domain *d = curr->domain;
@@ -2544,6 +2592,12 @@ bool_t hvm_send_assist_req_to_ioreq_server(struct hvm_ioreq_server *s,
                 goto crash;
             }
 
+            if ( proto_vp )
+            {
+                vmware_ioreq_t *vp = get_vmport_ioreq(s, curr);
+
+                *vp = *proto_vp;
+            }
             proto_p->state = STATE_IOREQ_NONE;
             proto_p->vp_eport = port;
             *p = *proto_p;
@@ -2591,21 +2645,21 @@ static bool_t hvm_complete_assist_req(ioreq_t *p)
         /* FALLTHRU */
     default:
         p->state = STATE_IORESP_READY;
-        hvm_io_assist(p);
+        hvm_io_assist(p, NULL);
         break;
     }
 
     return 1;
 }
 
-bool_t hvm_send_assist_req(ioreq_t *p)
+bool_t hvm_send_assist_req(ioreq_t *p, vmware_ioreq_t *vp)
 {
     struct hvm_ioreq_server *s = hvm_select_ioreq_server(current->domain, p);
 
     if ( !s )
         return hvm_complete_assist_req(p);
 
-    return hvm_send_assist_req_to_ioreq_server(s, p);
+    return hvm_send_assist_req_to_ioreq_server(s, p, vp);
 }
 
 void hvm_broadcast_assist_req(ioreq_t *p)
@@ -2618,7 +2672,7 @@ void hvm_broadcast_assist_req(ioreq_t *p)
     list_for_each_entry ( s,
                           &d->arch.hvm_domain.ioreq_server.list,
                           list_entry )
-        (void) hvm_send_assist_req_to_ioreq_server(s, p);
+        (void) hvm_send_assist_req_to_ioreq_server(s, p, NULL);
 }
 
 void hvm_hlt(unsigned long rflags)
@@ -5763,6 +5817,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
                     break;
                 }
             case HVM_PARAM_IOREQ_PFN:
+            case HVM_PARAM_VMPORT_IOREQ_PFN:
             case HVM_PARAM_BUFIOREQ_PFN:
             case HVM_PARAM_BUFIOREQ_EVTCHN: {
                 domid_t domid;
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 68fb890..eb09032 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -154,8 +154,17 @@ int handle_pio(uint16_t port, unsigned int size, int dir)
         }
         break;
     case X86EMUL_RETRY:
-        if ( vio->io_state != HVMIO_awaiting_completion )
+        if ( vio->io_state == HVMIO_handle_vmport_awaiting_completion ) {
+            /* Completion in hvm_io_assist() with no re-emulation required. */
+#ifdef VMPORT_IO_LOGGING
+            gdprintk(XENLOG_WARNING, "_vmport_awaiting\n");
+#endif
+            return 1;
+        }
+        if ( vio->io_state != HVMIO_awaiting_completion ) {
+            gdprintk(XENLOG_WARNING, "WARNING: io_state=%d?\n", vio->io_state);
             return 0;
+        }
         /* Completion in hvm_io_assist() with no re-emulation required. */
         ASSERT(dir == IOREQ_READ);
         vio->io_state = HVMIO_handle_pio_awaiting_completion;
@@ -169,7 +178,7 @@ int handle_pio(uint16_t port, unsigned int size, int dir)
     return 1;
 }
 
-void hvm_io_assist(ioreq_t *p)
+void hvm_io_assist(ioreq_t *p, vmware_ioreq_t *vp)
 {
     struct vcpu *curr = current;
     struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
@@ -197,6 +206,33 @@ void hvm_io_assist(ioreq_t *p)
         else
             memcpy(&guest_cpu_user_regs()->rax, &p->data, vio->io_size);
         break;
+    case HVMIO_handle_vmport_awaiting_completion:
+    {
+        struct cpu_user_regs *regs = guest_cpu_user_regs();
+
+        ASSERT(vp);
+#ifdef VMPORT_IO_LOGGING
+        gdprintk(XENLOG_WARNING, "vmport done ip=0x%lx\n",
+                 (long)regs->rip);
+#endif
+        if ( p->dir == IOREQ_READ)
+        {
+            if ( vio->io_size == 4 ) /* Needs zero extension. */
+                regs->rax = (uint32_t)p->data;
+            else
+                memcpy(&regs->rax, &p->data, vio->io_size);
+        }
+        /* Only change the 32bit part of the register */
+        regs->_ebx = vp->ebx;
+        regs->_ecx = vp->ecx;
+        regs->_edx = vp->edx;
+        regs->_esi = vp->esi;
+        regs->_edi = vp->edi;
+        HVMTRACE_ND(VMPORT_QEMU, 0, 1/*cycles*/, 6,
+                    regs->rax, regs->rbx, regs->rcx,
+                    regs->rdx, regs->rsi, regs->rdi);
+    }
+        break;
     default:
         break;
     }
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 8b1185e..ae21356 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -2140,10 +2140,24 @@ static void svm_vmexit_gp_intercept(struct cpu_user_regs *regs,
         HVMTRACE_C4D(TRAP_GP, inst_len, starting_rdx, vmcb->exitinfo1,
                      vmcb->exitinfo2);
 
-    if ( !rc )
-        __update_guest_eip(regs, inst_len);
-    else
+    switch ( rc )
     {
+    case X86EMUL_VMPORT_RETRY:
+        rc = X86EMUL_RETRY;
+        /* fall through */
+    case X86EMUL_RETRY:
+    {
+        struct hvm_vcpu_io *vio = &v->arch.hvm_vcpu.hvm_io;
+        if ( vio->io_state != HVMIO_handle_vmport_awaiting_completion ) {
+            gdprintk(XENLOG_WARNING, "WARNING: io_state=%d?\n", vio->io_state);
+            break;
+        }
+    }
+        /* fall through */
+    case X86EMUL_OKAY:
+        __update_guest_eip(regs, inst_len);
+        break;
+    default:
         VMPORT_DBG_LOG(VMPORT_LOG_GP_UNKNOWN,
                        "gp: rc=%d ei1=0x%lx ei2=0x%lx ec=0x%x ip=%"PRIx64
                        " (0x%lx,%ld) ax=%"PRIx64" bx=%"PRIx64" cx=%"PRIx64
@@ -2159,6 +2173,7 @@ static void svm_vmexit_gp_intercept(struct cpu_user_regs *regs,
             HVMTRACE_C5D(TRAP_GP_UNKNOWN, rc, regs->rax, regs->rbx, regs->rcx,
                          inst_addr);
         hvm_inject_hw_exception(TRAP_gp_fault, vmcb->exitinfo1);
+        break;
     }
 }
 
diff --git a/xen/arch/x86/hvm/vmware/vmport.c b/xen/arch/x86/hvm/vmware/vmport.c
index 962ee32..d1632bb 100644
--- a/xen/arch/x86/hvm/vmware/vmport.c
+++ b/xen/arch/x86/hvm/vmware/vmport.c
@@ -147,9 +147,34 @@ int vmport_ioport(int dir, uint32_t port, uint32_t bytes, uint32_t *val)
             regs->rax = 0x0;
             break;
         default:
+        {   /* Let backing DM handle */
+            unsigned long data, reps = 1;
+
             HVMTRACE_ND(VMPORT_UNKNOWN, 0, 1/*cycles*/, 6,
-                        (bytes << 8) + dir, cmd, regs->rbx,
+                        (bytes << 8) | dir, cmd, regs->rbx,
                         regs->rcx, regs->rsi, regs->rdi);
+            rc = hvmemul_do_vmport(BDOOR_PORT, &reps, bytes, dir, &data, regs);
+            switch (rc)
+            {
+            case X86EMUL_OKAY:
+                break;
+            case X86EMUL_VMPORT_RETRY:
+            case X86EMUL_RETRY:
+            {
+                struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
+
+                if ( vio->io_state != HVMIO_handle_vmport_awaiting_completion )
+                    gdprintk(XENLOG_WARNING, "vio: io_state=%d ==> %d\n",
+                             vio->io_state, rc);
+                return rc;
+                break;
+            }
+            default:
+                gdprintk(XENLOG_ERR, "Weird HVM ioemulation status %d.\n", rc);
+                domain_crash(current->domain);
+                break;
+            }
+        }
             break;
         }
 
@@ -190,6 +215,9 @@ int vmport_ioport(int dir, uint32_t port, uint32_t bytes, uint32_t *val)
         rc = X86EMUL_UNHANDLEABLE;
     }
 
+#ifdef VMPORT_IO_LOGGING
+    gdprintk(XENLOG_WARNING, "vmport: rc=%d\n", rc);
+#endif
     return rc;
 }
 
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index c84894a..dd079bd 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2643,10 +2643,24 @@ static void vmx_vmexit_gp_intercept(struct cpu_user_regs *regs,
                  "Unexpected instruction length difference: %lu vs %lu\n",
                  orig_inst_len, inst_len);
 #endif
-    if ( !rc )
-        update_guest_eip();
-    else
+    switch ( rc )
     {
+    case X86EMUL_VMPORT_RETRY:
+        rc = X86EMUL_RETRY;
+        /* fall through */
+    case X86EMUL_RETRY:
+    {
+        struct hvm_vcpu_io *vio = &v->arch.hvm_vcpu.hvm_io;
+        if ( vio->io_state != HVMIO_handle_vmport_awaiting_completion ) {
+            gdprintk(XENLOG_WARNING, "WARNING: io_state=%d?\n", vio->io_state);
+            break;
+        }
+    }
+        /* fall through */
+    case X86EMUL_OKAY:
+        update_guest_eip();
+        break;
+    default:
         VMPORT_DBG_LOG(VMPORT_LOG_GP_UNKNOWN,
                        "gp: rc=%d ecode=0x%lx eq=0x%lx ec=0x%x ip=%"PRIx64
                        " (0x%lx,%lu=>%lu) ax=%"PRIx64" bx=%"PRIx64
@@ -2661,7 +2675,8 @@ static void vmx_vmexit_gp_intercept(struct cpu_user_regs *regs,
         else
             HVMTRACE_C5D(TRAP_GP_UNKNOWN, rc, regs->rax, regs->rbx, regs->rcx,
                          inst_addr);
-        hvm_inject_hw_exception(TRAP_gp_fault, ecode);
+            hvm_inject_hw_exception(TRAP_gp_fault, ecode);
+        break;
     }
 }
 
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.h b/xen/arch/x86/x86_emulate/x86_emulate.h
index b059341..1733eca 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.h
+++ b/xen/arch/x86/x86_emulate/x86_emulate.h
@@ -110,6 +110,8 @@ struct __packed segment_register {
 #define X86EMUL_RETRY          3
  /* (cmpxchg accessor): CMPXCHG failed. Maps to X86EMUL_RETRY in caller. */
 #define X86EMUL_CMPXCHG_FAILED 3
+ /* Like X86EMUL_RETRY, but do not change vio->io_state. */
+#define X86EMUL_VMPORT_RETRY   4
 
 /* FPU sub-types which may be requested via ->get_fpu(). */
 enum x86_emulate_fpu_type {
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index d4718df..e7e6cd9 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -63,6 +63,7 @@ struct hvm_ioreq_server {
     ioservid_t             id;
     struct hvm_ioreq_page  ioreq;
     struct list_head       ioreq_vcpu_list;
+    struct hvm_ioreq_page  vmport_ioreq;
     struct hvm_ioreq_page  bufioreq;
 
     /* Lock to serialize access to buffered ioreq ring */
diff --git a/xen/include/asm-x86/hvm/emulate.h b/xen/include/asm-x86/hvm/emulate.h
index 5411302..6c3ff2a 100644
--- a/xen/include/asm-x86/hvm/emulate.h
+++ b/xen/include/asm-x86/hvm/emulate.h
@@ -53,6 +53,9 @@ struct segment_register *hvmemul_get_seg_reg(
 int hvmemul_do_pio(
     unsigned long port, unsigned long *reps, int size,
     paddr_t ram_gpa, int dir, int df, void *p_data);
+int hvmemul_do_vmport(
+    unsigned long port, unsigned long *reps, int size,
+    int dir, void *p_data, struct cpu_user_regs *regs);
 
 void hvm_dump_emulation_state(const char *prefix,
                               struct hvm_emulate_ctxt *hvmemul_ctxt);
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 0910147..b57c2d7 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -228,7 +228,7 @@ int hvm_vcpu_cacheattr_init(struct vcpu *v);
 void hvm_vcpu_cacheattr_destroy(struct vcpu *v);
 void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip);
 
-bool_t hvm_send_assist_req(ioreq_t *p);
+bool_t hvm_send_assist_req(ioreq_t *p, vmware_ioreq_t *vp);
 void hvm_broadcast_assist_req(ioreq_t *p);
 
 void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat);
diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h
index d257161..b100fea 100644
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -123,7 +123,7 @@ int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn,
                                  struct npfec);
 int handle_pio(uint16_t port, unsigned int size, int dir);
 void hvm_interrupt_post(struct vcpu *v, int vector, int type);
-void hvm_io_assist(ioreq_t *p);
+void hvm_io_assist(ioreq_t *p, vmware_ioreq_t *vp);
 void hvm_dpci_eoi(struct domain *d, unsigned int guest_irq,
                   const union vioapic_redir_entry *ent);
 void msix_write_completion(struct vcpu *);
diff --git a/xen/include/asm-x86/hvm/trace.h b/xen/include/asm-x86/hvm/trace.h
index 8af2d6a..10d78bb 100644
--- a/xen/include/asm-x86/hvm/trace.h
+++ b/xen/include/asm-x86/hvm/trace.h
@@ -66,6 +66,7 @@
 #define DO_TRC_HVM_VMPORT_BAD         DEFAULT_HVM_IO
 #define DO_TRC_HVM_VMPORT_BAD64       DEFAULT_HVM_IO
 #define DO_TRC_HVM_VMPORT_UNKNOWN     DEFAULT_HVM_IO
+#define DO_TRC_HVM_VMPORT_QEMU        DEFAULT_HVM_IO
 
 
 #define TRC_PAR_LONG(par) ((par)&0xFFFFFFFF),((par)>>32)
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 01e0665..1e63d7f 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -36,6 +36,7 @@ enum hvm_io_state {
     HVMIO_awaiting_completion,
     HVMIO_handle_mmio_awaiting_completion,
     HVMIO_handle_pio_awaiting_completion,
+    HVMIO_handle_vmport_awaiting_completion,
     HVMIO_completed
 };
 
diff --git a/xen/include/public/hvm/ioreq.h b/xen/include/public/hvm/ioreq.h
index 5b5fedf..c20b834 100644
--- a/xen/include/public/hvm/ioreq.h
+++ b/xen/include/public/hvm/ioreq.h
@@ -35,6 +35,7 @@
 #define IOREQ_TYPE_PIO          0 /* pio */
 #define IOREQ_TYPE_COPY         1 /* mmio ops */
 #define IOREQ_TYPE_PCI_CONFIG   2
+#define IOREQ_TYPE_VMWARE_PORT  3
 #define IOREQ_TYPE_TIMEOFFSET   7
 #define IOREQ_TYPE_INVALIDATE   8 /* mapcache */
 
@@ -48,6 +49,8 @@
  * 
  * 63....48|47..40|39..35|34..32|31........0
  * SEGMENT |BUS   |DEV   |FN    |OFFSET
+ *
+ * For I/O type IOREQ_TYPE_VMWARE_PORT also use the vmware_ioreq.
  */
 struct ioreq {
     uint64_t addr;          /* physical address */
@@ -66,11 +69,25 @@ struct ioreq {
 };
 typedef struct ioreq ioreq_t;
 
+struct vmware_ioreq {
+    uint32_t esi;
+    uint32_t edi;
+    uint32_t ebx;
+    uint32_t ecx;
+    uint32_t edx;
+};
+typedef struct vmware_ioreq vmware_ioreq_t;
+
 struct shared_iopage {
     struct ioreq vcpu_ioreq[1];
 };
 typedef struct shared_iopage shared_iopage_t;
 
+struct shared_vmport_iopage {
+    struct vmware_ioreq vcpu_vmport_ioreq[1];
+};
+typedef struct shared_vmport_iopage shared_vmport_iopage_t;
+
 struct buf_ioreq {
     uint8_t  type;   /* I/O type                    */
     uint8_t  pad:1;
diff --git a/xen/include/public/hvm/params.h b/xen/include/public/hvm/params.h
index c893dc5..2d75bdd 100644
--- a/xen/include/public/hvm/params.h
+++ b/xen/include/public/hvm/params.h
@@ -50,6 +50,8 @@
 #define HVM_PARAM_PAE_ENABLED  4
 
 #define HVM_PARAM_IOREQ_PFN    5
+/* Extra vmport PFN. */
+#define HVM_PARAM_VMPORT_IOREQ_PFN 36
 
 #define HVM_PARAM_BUFIOREQ_PFN 6
 #define HVM_PARAM_BUFIOREQ_EVTCHN 26
@@ -192,6 +194,6 @@
 /* Params for VMware */
 #define HVM_PARAM_VMWARE_HW                 35
 
-#define HVM_NR_PARAMS          36
+#define HVM_NR_PARAMS          37
 
 #endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
diff --git a/xen/include/public/trace.h b/xen/include/public/trace.h
index b231df3..e35a297 100644
--- a/xen/include/public/trace.h
+++ b/xen/include/public/trace.h
@@ -237,6 +237,7 @@
 #define TRC_HVM_VMPORT_BAD       (TRC_HVM_HANDLER + 0x2a)
 #define TRC_HVM_VMPORT_BAD64     (TRC_HVM_HANDLER + TRC_64_FLAG + 0x2a)
 #define TRC_HVM_VMPORT_UNKNOWN   (TRC_HVM_HANDLER + 0x2b)
+#define TRC_HVM_VMPORT_QEMU      (TRC_HVM_HANDLER + 0x2c)
 
 #define TRC_HVM_IOPORT_WRITE    (TRC_HVM_HANDLER + 0x216)
 #define TRC_HVM_IOMEM_WRITE     (TRC_HVM_HANDLER + 0x217)
-- 
1.8.4

