
Re: [Xen-devel] [RFC][PATCH v2 1/1] Add IOREQ_TYPE_VMWARE_PORT

On 10/03/14 05:54, Stefano Stabellini wrote:
> On Fri, 3 Oct 2014, Ian Campbell wrote:
>> On Fri, 2014-10-03 at 10:47 +0100, Stefano Stabellini wrote:
>>> The issue with a union is compatibility with older QEMU versions: we can
>>> introduce the union and retain compatibility only if we use anonymous
>>> unions.  However, I seem to recall Jan arguing against anonymous unions
>>> in public interfaces in the past.
>> The canonical headers in xen/include/public are supposed to be strict
>> ANSI C, and anonymous unions are a gcc extension.
>>
>> However, no one is obliged to use this copy, and several projects
>> (including Linux, the *BSDs and others) take copies and modify them to
>> suit their local coding styles/conventions etc.  That could include
>> using anonymous unions if that is preferable.  I'm not sure that helps
>> you here, though (since the issue AIUI is with existing qemu releases...)
> Right, it doesn't help.
> Even upstream QEMU still builds against external Xen header files.
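
For concreteness, here is a minimal sketch of the two forms under
discussion (the demo_* names and field selection are invented for
illustration; they are not taken from ioreq.h):

#include <stdint.h>

/* gcc extension (standardised only in C11): anonymous members let
 * existing code keep writing req.data directly, so current QEMU
 * source would still compile -- but this is not strict ANSI C89. */
struct demo_ioreq_anon {
    uint64_t addr;
    uint8_t  type;
    union {
        uint64_t data;                      /* ordinary I/O payload  */
        struct {
            uint32_t eax, ebx, ecx, edx;    /* VMware register block */
        };
    };
};

/* Strict ANSI C89: every member is named, so the same field becomes
 * req.u.data (or req.u.regs.eax) -- a source-level compatibility
 * break for code that used req.data. */
struct demo_ioreq_named {
    uint64_t addr;
    uint8_t  type;
    union {
        uint64_t data;
        struct {
            uint32_t eax, ebx, ecx, edx;
        } regs;
    } u;
};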

Here is a union that, as far as I know, is ANSI C and avoids a cast to a
different type.  But it is a lot of changes just to avoid that cast.
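
Concretely, the cast being avoided is the existing type-pun in
hvmemul_do_vmport() (removed in the first hunk below); a before/after
sketch, with the other initialisers elided:

/* Today: build a vmware_ioreq_t, then type-pun it so it can be handed
 * to code that expects an ioreq_t *: */
vmware_ioreq_t p = { .type = IOREQ_TYPE_VMWARE_PORT /* , ... */ };
ioreq_t *pp = (ioreq_t *)&p;        /* cast to a different type */

/* With union_ioreq_t, both views share the same storage and each is
 * reached through a named member, so no cast is needed: */
union_ioreq_t up = { .vreq.type = IOREQ_TYPE_VMWARE_PORT /* , ... */ };
hvm_send_assist_req(&up);           /* new signature takes union_ioreq_t *;
                                       use &up.oreq where an ioreq_t * is
                                       still expected */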

Should I spend the time to make it part of this?

From 84b4f8eb944d436b4e47e4024ebefbbe4460cd32 Mon Sep 17 00:00:00 2001
From: Don Slutz <dslutz@xxxxxxxxxxx>
Date: Fri, 3 Oct 2014 15:15:11 -0400
Subject: [PATCH] Add union_ioreq_t

Signed-off-by: Don Slutz <dslutz@xxxxxxxxxxx>
---
 xen/arch/x86/hvm/emulate.c    | 90 +++++++++++++++++++++---------------------
 xen/arch/x86/hvm/hvm.c         | 73 +++++++++++++++++-----------------
 xen/arch/x86/hvm/io.c          | 27 ++++++-------
 xen/include/asm-x86/hvm/hvm.h  |  2 +-
 xen/include/asm-x86/hvm/io.h   |  2 +-
 xen/include/public/hvm/ioreq.h | 11 ++++++
 6 files changed, 109 insertions(+), 96 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 5f8c551..cee8a37 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -55,20 +55,18 @@ int hvmemul_do_vmport(uint16_t addr, int size, int dir,
 {
     struct vcpu *curr = current;
     struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
-    vmware_ioreq_t p = {
-        .type = IOREQ_TYPE_VMWARE_PORT,
-        .addr = addr,
-        .size = size,
-        .dir = dir,
-        .eax = regs->rax,
-        .ebx = regs->rbx,
-        .ecx = regs->rcx,
-        .edx = regs->rdx,
-        .esi = regs->rsi,
-        .edi = regs->rdi,
+    union_ioreq_t up = {
+        .vreq.type = IOREQ_TYPE_VMWARE_PORT,
+        .vreq.addr = addr,
+        .vreq.size = size,
+        .vreq.dir = dir,
+        .vreq.eax = regs->rax,
+        .vreq.ebx = regs->rbx,
+        .vreq.ecx = regs->rcx,
+        .vreq.edx = regs->rdx,
+        .vreq.esi = regs->rsi,
+        .vreq.edi = regs->rdi,
     };
-    ioreq_t *pp = (ioreq_t *)&p;
-    ioreq_t op;

     BUILD_BUG_ON(sizeof(ioreq_t) != sizeof(vmware_ioreq_t));
     BUILD_BUG_ON(offsetof(ioreq_t, type) != offsetof(vmware_ioreq_t, type));
@@ -104,21 +102,25 @@ int hvmemul_do_vmport(uint16_t addr, int size, int dir,
     }
     else
     {
-        if ( !hvm_send_assist_req(pp) )
+        if ( !hvm_send_assist_req(&up) )
             vio->io_state = HVMIO_none;
         return X86EMUL_RETRY;
     }

  finish_access_vmport:
-    memset(&op, 0, sizeof(op));
-    op.dir = dir;
-    op.addr = (uint16_t)regs->rdx;
-    op.data_is_ptr = 0;
-    op.data = vio->io_data;
-    hvmtrace_io_assist(0, &op);
+    {
+        ioreq_t op = {
+            .type = IOREQ_TYPE_PIO,
+            .dir = dir,
+            .addr = addr,
+            .data_is_ptr = 0,
+            .data = vio->io_data,
+        };
+
+        hvmtrace_io_assist(0, &op);
+    }

     memcpy(&regs->rax, &vio->io_data, vio->io_size);
-
     return X86EMUL_OKAY;
 }

@@ -128,14 +130,14 @@ static int hvmemul_do_io(
 {
     struct vcpu *curr = current;
     struct hvm_vcpu_io *vio;
-    ioreq_t p = {
-        .type = is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO,
-        .addr = addr,
-        .size = size,
-        .dir = dir,
-        .df = df,
-        .data = ram_gpa,
-        .data_is_ptr = (p_data == NULL),
+    union_ioreq_t up = {
+        .oreq.type = is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO,
+        .oreq.addr = addr,
+        .oreq.size = size,
+        .oreq.dir = dir,
+        .oreq.df = df,
+        .oreq.data = ram_gpa,
+        .oreq.data_is_ptr = (p_data == NULL),
     };
     unsigned long ram_gfn = paddr_to_pfn(ram_gpa);
     p2m_type_t p2mt;
@@ -173,15 +175,15 @@ static int hvmemul_do_io(
         return X86EMUL_UNHANDLEABLE;
     }

-    if ( !p.data_is_ptr && (dir == IOREQ_WRITE) )
+    if ( !up.oreq.data_is_ptr && (dir == IOREQ_WRITE) )
     {
-        memcpy(&p.data, p_data, size);
+        memcpy(&up.oreq.data, p_data, size);
         p_data = NULL;
     }

     vio = &curr->arch.hvm_vcpu.hvm_io;

-    if ( is_mmio && !p.data_is_ptr )
+    if ( is_mmio && !up.oreq.data_is_ptr )
     {
         /* Part of a multi-cycle read or write? */
         if ( dir == IOREQ_WRITE )
@@ -225,7 +227,7 @@ static int hvmemul_do_io(
         goto finish_access;
     case HVMIO_dispatched:
        /* May have to wait for previous cycle of a multi-write to complete. */
-        if ( is_mmio && !p.data_is_ptr && (dir == IOREQ_WRITE) &&
+        if ( is_mmio && !up.oreq.data_is_ptr && (dir == IOREQ_WRITE) &&
              (addr == (vio->mmio_large_write_pa +
                        vio->mmio_large_write_bytes)) )
         {
@@ -258,31 +260,31 @@ static int hvmemul_do_io(
     if ( vio->mmio_retrying )
         *reps = 1;

-    p.count = *reps;
+    up.oreq.count = *reps;

     if ( dir == IOREQ_WRITE )
-        hvmtrace_io_assist(is_mmio, &p);
+        hvmtrace_io_assist(is_mmio, &up.oreq);

     if ( is_mmio )
     {
-        rc = hvm_mmio_intercept(&p);
+        rc = hvm_mmio_intercept(&up.oreq);
         if ( rc == X86EMUL_UNHANDLEABLE )
-            rc = hvm_buffered_io_intercept(&p);
+            rc = hvm_buffered_io_intercept(&up.oreq);
     }
     else
     {
-        rc = hvm_portio_intercept(&p);
+        rc = hvm_portio_intercept(&up.oreq);
     }

     switch ( rc )
     {
     case X86EMUL_OKAY:
     case X86EMUL_RETRY:
-        *reps = p.count;
-        p.state = STATE_IORESP_READY;
+        *reps = up.oreq.count;
+        up.oreq.state = STATE_IORESP_READY;
         if ( !vio->mmio_retry )
         {
-            hvm_io_assist(&p);
+            hvm_io_assist(&up);
             vio->io_state = HVMIO_none;
         }
         else
@@ -299,7 +301,7 @@ static int hvmemul_do_io(
         else
         {
             rc = X86EMUL_RETRY;
-            if ( !hvm_send_assist_req(&p) )
+            if ( !hvm_send_assist_req(&up) )
                 vio->io_state = HVMIO_none;
             else if ( p_data == NULL )
                 rc = X86EMUL_OKAY;
@@ -318,12 +320,12 @@ static int hvmemul_do_io(

  finish_access:
     if ( dir == IOREQ_READ )
-        hvmtrace_io_assist(is_mmio, &p);
+        hvmtrace_io_assist(is_mmio, &up.oreq);

     if ( p_data != NULL )
         memcpy(p_data, &vio->io_data, size);

-    if ( is_mmio && !p.data_is_ptr )
+    if ( is_mmio && !up.oreq.data_is_ptr )
     {
         /* Part of a multi-cycle read or write? */
         if ( dir == IOREQ_WRITE )
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 83a7000..d1f3e52 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -372,9 +372,9 @@ void hvm_migrate_pirqs(struct vcpu *v)
     spin_unlock(&d->event_lock);
 }

-static ioreq_t *get_ioreq(struct hvm_ioreq_server *s, struct vcpu *v)
+static union_ioreq_t *get_ioreq(struct hvm_ioreq_server *s, struct vcpu *v)
 {
-    shared_iopage_t *p = s->ioreq.va;
+    union_shared_iopage_t *p = s->ioreq.va;

     ASSERT((v == current) || !vcpu_runnable(v));
     ASSERT(p != NULL);
@@ -391,34 +391,35 @@ bool_t hvm_io_pending(struct vcpu *v)
                          &d->arch.hvm_domain.ioreq_server.list,
                           list_entry )
     {
-        ioreq_t *p = get_ioreq(s, v);
+        union_ioreq_t *up = get_ioreq(s, v);

-        if ( p->state != STATE_IOREQ_NONE )
+        if ( up->oreq.state != STATE_IOREQ_NONE )
             return 1;
     }

     return 0;
 }

-static bool_t hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
+static bool_t hvm_wait_for_io(struct hvm_ioreq_vcpu *sv,
+                              union_ioreq_t *up)
 {
-    /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
-    while ( p->state != STATE_IOREQ_NONE )
+    /* NB. Optimised for common case (up->oreq.state == STATE_IOREQ_NONE). */
+    while ( up->oreq.state != STATE_IOREQ_NONE )
     {
-        switch ( p->state )
+        switch ( up->oreq.state )
         {
         case STATE_IORESP_READY: /* IORESP_READY -> NONE */
             rmb(); /* see IORESP_READY /then/ read contents of ioreq */
-            hvm_io_assist(p);
+            hvm_io_assist(up);
             break;
        case STATE_IOREQ_READY: /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
         case STATE_IOREQ_INPROCESS:
             wait_on_xen_event_channel(sv->ioreq_evtchn,
-                                      (p->state != STATE_IOREQ_READY) &&
-                                      (p->state != STATE_IOREQ_INPROCESS));
+                                      (up->oreq.state != STATE_IOREQ_READY) &&
+                                      (up->oreq.state != STATE_IOREQ_INPROCESS));
             break;
         default:
-            gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n", p->state);
+            gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n", up->oreq.state);
             domain_crash(sv->vcpu->domain);
             return 0; /* bail */
         }
@@ -598,9 +599,9 @@ static void hvm_update_ioreq_evtchn(struct hvm_ioreq_server *s,

     if ( s->ioreq.va != NULL )
     {
-        ioreq_t *p = get_ioreq(s, sv->vcpu);
+        union_ioreq_t *up = get_ioreq(s, sv->vcpu);

-        p->vp_eport = sv->ioreq_evtchn;
+        up->oreq.vp_eport = sv->ioreq_evtchn;
     }
 }

@@ -2526,27 +2527,27 @@ bool_t hvm_send_assist_req_to_ioreq_server(struct hvm_ioreq_server *s,
         if ( sv->vcpu == curr )
         {
             evtchn_port_t port = sv->ioreq_evtchn;
-            ioreq_t *p = get_ioreq(s, curr);
+            union_ioreq_t *up = get_ioreq(s, curr);

-            if ( unlikely(p->state != STATE_IOREQ_NONE) )
+            if ( unlikely(up->oreq.state != STATE_IOREQ_NONE) )
             {
                 gdprintk(XENLOG_ERR,
                          "Device model set bad IO state %d.\n",
-                         p->state);
+                         up->oreq.state);
                 goto crash;
             }

-            if ( unlikely(p->vp_eport != port) )
+            if ( unlikely(up->oreq.vp_eport != port) )
             {
                 gdprintk(XENLOG_ERR,
                          "Device model set bad event channel %d.\n",
-                         p->vp_eport);
+                         up->oreq.vp_eport);
                 goto crash;
             }

             proto_p->state = STATE_IOREQ_NONE;
             proto_p->vp_eport = port;
-            *p = *proto_p;
+            up->oreq = *proto_p;

             prepare_wait_on_xen_event_channel(port);

@@ -2555,7 +2556,7 @@ bool_t hvm_send_assist_req_to_ioreq_server(struct hvm_ioreq_server *s,
             * contents. prepare_wait_on_xen_event_channel() is an implicit
              * barrier.
              */
-            p->state = STATE_IOREQ_READY;
+            up->oreq.state = STATE_IOREQ_READY;
             notify_via_xen_event_channel(d, port);
             break;
         }
@@ -2568,44 +2569,44 @@ bool_t hvm_send_assist_req_to_ioreq_server(struct hvm_ioreq_server *s,
     return 0;
 }

-static bool_t hvm_complete_assist_req(ioreq_t *p)
+static bool_t hvm_complete_assist_req(union_ioreq_t *up)
 {
-    switch ( p->type )
+    switch ( up->oreq.type )
     {
     case IOREQ_TYPE_COPY:
     case IOREQ_TYPE_PIO:
-        if ( p->dir == IOREQ_READ )
+        if ( up->oreq.dir == IOREQ_READ )
         {
-            if ( !p->data_is_ptr )
-                p->data = ~0ul;
+            if ( !up->oreq.data_is_ptr )
+                up->oreq.data = ~0ul;
             else
             {
-                int i, step = p->df ? -p->size : p->size;
+                int i, step = up->oreq.df ? -up->oreq.size : up->oreq.size;
                 uint32_t data = ~0;

-                for ( i = 0; i < p->count; i++ )
-                    hvm_copy_to_guest_phys(p->data + step * i, &data,
-                                           p->size);
+                for ( i = 0; i < up->oreq.count; i++ )
+                    hvm_copy_to_guest_phys(up->oreq.data + step * i, &data,
+                                           up->oreq.size);
             }
         }
         /* FALLTHRU */
     default:
-        p->state = STATE_IORESP_READY;
-        hvm_io_assist(p);
+        up->oreq.state = STATE_IORESP_READY;
+        hvm_io_assist(up);
         break;
     }

     return 1;
 }

-bool_t hvm_send_assist_req(ioreq_t *p)
+bool_t hvm_send_assist_req(union_ioreq_t *up)
 {
-    struct hvm_ioreq_server *s = hvm_select_ioreq_server(current->domain, p);
+    struct hvm_ioreq_server *s = hvm_select_ioreq_server(current->domain, &up->oreq);

     if ( !s )
-        return hvm_complete_assist_req(p);
+        return hvm_complete_assist_req(up);

-    return hvm_send_assist_req_to_ioreq_server(s, p);
+    return hvm_send_assist_req_to_ioreq_server(s, &up->oreq);
 }

 void hvm_broadcast_assist_req(ioreq_t *p)
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 5d7d221..a06a507 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -169,13 +169,13 @@ int handle_pio(uint16_t port, unsigned int size, int dir)
     return 1;
 }

-void hvm_io_assist(ioreq_t *p)
+void hvm_io_assist(union_ioreq_t *up)
 {
     struct vcpu *curr = current;
     struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
     enum hvm_io_state io_state;

-    p->state = STATE_IOREQ_NONE;
+    up->oreq.state = STATE_IOREQ_NONE;

     io_state = vio->io_state;
     vio->io_state = HVMIO_none;
@@ -184,39 +184,38 @@ void hvm_io_assist(ioreq_t *p)
     {
     case HVMIO_awaiting_completion:
         vio->io_state = HVMIO_completed;
-        vio->io_data = p->data;
+        vio->io_data = up->oreq.data;
         break;
     case HVMIO_handle_mmio_awaiting_completion:
         vio->io_state = HVMIO_completed;
-        vio->io_data = p->data;
+        vio->io_data = up->oreq.data;
         (void)handle_mmio();
         break;
     case HVMIO_handle_pio_awaiting_completion:
         if ( vio->io_size == 4 ) /* Needs zero extension. */
-            guest_cpu_user_regs()->rax = (uint32_t)p->data;
+            guest_cpu_user_regs()->rax = (uint32_t)up->oreq.data;
         else
-            memcpy(&guest_cpu_user_regs()->rax, &p->data, vio->io_size);
+            memcpy(&guest_cpu_user_regs()->rax, &up->oreq.data, vio->io_size);
         break;
     case HVMIO_handle_vmport_awaiting_completion:
     {
         struct cpu_user_regs *regs = guest_cpu_user_regs();
-        vmware_ioreq_t *vp = (vmware_ioreq_t *)p;

         /* Always zero extension for eax */
-        regs->rax = vp->eax;
+        regs->rax = up->vreq.eax;
         /* Only change the 32bit part of the register */
-        regs->_ebx = vp->ebx;
-        regs->_ecx = vp->ecx;
-        regs->_edx = vp->edx;
-        regs->_esi = vp->esi;
-        regs->_edi = vp->edi;
+        regs->_ebx = up->vreq.ebx;
+        regs->_ecx = up->vreq.ecx;
+        regs->_edx = up->vreq.edx;
+        regs->_esi = up->vreq.esi;
+        regs->_edi = up->vreq.edi;
     }
         break;
     default:
         break;
     }

-    if ( p->state == STATE_IOREQ_NONE )
+    if ( up->oreq.state == STATE_IOREQ_NONE )
     {
         msix_write_completion(curr);
         vcpu_end_shutdown_deferral(curr);
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 0910147..844271c 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -228,7 +228,7 @@ int hvm_vcpu_cacheattr_init(struct vcpu *v);
 void hvm_vcpu_cacheattr_destroy(struct vcpu *v);
 void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip);

-bool_t hvm_send_assist_req(ioreq_t *p);
+bool_t hvm_send_assist_req(union_ioreq_t *up);
 void hvm_broadcast_assist_req(ioreq_t *p);

 void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat);
diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h
index d257161..1a183a1 100644
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -123,7 +123,7 @@ int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn,
                                  struct npfec);
 int handle_pio(uint16_t port, unsigned int size, int dir);
 void hvm_interrupt_post(struct vcpu *v, int vector, int type);
-void hvm_io_assist(ioreq_t *p);
+void hvm_io_assist(union_ioreq_t *p);
 void hvm_dpci_eoi(struct domain *d, unsigned int guest_irq,
                   const union vioapic_redir_entry *ent);
 void msix_write_completion(struct vcpu *);
diff --git a/xen/include/public/hvm/ioreq.h b/xen/include/public/hvm/ioreq.h
index 7d5ba58..d75ce66 100644
--- a/xen/include/public/hvm/ioreq.h
+++ b/xen/include/public/hvm/ioreq.h
@@ -85,11 +85,22 @@ struct vmware_ioreq {
 };
 typedef struct vmware_ioreq vmware_ioreq_t;

+union union_ioreq {
+    ioreq_t oreq;
+    vmware_ioreq_t vreq;
+};
+typedef union union_ioreq union_ioreq_t;
+
 struct shared_iopage {
     struct ioreq vcpu_ioreq[1];
 };
 typedef struct shared_iopage shared_iopage_t;

+struct union_shared_iopage {
+    union union_ioreq vcpu_ioreq[1];
+};
+typedef struct union_shared_iopage union_shared_iopage_t;
+
 struct buf_ioreq {
     uint8_t  type;   /* I/O type                    */
     uint8_t  pad:1;
--
1.7.11.7


    -Don Slutz

