
[Xen-changelog] [xen master] ioreq-server: make buffered ioreq handling optional



commit 41b41c0b0d2e1b83929adcc8de703ea0cc2417a6
Author:     Paul Durrant <paul.durrant@xxxxxxxxxx>
AuthorDate: Mon Jun 2 10:02:25 2014 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon Jun 2 10:02:25 2014 +0200

    ioreq-server: make buffered ioreq handling optional
    
    Some emulators will only register regions that require non-buffered
    access. (In practice the only region for which a guest uses buffered
    access today is the VGA aperture from 0xa0000-0xbffff.) This patch therefore
    makes allocation of the buffered ioreq page and event channel optional for
    secondary ioreq servers.
    
    If a guest attempts buffered access to an ioreq server that does not
    support it, the access will be handled via the normal synchronous path.
    
    Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
    Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
    Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 tools/libxc/xc_domain.c         |    2 +
 tools/libxc/xenctrl.h           |    2 +
 xen/arch/x86/hvm/hvm.c          |   78 +++++++++++++++++++++++++++-----------
 xen/include/public/hvm/hvm_op.h |   24 +++++++----
 4 files changed, 74 insertions(+), 32 deletions(-)

diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 17a8417..37ed141 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -1286,6 +1286,7 @@ int xc_get_hvm_param(xc_interface *handle, domid_t dom, int param, unsigned long
 
 int xc_hvm_create_ioreq_server(xc_interface *xch,
                                domid_t domid,
+                               int handle_bufioreq,
                                ioservid_t *id)
 {
     DECLARE_HYPERCALL;
@@ -1301,6 +1302,7 @@ int xc_hvm_create_ioreq_server(xc_interface *xch,
     hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(arg);
 
     arg->domid = domid;
+    arg->handle_bufioreq = !!handle_bufioreq;
 
     rc = do_xen_hypercall(xch, &hypercall);
 
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index 2045084..400f0df 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -1802,11 +1802,13 @@ int xc_get_hvm_param(xc_interface *handle, domid_t dom, int param, unsigned long
  *
  * @parm xch a handle to an open hypervisor interface.
  * @parm domid the domain id to be serviced
+ * @parm handle_bufioreq should the IOREQ Server handle buffered requests?
  * @parm id pointer to an ioservid_t to receive the IOREQ Server id.
  * @return 0 on success, -1 on failure.
  */
 int xc_hvm_create_ioreq_server(xc_interface *xch,
                                domid_t domid,
+                               int handle_bufioreq,
                                ioservid_t *id);
 
 /**
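
A minimal sketch of how a secondary emulator might call the revised libxc
function (the xc_interface handle `xch` and target `domid` below are
hypothetical, and error handling is elided):

    #include <xenctrl.h>

    static int create_sync_only_server(xc_interface *xch, domid_t domid,
                                       ioservid_t *id)
    {
        /*
         * Passing 0 for handle_bufioreq means no buffered ioreq page or
         * event channel is allocated for this server; any buffered access
         * directed at it falls back to the synchronous path.
         */
        return xc_hvm_create_ioreq_server(xch, domid, 0, id);
    }
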
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 3a30071..59836a1 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -655,7 +655,7 @@ static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
 
     sv->ioreq_evtchn = rc;
 
-    if ( v->vcpu_id == 0 )
+    if ( v->vcpu_id == 0 && s->bufioreq.va != NULL )
     {
         struct domain *d = s->domain;
 
@@ -706,7 +706,7 @@ static void hvm_ioreq_server_remove_vcpu(struct hvm_ioreq_server *s,
 
         list_del(&sv->list_entry);
 
-        if ( v->vcpu_id == 0 )
+        if ( v->vcpu_id == 0 && s->bufioreq.va != NULL )
             free_xen_event_channel(v, s->bufioreq_evtchn);
 
         free_xen_event_channel(v, sv->ioreq_evtchn);
@@ -733,7 +733,7 @@ static void hvm_ioreq_server_remove_all_vcpus(struct hvm_ioreq_server *s)
 
         list_del(&sv->list_entry);
 
-        if ( v->vcpu_id == 0 )
+        if ( v->vcpu_id == 0 && s->bufioreq.va != NULL )
             free_xen_event_channel(v, s->bufioreq_evtchn);
 
         free_xen_event_channel(v, sv->ioreq_evtchn);
@@ -745,7 +745,7 @@ static void hvm_ioreq_server_remove_all_vcpus(struct hvm_ioreq_server *s)
 }
 
 static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s,
-                                      bool_t is_default)
+                                      bool_t is_default, bool_t handle_bufioreq)
 {
     struct domain *d = s->domain;
     unsigned long ioreq_pfn, bufioreq_pfn;
@@ -754,6 +754,12 @@ static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s,
     if ( is_default )
     {
         ioreq_pfn = d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN];
+
+        /*
+         * The default ioreq server must handle buffered ioreqs, for
+         * backwards compatibility.
+         */
+        ASSERT(handle_bufioreq);
         bufioreq_pfn = d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN];
     }
     else
@@ -762,18 +768,24 @@ static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s,
         if ( rc )
             goto fail1;
 
-        rc = hvm_alloc_ioreq_gmfn(d, &bufioreq_pfn);
-        if ( rc )
-            goto fail2;
+        if ( handle_bufioreq )
+        {
+            rc = hvm_alloc_ioreq_gmfn(d, &bufioreq_pfn);
+            if ( rc )
+                goto fail2;
+        }
     }
 
     rc = hvm_map_ioreq_page(s, 0, ioreq_pfn);
     if ( rc )
         goto fail3;
 
-    rc = hvm_map_ioreq_page(s, 1, bufioreq_pfn);
-    if ( rc )
-        goto fail4;
+    if ( handle_bufioreq )
+    {
+        rc = hvm_map_ioreq_page(s, 1, bufioreq_pfn);
+        if ( rc )
+            goto fail4;
+    }
 
     return 0;
 
@@ -781,7 +793,7 @@ fail4:
     hvm_unmap_ioreq_page(s, 0);
 
 fail3:
-    if ( !is_default )
+    if ( !is_default && handle_bufioreq )
         hvm_free_ioreq_gmfn(d, bufioreq_pfn);
 
 fail2:
@@ -796,13 +808,18 @@ static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s,
                                          bool_t is_default)
 {
     struct domain *d = s->domain;
+    bool_t handle_bufioreq = ( s->bufioreq.va != NULL );
+
+    if ( handle_bufioreq )
+        hvm_unmap_ioreq_page(s, 1);
 
-    hvm_unmap_ioreq_page(s, 1);
     hvm_unmap_ioreq_page(s, 0);
 
     if ( !is_default )
     {
-        hvm_free_ioreq_gmfn(d, s->bufioreq.gmfn);
+        if ( handle_bufioreq )
+            hvm_free_ioreq_gmfn(d, s->bufioreq.gmfn);
+
         hvm_free_ioreq_gmfn(d, s->ioreq.gmfn);
     }
 }
@@ -867,6 +884,7 @@ static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
 {
     struct domain *d = s->domain;
     struct hvm_ioreq_vcpu *sv;
+    bool_t handle_bufioreq = ( s->bufioreq.va != NULL );
 
     spin_lock(&s->lock);
 
@@ -876,7 +894,9 @@ static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
     if ( !is_default )
     {
         hvm_remove_ioreq_gmfn(d, &s->ioreq);
-        hvm_remove_ioreq_gmfn(d, &s->bufioreq);
+
+        if ( handle_bufioreq )
+            hvm_remove_ioreq_gmfn(d, &s->bufioreq);
     }
 
     s->enabled = 1;
@@ -894,6 +914,7 @@ static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s,
                                     bool_t is_default)
 {
     struct domain *d = s->domain;
+    bool_t handle_bufioreq = ( s->bufioreq.va != NULL );
 
     spin_lock(&s->lock);
 
@@ -902,7 +923,9 @@ static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s,
 
     if ( !is_default )
     {
-        hvm_add_ioreq_gmfn(d, &s->bufioreq);
+        if ( handle_bufioreq )
+            hvm_add_ioreq_gmfn(d, &s->bufioreq);
+
         hvm_add_ioreq_gmfn(d, &s->ioreq);
     }
 
@@ -914,7 +937,7 @@ static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s,
 
 static int hvm_ioreq_server_init(struct hvm_ioreq_server *s, struct domain *d,
                                  domid_t domid, bool_t is_default,
-                                 ioservid_t id)
+                                 bool_t handle_bufioreq, ioservid_t id)
 {
     struct vcpu *v;
     int rc;
@@ -931,7 +954,7 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s, struct domain *d,
     if ( rc )
         goto fail1;
 
-    rc = hvm_ioreq_server_map_pages(s, is_default);
+    rc = hvm_ioreq_server_map_pages(s, is_default, handle_bufioreq);
     if ( rc )
         goto fail2;
 
@@ -991,7 +1014,8 @@ static ioservid_t next_ioservid(struct domain *d)
 }
 
 static int hvm_create_ioreq_server(struct domain *d, domid_t domid,
-                                   bool_t is_default, ioservid_t *id)
+                                   bool_t is_default, bool_t handle_bufioreq,
+                                   ioservid_t *id)
 {
     struct hvm_ioreq_server *s;
     int rc;
@@ -1008,7 +1032,7 @@ static int hvm_create_ioreq_server(struct domain *d, domid_t domid,
     if ( is_default && d->arch.hvm_domain.default_ioreq_server != NULL )
         goto fail2;
 
-    rc = hvm_ioreq_server_init(s, d, domid, is_default,
+    rc = hvm_ioreq_server_init(s, d, domid, is_default, handle_bufioreq,
                                next_ioservid(d));
     if ( rc )
         goto fail3;
@@ -1099,8 +1123,12 @@ static int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
             continue;
 
         *ioreq_pfn = s->ioreq.gmfn;
-        *bufioreq_pfn = s->bufioreq.gmfn;
-        *bufioreq_port = s->bufioreq_evtchn;
+
+        if ( s->bufioreq.va != NULL )
+        {
+            *bufioreq_pfn = s->bufioreq.gmfn;
+            *bufioreq_port = s->bufioreq_evtchn;
+        }
 
         rc = 0;
         break;
@@ -2378,6 +2406,9 @@ int hvm_buffered_io_send(ioreq_t *p)
     iorp = &s->bufioreq;
     pg = iorp->va;
 
+    if ( !pg )
+        return 0;
+
     /*
      * Return 0 for the cases we can't deal with:
      *  - 'addr' is only a 20-bit field, so we cannot address beyond 1MB
@@ -5137,7 +5168,8 @@ static int hvmop_create_ioreq_server(
     if ( rc != 0 )
         goto out;
 
-    rc = hvm_create_ioreq_server(d, curr_d->domain_id, 0, &op.id);
+    rc = hvm_create_ioreq_server(d, curr_d->domain_id, 0,
+                                 !!op.handle_bufioreq, &op.id);
     if ( rc != 0 )
         goto out;
 
@@ -5571,7 +5603,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
                 
                 /* May need to create server */
                 domid = d->arch.hvm_domain.params[HVM_PARAM_DM_DOMAIN];
-                rc = hvm_create_ioreq_server(d, domid, 1, NULL);
+                rc = hvm_create_ioreq_server(d, domid, 1, 1, NULL);
                 if ( rc != 0 && rc != -EEXIST )
                     goto param_fail;
                 /*FALLTHRU*/
diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
index f5d5d22..eeb0a60 100644
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -258,12 +258,15 @@ typedef uint16_t ioservid_t;
  * HVMOP_create_ioreq_server: Instantiate a new IOREQ Server for a secondary
  *                            emulator servicing domain <domid>.
  *
- * The <id> handed back is unique for <domid>.
+ * The <id> handed back is unique for <domid>. If <handle_bufioreq> is zero
+ * the buffered ioreq ring will not be allocated and hence all emulation
+ * requests to this server will be synchronous.
  */
 #define HVMOP_create_ioreq_server 17
 struct xen_hvm_create_ioreq_server {
-    domid_t domid; /* IN - domain to be serviced */
-    ioservid_t id; /* OUT - server id */
+    domid_t domid;           /* IN - domain to be serviced */
+    uint8_t handle_bufioreq; /* IN - should server handle buffered ioreqs */
+    ioservid_t id;           /* OUT - server id */
 };
 typedef struct xen_hvm_create_ioreq_server xen_hvm_create_ioreq_server_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_create_ioreq_server_t);
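
For illustration, the argument block a caller fills in for
HVMOP_create_ioreq_server now looks like the sketch below (it mirrors the
xc_hvm_create_ioreq_server() change above; the mechanism for actually
issuing the hypercall is elided):

    xen_hvm_create_ioreq_server_t arg = {
        .domid           = domid, /* IN: domain to be serviced */
        .handle_bufioreq = 0,     /* IN: synchronous-only server */
    };
    /* ... issue HVMOP_create_ioreq_server with a handle to arg ... */
    /* On success, arg.id holds the new IOREQ Server id. */
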
@@ -273,12 +276,15 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_create_ioreq_server_t);
  *                              IOREQ Server <id>. 
  *
  * The emulator needs to map the synchronous ioreq structures and buffered
- * ioreq ring that Xen uses to request emulation. These are hosted in domain
- * <domid>'s gmfns <ioreq_pfn> and <bufioreq_pfn> respectively. In addition the
- * emulator needs to bind to event channel <bufioreq_port> to listen for
- * buffered emulation requests. (The event channels used for synchronous
- * emulation requests are specified in the per-CPU ioreq structures in
- * <ioreq_pfn>).
+ * ioreq ring (if it exists) that Xen uses to request emulation. These are
+ * hosted in domain <domid>'s gmfns <ioreq_pfn> and <bufioreq_pfn>
+ * respectively. In addition, if the IOREQ Server is handling buffered
+ * emulation requests, the emulator needs to bind to event channel
+ * <bufioreq_port> to listen for them. (The event channels used for
+ * synchronous emulation requests are specified in the per-CPU ioreq
+ * structures in <ioreq_pfn>).
+ * If the IOREQ Server is not handling buffered emulation requests then the
+ * values handed back in <bufioreq_pfn> and <bufioreq_port> will both be 0.
  */
 #define HVMOP_get_ioreq_server_info 18
 struct xen_hvm_get_ioreq_server_info {
--
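A sketch of how an emulator might consume the HVMOP_get_ioreq_server_info
behaviour documented above (assuming the companion
xc_hvm_get_ioreq_server_info() wrapper from the same ioreq-server series;
names and error handling are illustrative only):

    xen_pfn_t ioreq_pfn, bufioreq_pfn;
    evtchn_port_t bufioreq_port;

    xc_hvm_get_ioreq_server_info(xch, domid, id, &ioreq_pfn,
                                 &bufioreq_pfn, &bufioreq_port);

    if ( bufioreq_pfn == 0 )
    {
        /*
         * The server was created with handle_bufioreq == 0: there is no
         * buffered ioreq ring to map and no event channel to bind.
         */
    }
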
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
