[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v2 20/23] vixen: event channel passthrough support



From: Anthony Liguori <aliguori@xxxxxxxxxx>

For Vixen, we do not want to pass through all event channel
operations as HVM guests do not have nearly as many event channel
interactions as PV and on older versions of Xen, there is no reliable
way to wake up an event channel on VCPU != 0 leading to a variety of
deadlocks.

By only forwarding interdomain and unbound event channel operations,
we can avoid this problem since these can always be bound to VCPU 0
on older versions of Xen HVM.  On newer versions of Xen, we allow the
event channels to be bound to the VCPU requested by the inner guest.

To ensure that we keep everything in sync, all event channels end up
allocating an unbound event channel in the parent Xen and we rely on
the parent Xen to own the event channel address space.

Signed-off-by: Jan H. Schönherr <jschoenh@xxxxxxxxx>
Signed-off-by: Anthony Liguori <aliguori@xxxxxxxxxx>
---
 xen/common/event_channel.c | 81 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 81 insertions(+)

diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 3dee73b..54ea720 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -199,10 +199,34 @@ static int allocate_port(struct domain *d, int port)
     return port;
 }
 
+static int vixen_get_free_port(struct domain *d)
+{
+    int rc;
+    struct evtchn_alloc_unbound unbound = { .dom = DOMID_SELF,
+                                            .remote_dom = DOMID_SELF };
+
+    rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &unbound);
+    if ( rc )
+        return rc;
+
+    rc = allocate_port(d, unbound.port);
+    if ( rc < 0 )
+    {
+        struct evtchn_close close = { .port = unbound.port };
+        HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
+        printk("Vixen: failed to allocate event channel %d => %d\n",
+               unbound.port, rc);
+    }
+    return rc;
+}
+
 static int get_free_port(struct domain *d)
 {
     int port;
 
+    if ( is_vixen() )
+        return vixen_get_free_port(d);
+
     for ( port = 0; port_is_valid(d, port); port++ )
     {
         if ( port > d->max_evtchn_port )
@@ -252,6 +276,11 @@ static void free_evtchn(struct domain *d, struct evtchn *chn)
     xsm_evtchn_close_post(chn);
 }
 
+static bool is_loopback(domid_t ldom, domid_t rdom)
+{
+    return ldom == DOMID_SELF && rdom == DOMID_SELF;
+}
+
 static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
 {
     struct evtchn *chn;
@@ -266,6 +295,23 @@ static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
 
     spin_lock(&d->event_lock);
 
+    if ( is_vixen() && !is_loopback(alloc->dom, alloc->remote_dom) ) {
+        rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, alloc);
+        if ( rc )
+            goto out;
+
+        rc = evtchn_alloc_proxy(d, alloc->port, ECS_UNBOUND);
+        if ( rc )
+        {
+            struct evtchn_close close = { .port = alloc->port };
+            HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
+            printk("Vixen: failed to reserve unbound event channel %d => %ld\n",
+                   alloc->port, rc);
+        }
+
+        goto out;
+    }
+
     if ( (port = get_free_port(d)) < 0 )
         ERROR_EXIT_DOM(port, d);
     chn = evtchn_from_port(d, port);
@@ -315,6 +361,27 @@ static void double_evtchn_unlock(struct evtchn *lchn, struct evtchn *rchn)
         spin_unlock(&rchn->lock);
 }
 
+static long vixen_evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
+{
+    struct domain *d = current->domain;
+    long rc;
+
+    rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain, bind);
+    if ( rc )
+        return rc;
+
+    rc = evtchn_alloc_proxy(d, bind->local_port, ECS_INTERDOMAIN);
+    if ( rc )
+    {
+        struct evtchn_close close = { .port = bind->local_port };
+        HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
+        printk("Vixen: failed to reserve inter-domain event channel %d => %ld\n",
+               bind->local_port, rc);
+    }
+
+    return rc;
+}
+
 static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
 {
     struct evtchn *lchn, *rchn;
@@ -323,6 +390,9 @@ static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
     domid_t        rdom = bind->remote_dom;
     long           rc;
 
+    if ( is_vixen() && !is_loopback(DOMID_SELF, bind->remote_dom) )
+        return vixen_evtchn_bind_interdomain(bind);
+
     if ( rdom == DOMID_SELF )
         rdom = current->domain->domain_id;
 
@@ -581,6 +651,13 @@ static long evtchn_close(struct domain *d1, int port1, bool_t guest)
         goto out;
     }
 
+    if ( is_vixen() ) {
+        struct evtchn_close close = { .port = port1 };
+        rc = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
+        if (rc != 0)
+            goto out;
+    }
+
     switch ( chn1->state )
     {
     case ECS_FREE:
@@ -1215,6 +1292,10 @@ long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
 
     case EVTCHNOP_init_control: {
         struct evtchn_init_control init_control;
+
+        if ( is_vixen() )
+            return -ENOSYS;
+
         if ( copy_from_guest(&init_control, arg, 1) != 0 )
             return -EFAULT;
         rc = evtchn_fifo_init_control(&init_control);
-- 
1.9.1


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.