[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [RFC][PATCH 01/13] Kemari: add ECS_TAP state to event channel



This is an updated version of the following patch.  No major changes.

http://lists.xensource.com/archives/html/xen-devel/2009-03/msg00369.html

Signed-off-by: Yoshi Tamura <tamura.yoshiaki@xxxxxxxxxxxxx>
Signed-off-by: Yoshisato Yanagisawa <yanagisawa.yoshisato@xxxxxxxxxxxxx>
---
 xen/common/event_channel.c |  150 ++++++++++++++++++++++++++++++++++++++++++++-
 xen/include/xen/event.h    |   14 ++++
 xen/include/xen/sched.h    |   10 +++
 3 files changed, 173 insertions(+), 1 deletion(-)

diff -r b249f3e979a5 -r cf6a910e3663 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h   Mon Mar 09 10:32:24 2009 +0000
+++ b/xen/include/xen/sched.h   Wed Mar 11 18:03:47 2009 +0900
@@ -20,6 +20,7 @@
 #include <xen/rcupdate.h>
 #include <xen/irq.h>
 #include <xen/mm.h>
+#include <xen/kemari.h>

 #ifdef CONFIG_COMPAT
 #include <compat/vcpu.h>
@@ -47,6 +48,7 @@
 #define ECS_PIRQ         4 /* Channel is bound to a physical IRQ line.       */
 #define ECS_VIRQ         5 /* Channel is bound to a virtual IRQ line.        */
 #define ECS_IPI          6 /* Channel is bound to a virtual IPI line.        */
+#define ECS_TAP          7 /* Channel is bound and tapped.                   */
     u8  state;             /* ECS_* */
     u8  consumer_is_xen;   /* Consumed by Xen or by guest? */
     u16 notify_vcpu_id;    /* VCPU for local delivery notification */
@@ -61,6 +63,11 @@
         u16 pirq;      /* state == ECS_PIRQ */
         u16 virq;      /* state == ECS_VIRQ */
     } u;
+    struct {
+        u8 mode;    /* Tap IN, OUT or both.  */
+        /* Function to call when an event is detected. */
+        long (*redirect) (struct evtchn *lchn, struct evtchn *rchn);
+    } tap;
 #ifdef FLASK_ENABLE
     void *ssid;
 #endif
@@ -255,6 +262,9 @@
     /* OProfile support. */
     struct xenoprof *xenoprof;
     int32_t time_offset_seconds;
+
+    /* Kemari support. */
+    struct kemari *kemari;

     struct rcu_head rcu;

diff -r b249f3e979a5 -r cf6a910e3663 xen/include/xen/event.h
--- a/xen/include/xen/event.h   Mon Mar 09 10:32:24 2009 +0000
+++ b/xen/include/xen/event.h   Wed Mar 11 18:03:47 2009 +0900
@@ -82,4 +82,18 @@
         mb(); /* set blocked status /then/ caller does his work */      \
     } while ( 0 )

+struct evtchn_bind_tap {
+    /* IN parameters. */
+    domid_t       tap_dom;
+    uint32_t      tap_port;
+    uint8_t       mode;
+    long          (*redirect) (struct evtchn *lchn, struct evtchn *rchn);
+};
+
+void notify_via_xen_evtchn_tap(struct domain *ld, int lport);
+
+long evtchn_bind_tap(struct evtchn_bind_tap *bind_tap);
+
+long evtchn_unbind_tap(struct evtchn_bind_tap *bind_tap);
+
 #endif /* __XEN_EVENT_H__ */
diff -r b249f3e979a5 -r cf6a910e3663 xen/common/event_channel.c
--- a/xen/common/event_channel.c        Mon Mar 09 10:32:24 2009 +0000
+++ b/xen/common/event_channel.c        Wed Mar 11 18:03:47 2009 +0900
@@ -191,7 +191,8 @@
     if ( !port_is_valid(rd, rport) )
         ERROR_EXIT_DOM(-EINVAL, rd);
     rchn = evtchn_from_port(rd, rport);
-    if ( (rchn->state != ECS_UNBOUND) ||
+    /* Kemari needs to reuse the rchn information. */
+    if ( (rchn->state != ECS_UNBOUND) &&
          (rchn->u.unbound.remote_domid != ld->domain_id) )
         ERROR_EXIT_DOM(-EINVAL, rd);

@@ -338,6 +339,113 @@
     return rc;
 }

+long evtchn_bind_tap(struct evtchn_bind_tap *bind_tap)
+{
+    struct evtchn *lchn, *rchn;
+    struct domain *ld, *rd;
+    int            lport = bind_tap->tap_port, rport;
+    domid_t        ldom = bind_tap->tap_dom;
+    long ret;
+
+    if ( (ld = rcu_lock_domain_by_id(ldom)) == NULL )
+        return -ESRCH;
+
+    spin_lock(&ld->event_lock);
+
+    ret = -EINVAL;
+    if ( !port_is_valid(ld, lport) )
+        goto lchn_out;
+    lchn = evtchn_from_port(ld, lport);
+    if ( lchn->state != ECS_INTERDOMAIN )
+        goto lchn_out;
+
+    ret = -ESRCH;
+    rd = lchn->u.interdomain.remote_dom;
+    if ( rd == NULL )
+        goto lchn_out;
+
+    spin_lock(&rd->event_lock);
+
+    rport = lchn->u.interdomain.remote_port;
+    if ( !port_is_valid(rd, rport) )
+        goto rchn_out;
+    rchn = evtchn_from_port(rd, rport);
+    if ( rchn->state != ECS_INTERDOMAIN )
+        goto rchn_out;
+
+    lchn->state = ECS_TAP;
+    lchn->tap.mode = bind_tap->mode;
+    lchn->tap.redirect = bind_tap->redirect;
+
+    rchn->state = ECS_TAP;
+    rchn->tap.redirect = bind_tap->redirect;
+
+    ret = 0;
+
+ rchn_out:
+    spin_unlock(&rd->event_lock);
+
+ lchn_out:
+    spin_unlock(&ld->event_lock);
+
+    rcu_unlock_domain(ld);
+
+    return ret;
+}
+
+long evtchn_unbind_tap(struct evtchn_bind_tap *bind_tap)
+{
+    struct evtchn *lchn, *rchn;
+    struct domain *ld, *rd;
+    int            lport = bind_tap->tap_port, rport;
+    domid_t        ldom = bind_tap->tap_dom;
+    long ret;
+
+    if ( (ld = rcu_lock_domain_by_id(ldom)) == NULL )
+        return -ESRCH;
+
+    spin_lock(&ld->event_lock);
+
+    ret = -EINVAL;
+    if ( !port_is_valid(ld, lport) )
+        goto lchn_out;
+    lchn = evtchn_from_port(ld, lport);
+    if ( lchn->state != ECS_TAP )
+        goto lchn_out;
+
+    ret = -ESRCH;
+    rd = lchn->u.interdomain.remote_dom;
+    if ( rd == NULL )
+        goto lchn_out;
+
+    spin_lock(&rd->event_lock);
+
+    rport = lchn->u.interdomain.remote_port;
+    if ( !port_is_valid(rd, rport) )
+        goto rchn_out;
+    rchn = evtchn_from_port(rd, rport);
+    if ( rchn->state != ECS_TAP )
+        goto rchn_out;
+
+    lchn->state = ECS_INTERDOMAIN;
+    lchn->tap.mode = bind_tap->mode;
+    lchn->tap.redirect = NULL;
+
+    rchn->state = ECS_INTERDOMAIN;
+    rchn->tap.redirect = NULL;
+
+    ret = 0;
+
+ rchn_out:
+    spin_unlock(&rd->event_lock);
+
+ lchn_out:
+    spin_unlock(&ld->event_lock);
+
+    rcu_unlock_domain(ld);
+
+    return ret;
+}

 static long __evtchn_close(struct domain *d1, int port1)
 {
@@ -393,6 +501,7 @@
     case ECS_IPI:
         break;

+    case ECS_TAP:
     case ECS_INTERDOMAIN:
         if ( d2 == NULL )
         {
@@ -430,6 +539,14 @@
         BUG_ON(!port_is_valid(d2, port2));

         chn2 = evtchn_from_port(d2, port2);
+
+        if ( chn1->state == ECS_TAP )
+        {
+            chn1->tap.redirect = NULL;
+            chn2->tap.redirect = NULL;
+            chn2->state = ECS_INTERDOMAIN;
+        }
+
         BUG_ON(chn2->state != ECS_INTERDOMAIN);
         BUG_ON(chn2->u.interdomain.remote_dom != d1);

@@ -499,6 +616,13 @@

     switch ( lchn->state )
     {
+    case ECS_TAP:
+        rd    = lchn->u.interdomain.remote_dom;
+        rport = lchn->u.interdomain.remote_port;
+        rchn  = evtchn_from_port(rd, rport);
+
+        lchn->tap.redirect(lchn, rchn);
+
     case ECS_INTERDOMAIN:
         rd    = lchn->u.interdomain.remote_dom;
         rport = lchn->u.interdomain.remote_port;
@@ -1009,6 +1133,30 @@
     spin_unlock(&ld->event_lock);
 }

+void notify_via_xen_evtchn_tap(struct domain *ld, int lport)
+{
+    struct evtchn *lchn, *rchn;
+    struct domain *rd;
+    int            rport;
+
+    if (ld != current->domain)
+        spin_lock(&ld->event_lock);
+
+    ASSERT(port_is_valid(ld, lport));
+    lchn = evtchn_from_port(ld, lport);
+    ASSERT(lchn->consumer_is_xen);
+
+    if ( likely(lchn->state == ECS_INTERDOMAIN) )
+    {
+        rd    = lchn->u.interdomain.remote_dom;
+        rport = lchn->u.interdomain.remote_port;
+        rchn  = evtchn_from_port(rd, rport);
+        evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
+    }
+
+    if (ld != current->domain)
+        spin_unlock(&ld->event_lock);
+}

 int evtchn_init(struct domain *d)
 {




_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.