[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [RFC PATCH V2 14/14] Implement 3-level event channel routines



Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
 xen/common/event_channel.c |  150 +++++++++++++++++++++++++++++++++++---------
 1 file changed, 122 insertions(+), 28 deletions(-)

diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 54a847e..ae58f00 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -169,11 +169,25 @@ static int evtchn_is_pending_l2(struct domain *d, int port)
     return test_bit(port, &shared_info(d, evtchn_pending));
 }
 
+static int evtchn_is_pending_l3(struct domain *d, int port)
+{
+    unsigned int page_no = EVTCHN_PAGE_NO(port);
+    unsigned int offset = EVTCHN_OFFSET_IN_PAGE(port);
+    return test_bit(offset, d->evtchn_pending[page_no]);
+}
+
 static int evtchn_is_masked_l2(struct domain *d, int port)
 {
     return test_bit(port, &shared_info(d, evtchn_mask));
 }
 
+static int evtchn_is_masked_l3(struct domain *d, int port)
+{
+    unsigned int page_no = EVTCHN_PAGE_NO(port);
+    unsigned int offset = EVTCHN_OFFSET_IN_PAGE(port);
+    return test_bit(offset, d->evtchn_mask[page_no]);
+}
+
 int evtchn_is_pending(struct domain *d, int port)
 {
     return d->eops->is_pending(d, port);
@@ -639,10 +653,33 @@ out:
     return ret;
 }
 
+static void __check_vcpu_polling(struct vcpu *v, int port)
+{
+    int vcpuid;
+    struct domain *d = v->domain;
+
+    /* Check if some VCPU might be polling for this event. */
+    if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) )
+        return;
+
+    /* Wake any interested (or potentially interested) pollers. */
+    for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus);
+          vcpuid < d->max_vcpus;
+          vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid+1) )
+    {
+        v = d->vcpu[vcpuid];
+        if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
+             test_and_clear_bit(vcpuid, d->poll_mask) )
+        {
+            v->poll_evtchn = 0;
+            vcpu_unblock(v);
+        }
+    }
+}
+
 static void evtchn_set_pending_l2(struct vcpu *v, int port)
 {
     struct domain *d = v->domain;
-    int vcpuid;
 
     /*
      * The following bit operations must happen in strict order.
@@ -661,23 +698,35 @@ static void evtchn_set_pending_l2(struct vcpu *v, int port)
         vcpu_mark_events_pending(v);
     }
 
-    /* Check if some VCPU might be polling for this event. */
-    if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) )
-        return;
+    __check_vcpu_polling(v, port);
+}
 
-    /* Wake any interested (or potentially interested) pollers. */
-    for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus);
-          vcpuid < d->max_vcpus;
-          vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid+1) )
+static void evtchn_set_pending_l3(struct vcpu *v, int port)
+{
+    struct domain *d = v->domain;
+    unsigned int page_no = EVTCHN_PAGE_NO(port);
+    unsigned int offset = EVTCHN_OFFSET_IN_PAGE(port);
+    unsigned int l1bit = port >> (EVTCHN_WORD_BITORDER(d) << 1);
+    unsigned int l2bit = port >> EVTCHN_WORD_BITORDER(d);
+
+    /*
+     * The following bit operations must happen in strict order.
+     * NB. On x86, the atomic bit operations also act as memory barriers.
+     * There is therefore sufficiently strict ordering for this architecture --
+     * others may require explicit memory barriers.
+     */
+
+    if ( test_and_set_bit(offset, d->evtchn_pending[page_no]) )
+         return;
+
+    if ( !test_bit(offset, d->evtchn_mask[page_no]) &&
+         !test_and_set_bit(l2bit, v->evtchn_pending_sel_l2) &&
+         !test_and_set_bit(l1bit, &vcpu_info(v, evtchn_pending_sel)) )
     {
-        v = d->vcpu[vcpuid];
-        if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
-             test_and_clear_bit(vcpuid, d->poll_mask) )
-        {
-            v->poll_evtchn = 0;
-            vcpu_unblock(v);
-        }
+        vcpu_mark_events_pending(v);
     }
+
+    __check_vcpu_polling(v, port);
 }
 
 static void evtchn_clear_pending_l2(struct domain *d, int port)
@@ -685,6 +734,13 @@ static void evtchn_clear_pending_l2(struct domain *d, int port)
     clear_bit(port, &shared_info(d, evtchn_pending));
 }
 
+static void evtchn_clear_pending_l3(struct domain *d, int port)
+{
+    unsigned int page_no = EVTCHN_PAGE_NO(port);
+    unsigned int offset = EVTCHN_OFFSET_IN_PAGE(port);
+    clear_bit(offset, d->evtchn_pending[page_no]);
+}
+
 int guest_enabled_event(struct vcpu *v, uint32_t virq)
 {
     return ((v != NULL) && (v->virq_to_evtchn[virq] != 0));
@@ -976,6 +1032,37 @@ static int evtchn_unmask_l2(unsigned int port)
     return 0;
 }
 
+static int evtchn_unmask_l3(unsigned int port)
+{
+    struct domain *d = current->domain;
+    struct vcpu   *v;
+    unsigned int page_no = EVTCHN_PAGE_NO(port);
+    unsigned int offset = EVTCHN_OFFSET_IN_PAGE(port);
+    unsigned int l1bit = port >> (EVTCHN_WORD_BITORDER(d) << 1);
+    unsigned int l2bit = port >> EVTCHN_WORD_BITORDER(d);
+
+    ASSERT(spin_is_locked(&d->event_lock));
+
+    if ( unlikely(!port_is_valid(d, port)) )
+        return -EINVAL;
+
+    v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id];
+
+    /*
+     * These operations must happen in strict order. Based on
+     * include/xen/event.h:evtchn_set_pending().
+     */
+    if ( test_and_clear_bit(offset, d->evtchn_mask[page_no]) &&
+         test_bit          (offset, d->evtchn_pending[page_no]) &&
+         !test_and_set_bit (l2bit, v->evtchn_pending_sel_l2) &&
+         !test_and_set_bit (l1bit, &vcpu_info(v, evtchn_pending_sel)) )
+    {
+        vcpu_mark_events_pending(v);
+    }
+
+    return 0;
+}
+
 int evtchn_unmask(unsigned int port)
 {
     struct domain *d = current->domain;
@@ -1163,6 +1250,22 @@ static void __evtchn_setup_bitmap_l3(struct domain *d)
            sizeof(shared_info(d, evtchn_mask)));
 }
 
+static struct xen_evtchn_ops __read_mostly xen_evtchn_ops_l2 = {
+    .set_pending      = evtchn_set_pending_l2,
+    .clear_pending    = evtchn_clear_pending_l2,
+    .unmask           = evtchn_unmask_l2,
+    .is_pending       = evtchn_is_pending_l2,
+    .is_masked        = evtchn_is_masked_l2,
+};
+
+static struct xen_evtchn_ops __read_mostly xen_evtchn_ops_l3 = {
+    .set_pending      = evtchn_set_pending_l3,
+    .clear_pending    = evtchn_clear_pending_l3,
+    .unmask           = evtchn_unmask_l3,
+    .is_pending       = evtchn_is_pending_l3,
+    .is_masked        = evtchn_is_masked_l3,
+};
+
 static long evtchn_register_3level(
     XEN_GUEST_HANDLE_PARAM(evtchn_register_3level_t) arg)
 {
@@ -1235,6 +1338,7 @@ static long evtchn_register_3level(
     __evtchn_setup_bitmap_l3(d);
 
     d->evtchn_level = EVTCHN_3_LEVEL;
+    d->eops = &xen_evtchn_ops_l3;
 
  out_free:
     if ( evtchn_pending )
@@ -1383,10 +1487,6 @@ long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         if ( copy_from_guest(&reg, arg, 1) != 0 )
             return -EFAULT;
         rc = evtchn_register_nlevel(&reg);
-
-        /* XXX always fails this call because it is not yet completed */
-        rc = -EINVAL;
-
         break;
     }
 
@@ -1481,14 +1581,6 @@ void notify_via_xen_event_channel(struct domain *ld, int lport)
     spin_unlock(&ld->event_lock);
 }
 
-static struct xen_evtchn_ops __read_mostly xen_evtchn_ops_l2 = {
-    .set_pending      = evtchn_set_pending_l2,
-    .clear_pending    = evtchn_clear_pending_l2,
-    .unmask           = evtchn_unmask_l2,
-    .is_pending       = evtchn_is_pending_l2,
-    .is_masked        = evtchn_is_masked_l2,
-};
-
 int evtchn_init(struct domain *d)
 {
     BUILD_BUG_ON(sizeof(struct evtchn *) * NR_EVTCHN_BUCKETS > PAGE_SIZE);
@@ -1597,8 +1689,10 @@ static void domain_dump_evtchn_info(struct domain *d)
     bitmap_scnlistprintf(keyhandler_scratch, sizeof(keyhandler_scratch),
                          d->poll_mask, d->max_vcpus);
     printk("Event channel information for domain %d:\n"
+           "Using %d-level event channel\n"
            "Polling vCPUs: {%s}\n"
-           "    port [p/m]\n", d->domain_id, keyhandler_scratch);
+           "    port [p/m]\n",
+           d->domain_id, d->evtchn_level, keyhandler_scratch);
 
     spin_lock(&d->event_lock);
 
-- 
1.7.10.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.