[Xen-changelog] [xen master] evtchn: refactor low-level event channel port ops

commit fbbd5009e6ed1201731b1727762070c1a988e67d
Author:     David Vrabel <david.vrabel@xxxxxxxxxx>
AuthorDate: Mon Oct 14 10:15:49 2013 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon Oct 14 10:15:49 2013 +0200

    evtchn: refactor low-level event channel port ops
    
    Use functions for the low-level event channel port operations
    (set/clear pending, unmask, is_pending and is_masked).
    
    Group these functions into a struct evtchn_port_ops so they can be
    replaced by alternate implementations (for different ABIs) on a
    per-domain basis.
    
    Signed-off-by: David Vrabel <david.vrabel@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    Acked-by: Keir Fraser <keir@xxxxxxx>
---
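The point of the new hooks is that an alternate event channel ABI only needs
to supply these five operations and install them per-domain, exactly as
evtchn_2l_init() does below for the existing 2-level ABI. A minimal sketch of
such a plug-in (the evtchn_fifo_* names are hypothetical and not part of this
patch):

#include <xen/sched.h>
#include <xen/event.h>

/* Hypothetical alternate-ABI implementations of the five port ops. */
static void evtchn_fifo_set_pending(struct vcpu *v, struct evtchn *evtchn)
{
    /* Mark the event pending and notify the guest, per the alternate ABI. */
}

static void evtchn_fifo_clear_pending(struct domain *d, struct evtchn *evtchn)
{
    /* Clear the pending state in the alternate ABI's data structures. */
}

static void evtchn_fifo_unmask(struct domain *d, struct evtchn *evtchn)
{
    /* Unmask the port and re-deliver any event that was pending. */
}

static bool_t evtchn_fifo_is_pending(struct domain *d,
                                     const struct evtchn *evtchn)
{
    return 0;  /* Placeholder; a real ABI would query its own pending state. */
}

static bool_t evtchn_fifo_is_masked(struct domain *d,
                                    const struct evtchn *evtchn)
{
    return 1;  /* Placeholder; a real ABI would query its own mask state. */
}

static const struct evtchn_port_ops evtchn_port_ops_fifo =
{
    .set_pending   = evtchn_fifo_set_pending,
    .clear_pending = evtchn_fifo_clear_pending,
    .unmask        = evtchn_fifo_unmask,
    .is_pending    = evtchn_fifo_is_pending,
    .is_masked     = evtchn_fifo_is_masked,
};

/* Selecting the ABI for a domain is then a single pointer assignment. */
void evtchn_fifo_init(struct domain *d)
{
    d->evtchn_port_ops = &evtchn_port_ops_fifo;
}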
 xen/arch/x86/irq.c         |   11 ++---
 xen/common/Makefile        |    1 +
 xen/common/event_2l.c      |   99 ++++++++++++++++++++++++++++++++++++++++++++
 xen/common/event_channel.c |   87 +++++++++++++++------------------------
 xen/common/schedule.c      |    3 +-
 xen/include/xen/event.h    |   45 ++++++++++++++++++++
 xen/include/xen/sched.h    |    4 ++
 7 files changed, 189 insertions(+), 61 deletions(-)

diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index 7f547ff..53fe9e3 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -1474,7 +1474,7 @@ int pirq_guest_unmask(struct domain *d)
         {
             pirq = pirqs[i]->pirq;
             if ( pirqs[i]->masked &&
-                 !test_bit(pirqs[i]->evtchn, &shared_info(d, evtchn_mask)) )
+                 !evtchn_port_is_masked(d, evtchn_from_port(d, pirqs[i]->evtchn)) )
                 pirq_guest_eoi(pirqs[i]);
         }
     } while ( ++pirq < d->nr_pirqs && n == ARRAY_SIZE(pirqs) );
@@ -2222,6 +2222,7 @@ static void dump_irqs(unsigned char key)
     int i, irq, pirq;
     struct irq_desc *desc;
     irq_guest_action_t *action;
+    struct evtchn *evtchn;
     struct domain *d;
     const struct pirq *info;
     unsigned long flags;
@@ -2262,13 +2263,11 @@ static void dump_irqs(unsigned char key)
                 d = action->guest[i];
                 pirq = domain_irq_to_pirq(d, irq);
                 info = pirq_info(d, pirq);
+                evtchn = evtchn_from_port(d, info->evtchn);
                 printk("%u:%3d(%c%c%c)",
                        d->domain_id, pirq,
-                       (test_bit(info->evtchn,
-                                 &shared_info(d, evtchn_pending)) ?
-                        'P' : '-'),
-                       (test_bit(info->evtchn, &shared_info(d, evtchn_mask)) ?
-                        'M' : '-'),
+                       (evtchn_port_is_pending(d, evtchn) ? 'P' : '-'),
+                       (evtchn_port_is_masked(d, evtchn) ? 'M' : '-'),
                        (info->masked ? 'M' : '-'));
                 if ( i != action->nr_guests )
                     printk(",");
diff --git a/xen/common/Makefile b/xen/common/Makefile
index fcb4a84..f6c473a 100644
--- a/xen/common/Makefile
+++ b/xen/common/Makefile
@@ -5,6 +5,7 @@ obj-y += cpupool.o
 obj-$(HAS_DEVICE_TREE) += device_tree.o
 obj-y += domctl.o
 obj-y += domain.o
+obj-y += event_2l.o
 obj-y += event_channel.o
 obj-y += grant_table.o
 obj-y += irq.o
diff --git a/xen/common/event_2l.c b/xen/common/event_2l.c
new file mode 100644
index 0000000..7b28942
--- /dev/null
+++ b/xen/common/event_2l.c
@@ -0,0 +1,99 @@
+/*
+ * Event channel port operations.
+ *
+ * Copyright (c) 2003-2006, K A Fraser.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2 or later.  See the file COPYING for more details.
+ */
+
+#include <xen/config.h>
+#include <xen/init.h>
+#include <xen/lib.h>
+#include <xen/errno.h>
+#include <xen/sched.h>
+#include <xen/event.h>
+
+static void evtchn_2l_set_pending(struct vcpu *v, struct evtchn *evtchn)
+{
+    struct domain *d = v->domain;
+    unsigned int port = evtchn->port;
+
+    /*
+     * The following bit operations must happen in strict order.
+     * NB. On x86, the atomic bit operations also act as memory barriers.
+     * There is therefore sufficiently strict ordering for this architecture --
+     * others may require explicit memory barriers.
+     */
+
+    if ( test_and_set_bit(port, &shared_info(d, evtchn_pending)) )
+        return;
+
+    if ( !test_bit        (port, &shared_info(d, evtchn_mask)) &&
+         !test_and_set_bit(port / BITS_PER_EVTCHN_WORD(d),
+                           &vcpu_info(v, evtchn_pending_sel)) )
+    {
+        vcpu_mark_events_pending(v);
+    }
+
+    evtchn_check_pollers(d, port);
+}
+
+static void evtchn_2l_clear_pending(struct domain *d, struct evtchn *evtchn)
+{
+    clear_bit(evtchn->port, &shared_info(d, evtchn_pending));
+}
+
+static void evtchn_2l_unmask(struct domain *d, struct evtchn *evtchn)
+{
+    struct vcpu *v = d->vcpu[evtchn->notify_vcpu_id];
+    unsigned int port = evtchn->port;
+
+    /*
+     * These operations must happen in strict order. Based on
+     * evtchn_2l_set_pending() above.
+     */
+    if ( test_and_clear_bit(port, &shared_info(d, evtchn_mask)) &&
+         test_bit          (port, &shared_info(d, evtchn_pending)) &&
+         !test_and_set_bit (port / BITS_PER_EVTCHN_WORD(d),
+                            &vcpu_info(v, evtchn_pending_sel)) )
+    {
+        vcpu_mark_events_pending(v);
+    }
+}
+
+static bool_t evtchn_2l_is_pending(struct domain *d,
+                                   const struct evtchn *evtchn)
+{
+    return test_bit(evtchn->port, &shared_info(d, evtchn_pending));
+}
+
+static bool_t evtchn_2l_is_masked(struct domain *d,
+                                  const struct evtchn *evtchn)
+{
+    return test_bit(evtchn->port, &shared_info(d, evtchn_mask));
+}
+
+static const struct evtchn_port_ops evtchn_port_ops_2l =
+{
+    .set_pending   = evtchn_2l_set_pending,
+    .clear_pending = evtchn_2l_clear_pending,
+    .unmask        = evtchn_2l_unmask,
+    .is_pending    = evtchn_2l_is_pending,
+    .is_masked     = evtchn_2l_is_masked,
+};
+
+void evtchn_2l_init(struct domain *d)
+{
+    d->evtchn_port_ops = &evtchn_port_ops_2l;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 64c976b..7290a21 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -150,6 +150,7 @@ static int get_free_port(struct domain *d)
             xfree(chn);
             return -ENOMEM;
         }
+        chn[i].port = port + i;
     }
 
     bucket_from_port(d, port) = chn;
@@ -530,7 +531,7 @@ static long __evtchn_close(struct domain *d1, int port1)
     }
 
     /* Clear pending event to avoid unexpected behavior on re-bind. */
-    clear_bit(port1, &shared_info(d1, evtchn_pending));
+    evtchn_port_clear_pending(d1, chn1);
 
     /* Reset binding to vcpu0 when the channel is freed. */
     chn1->state          = ECS_FREE;
@@ -615,43 +616,7 @@ out:
 
 static void evtchn_set_pending(struct vcpu *v, int port)
 {
-    struct domain *d = v->domain;
-    int vcpuid;
-
-    /*
-     * The following bit operations must happen in strict order.
-     * NB. On x86, the atomic bit operations also act as memory barriers.
-     * There is therefore sufficiently strict ordering for this architecture --
-     * others may require explicit memory barriers.
-     */
-
-    if ( test_and_set_bit(port, &shared_info(d, evtchn_pending)) )
-        return;
-
-    if ( !test_bit        (port, &shared_info(d, evtchn_mask)) &&
-         !test_and_set_bit(port / BITS_PER_EVTCHN_WORD(d),
-                           &vcpu_info(v, evtchn_pending_sel)) )
-    {
-        vcpu_mark_events_pending(v);
-    }
-    
-    /* Check if some VCPU might be polling for this event. */
-    if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) )
-        return;
-
-    /* Wake any interested (or potentially interested) pollers. */
-    for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus);
-          vcpuid < d->max_vcpus;
-          vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid+1) )
-    {
-        v = d->vcpu[vcpuid];
-        if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
-             test_and_clear_bit(vcpuid, d->poll_mask) )
-        {
-            v->poll_evtchn = 0;
-            vcpu_unblock(v);
-        }
-    }
+    evtchn_port_set_pending(v, evtchn_from_port(v->domain, port));
 }
 
 int guest_enabled_event(struct vcpu *v, uint32_t virq)
@@ -920,26 +885,15 @@ long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
 int evtchn_unmask(unsigned int port)
 {
     struct domain *d = current->domain;
-    struct vcpu   *v;
+    struct evtchn *evtchn;
 
     ASSERT(spin_is_locked(&d->event_lock));
 
     if ( unlikely(!port_is_valid(d, port)) )
         return -EINVAL;
 
-    v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id];
-
-    /*
-     * These operations must happen in strict order. Based on
-     * include/xen/event.h:evtchn_set_pending(). 
-     */
-    if ( test_and_clear_bit(port, &shared_info(d, evtchn_mask)) &&
-         test_bit          (port, &shared_info(d, evtchn_pending)) &&
-         !test_and_set_bit (port / BITS_PER_EVTCHN_WORD(d),
-                            &vcpu_info(v, evtchn_pending_sel)) )
-    {
-        vcpu_mark_events_pending(v);
-    }
+    evtchn = evtchn_from_port(d, port);
+    evtchn_port_unmask(d, evtchn);
 
     return 0;
 }
@@ -1170,9 +1124,34 @@ void notify_via_xen_event_channel(struct domain *ld, int lport)
     spin_unlock(&ld->event_lock);
 }
 
+void evtchn_check_pollers(struct domain *d, unsigned int port)
+{
+    struct vcpu *v;
+    unsigned int vcpuid;
+
+    /* Check if some VCPU might be polling for this event. */
+    if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) )
+        return;
+
+    /* Wake any interested (or potentially interested) pollers. */
+    for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus);
+          vcpuid < d->max_vcpus;
+          vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid+1) )
+    {
+        v = d->vcpu[vcpuid];
+        if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
+             test_and_clear_bit(vcpuid, d->poll_mask) )
+        {
+            v->poll_evtchn = 0;
+            vcpu_unblock(v);
+        }
+    }
+}
 
 int evtchn_init(struct domain *d)
 {
+    evtchn_2l_init(d);
+
     spin_lock_init(&d->event_lock);
     if ( get_free_port(d) != 0 )
         return -EINVAL;
@@ -1270,8 +1249,8 @@ static void domain_dump_evtchn_info(struct domain *d)
 
         printk("    %4u [%d/%d]: s=%d n=%d x=%d",
                port,
-               !!test_bit(port, &shared_info(d, evtchn_pending)),
-               !!test_bit(port, &shared_info(d, evtchn_mask)),
+               !!evtchn_port_is_pending(d, chn),
+               !!evtchn_port_is_masked(d, chn),
                chn->state, chn->notify_vcpu_id, chn->xen_consumer);
 
         switch ( chn->state )
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index af3abc2..b8e4cb4 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -34,6 +34,7 @@
 #include <xen/multicall.h>
 #include <xen/cpu.h>
 #include <xen/preempt.h>
+#include <xen/event.h>
 #include <public/sched.h>
 #include <xsm/xsm.h>
 
@@ -759,7 +760,7 @@ static long do_poll(struct sched_poll *sched_poll)
             goto out;
 
         rc = 0;
-        if ( test_bit(port, &shared_info(d, evtchn_pending)) )
+        if ( evtchn_port_is_pending(d, evtchn_from_port(d, port)) )
             goto out;
     }
 
diff --git a/xen/include/xen/event.h b/xen/include/xen/event.h
index 6f60162..30c59c9 100644
--- a/xen/include/xen/event.h
+++ b/xen/include/xen/event.h
@@ -102,4 +102,49 @@ void notify_via_xen_event_channel(struct domain *ld, int lport);
         smp_mb(); /* set blocked status /then/ caller does his work */  \
     } while ( 0 )
 
+void evtchn_check_pollers(struct domain *d, unsigned int port);
+
+void evtchn_2l_init(struct domain *d);
+
+/*
+ * Low-level event channel port ops.
+ */
+struct evtchn_port_ops {
+    void (*set_pending)(struct vcpu *v, struct evtchn *evtchn);
+    void (*clear_pending)(struct domain *d, struct evtchn *evtchn);
+    void (*unmask)(struct domain *d, struct evtchn *evtchn);
+    bool_t (*is_pending)(struct domain *d, const struct evtchn *evtchn);
+    bool_t (*is_masked)(struct domain *d, const struct evtchn *evtchn);
+};
+
+static inline void evtchn_port_set_pending(struct vcpu *v,
+                                           struct evtchn *evtchn)
+{
+    v->domain->evtchn_port_ops->set_pending(v, evtchn);
+}
+
+static inline void evtchn_port_clear_pending(struct domain *d,
+                                             struct evtchn *evtchn)
+{
+    d->evtchn_port_ops->clear_pending(d, evtchn);
+}
+
+static inline void evtchn_port_unmask(struct domain *d,
+                                      struct evtchn *evtchn)
+{
+    d->evtchn_port_ops->unmask(d, evtchn);
+}
+
+static inline bool_t evtchn_port_is_pending(struct domain *d,
+                                            const struct evtchn *evtchn)
+{
+    return d->evtchn_port_ops->is_pending(d, evtchn);
+}
+
+static inline bool_t evtchn_port_is_masked(struct domain *d,
+                                           const struct evtchn *evtchn)
+{
+    return d->evtchn_port_ops->is_masked(d, evtchn);
+}
+
 #endif /* __XEN_EVENT_H__ */
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 2e83f08..7c9eca3 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -66,6 +66,7 @@ struct evtchn
     u8  state;             /* ECS_* */
     u8  xen_consumer;      /* Consumer in Xen, if any? (0 = send to guest) */
     u16 notify_vcpu_id;    /* VCPU for local delivery notification */
+    u32 port;
     union {
         struct {
             domid_t remote_domid;
@@ -238,6 +239,8 @@ struct mem_event_per_domain
     struct mem_event_domain access;
 };
 
+struct evtchn_port_ops;
+
 struct domain
 {
     domid_t          domain_id;
@@ -271,6 +274,7 @@ struct domain
     /* Event channel information. */
     struct evtchn   *evtchn[NR_EVTCHN_BUCKETS];
     spinlock_t       event_lock;
+    const struct evtchn_port_ops *evtchn_port_ops;
 
     struct grant_table *grant_table;
 
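The net effect at call sites is that nothing outside the hooks touches the
shared_info bitmaps any more; a caller that wants to inspect a port goes
through the per-domain ops, along the lines of the sketch below
(port_has_event is a made-up helper, shown only to illustrate the pattern
used above in irq.c and schedule.c):

#include <xen/sched.h>
#include <xen/event.h>

/* Illustrative helper only: does a valid port currently have an event pending? */
static bool_t port_has_event(struct domain *d, unsigned int port)
{
    /* The ABI-specific bitmap test now sits behind the per-domain hook. */
    return port_is_valid(d, port) &&
           evtchn_port_is_pending(d, evtchn_from_port(d, port));
}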
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
