[Xen-devel] [PATCH 2/8] evtchn: refactor low-level event channel port ops
From: David Vrabel <david.vrabel@xxxxxxxxxx>

Use functions for the low-level event channel port operations
(set/clear pending, unmask, is_pending and is_masked).

Group these functions into a struct evtchn_port_ops so they can be
replaced by alternate implementations (for different ABIs) on a
per-domain basis.

Signed-off-by: David Vrabel <david.vrabel@xxxxxxxxxx>
---
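
For reviewers less familiar with the pattern: below is a minimal
standalone sketch of the per-domain ops-struct dispatch this patch
introduces.  It is not Xen code and compiles on its own; all names in
it (port_ops, l2_set_pending, etc.) are made up for illustration and
only mirror the shape of evtchn_port_ops / evtchn_port_ops_2l.

/*
 * Standalone model of per-domain port-op dispatch.  Hypothetical
 * names throughout; this is an illustration, not the patch itself.
 */
#include <stdio.h>
#include <stdbool.h>

struct evtchn { unsigned int port; bool pending; };

struct port_ops {
    void (*set_pending)(struct evtchn *chn);
    bool (*is_pending)(const struct evtchn *chn);
};

/* A "2-level" style implementation, standing in for evtchn_port_ops_2l. */
static void l2_set_pending(struct evtchn *chn) { chn->pending = true; }
static bool l2_is_pending(const struct evtchn *chn) { return chn->pending; }

static const struct port_ops port_ops_2l = {
    .set_pending = l2_set_pending,
    .is_pending  = l2_is_pending,
};

struct domain {
    const struct port_ops *port_ops; /* selected per domain, e.g. by ABI */
    struct evtchn chn;
};

int main(void)
{
    /* Every domain starts on the 2-level ABI, as evtchn_init() does here. */
    struct domain d = { .port_ops = &port_ops_2l };

    d.port_ops->set_pending(&d.chn);          /* cf. evtchn_port_set_pending() */
    printf("pending: %d\n", d.port_ops->is_pending(&d.chn));
    return 0;
}

The point of the indirection is that evtchn_init() can later select a
different ops table for a domain using another event channel ABI,
with no changes at any of the call sites.
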
 xen/arch/x86/irq.c         |   11 ++---
 xen/common/Makefile        |    1 +
 xen/common/event_channel.c |   65 +++++---------------------
 xen/common/event_port.c    |  108 ++++++++++++++++++++++++++++++++++++++++++++
 xen/common/schedule.c      |    3 +-
 xen/include/xen/event.h    |   40 ++++++++++++++++
 xen/include/xen/sched.h    |    4 ++
 7 files changed, 172 insertions(+), 60 deletions(-)
 create mode 100644 xen/common/event_port.c

diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index 345f1ae..3d26910 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -1452,7 +1452,7 @@ int pirq_guest_unmask(struct domain *d)
         {
             pirq = pirqs[i]->pirq;
             if ( pirqs[i]->masked &&
-                 !test_bit(pirqs[i]->evtchn, &shared_info(d, evtchn_mask)) )
+                 !evtchn_port_is_masked(d, evtchn_from_port(d, pirqs[i]->evtchn)) )
                 pirq_guest_eoi(pirqs[i]);
         }
     } while ( ++pirq < d->nr_pirqs && n == ARRAY_SIZE(pirqs) );
@@ -2046,6 +2046,7 @@ static void dump_irqs(unsigned char key)
     int i, irq, pirq;
     struct irq_desc *desc;
     irq_guest_action_t *action;
+    struct evtchn *evtchn;
     struct domain *d;
     const struct pirq *info;
     unsigned long flags;
@@ -2088,13 +2089,11 @@ static void dump_irqs(unsigned char key)
                 d = action->guest[i];
                 pirq = domain_irq_to_pirq(d, irq);
                 info = pirq_info(d, pirq);
+                evtchn = evtchn_from_port(d, info->evtchn);
                 printk("%u:%3d(%c%c%c)",
                        d->domain_id, pirq,
-                       (test_bit(info->evtchn,
-                                 &shared_info(d, evtchn_pending)) ?
-                        'P' : '-'),
-                       (test_bit(info->evtchn, &shared_info(d, evtchn_mask)) ?
-                        'M' : '-'),
+                       (evtchn_port_is_pending(d, evtchn) ? 'P' : '-'),
+                       (evtchn_port_is_masked(d, evtchn) ? 'M' : '-'),
                        (info->masked ? 'M' : '-'));
                 if ( i != action->nr_guests )
                     printk(",");
diff --git a/xen/common/Makefile b/xen/common/Makefile
index 8a0c506..f967b49 100644
--- a/xen/common/Makefile
+++ b/xen/common/Makefile
@@ -6,6 +6,7 @@ obj-$(HAS_DEVICE_TREE) += device_tree.o
 obj-y += domctl.o
 obj-y += domain.o
 obj-y += event_channel.o
+obj-y += event_port.o
 obj-y += grant_table.o
 obj-y += irq.o
 obj-y += kernel.o
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index a0f293f..b5e74d6 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -151,6 +151,7 @@ static int get_free_port(struct domain *d)
             xfree(chn);
             return -ENOMEM;
         }
+        chn[i].port = port + i;
     }
 
     return port;
@@ -529,7 +530,7 @@ static long __evtchn_close(struct domain *d1, int port1)
     }
 
     /* Clear pending event to avoid unexpected behavior on re-bind. */
-    clear_bit(port1, &shared_info(d1, evtchn_pending));
+    evtchn_port_clear_pending(d1, chn1);
 
     /* Reset binding to vcpu0 when the channel is freed. */
     chn1->state          = ECS_FREE;
@@ -614,43 +615,10 @@ out:
 
 static void evtchn_set_pending(struct vcpu *v, int port)
 {
-    struct domain *d = v->domain;
-    int vcpuid;
-
-    /*
-     * The following bit operations must happen in strict order.
-     * NB. On x86, the atomic bit operations also act as memory barriers.
-     * There is therefore sufficiently strict ordering for this architecture --
-     * others may require explicit memory barriers.
-     */
-
-    if ( test_and_set_bit(port, &shared_info(d, evtchn_pending)) )
-        return;
-
-    if ( !test_bit        (port, &shared_info(d, evtchn_mask)) &&
-         !test_and_set_bit(port / BITS_PER_EVTCHN_WORD(d),
-                           &vcpu_info(v, evtchn_pending_sel)) )
-    {
-        vcpu_mark_events_pending(v);
-    }
-    
-    /* Check if some VCPU might be polling for this event. */
-    if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) )
-        return;
+    struct evtchn *evtchn;
 
-    /* Wake any interested (or potentially interested) pollers. */
-    for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus);
-          vcpuid < d->max_vcpus;
-          vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid+1) )
-    {
-        v = d->vcpu[vcpuid];
-        if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
-             test_and_clear_bit(vcpuid, d->poll_mask) )
-        {
-            v->poll_evtchn = 0;
-            vcpu_unblock(v);
-        }
-    }
+    evtchn = evtchn_from_port(v->domain, port);
+    evtchn_port_set_pending(v, evtchn);
 }
 
 int guest_enabled_event(struct vcpu *v, uint32_t virq)
@@ -919,26 +887,15 @@ long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
 int evtchn_unmask(unsigned int port)
 {
     struct domain *d = current->domain;
-    struct vcpu   *v;
+    struct evtchn *evtchn;
 
     ASSERT(spin_is_locked(&d->event_lock));
 
     if ( unlikely(!port_is_valid(d, port)) )
         return -EINVAL;
 
-    v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id];
-
-    /*
-     * These operations must happen in strict order. Based on
-     * include/xen/event.h:evtchn_set_pending(). 
-     */
-    if ( test_and_clear_bit(port, &shared_info(d, evtchn_mask)) &&
-         test_bit          (port, &shared_info(d, evtchn_pending)) &&
-         !test_and_set_bit (port / BITS_PER_EVTCHN_WORD(d),
-                            &vcpu_info(v, evtchn_pending_sel)) )
-    {
-        vcpu_mark_events_pending(v);
-    }
+    evtchn = evtchn_from_port(d, port);
+    evtchn_port_unmask(d, evtchn);
 
     return 0;
 }
@@ -1184,6 +1141,8 @@ int evtchn_init(struct domain *d)
     bitmap_zero(d->poll_mask, MAX_VIRT_CPUS);
 #endif
 
+    d->evtchn_port_ops = &evtchn_port_ops_2l;
+
     return 0;
 }
 
@@ -1269,8 +1228,8 @@ static void domain_dump_evtchn_info(struct domain *d)
 
         printk("    %4u [%d/%d]: s=%d n=%d x=%d",
                port,
-               !!test_bit(port, &shared_info(d, evtchn_pending)),
-               !!test_bit(port, &shared_info(d, evtchn_mask)),
+               !!evtchn_port_is_pending(d, chn),
+               !!evtchn_port_is_masked(d, chn),
                chn->state, chn->notify_vcpu_id, chn->xen_consumer);
 
         switch ( chn->state )
diff --git a/xen/common/event_port.c b/xen/common/event_port.c
new file mode 100644
index 0000000..b0ef25b
--- /dev/null
+++ b/xen/common/event_port.c
@@ -0,0 +1,108 @@
+/*
+ * Event channel port operations.
+ *
+ * Copyright (c) 2003-2006, K A Fraser.
+ * 
+ * This source code is licensed under the GNU General Public License,
+ * Version 2 or later.  See the file COPYING for more details.
+ */
+
+#include <xen/config.h>
+#include <xen/init.h>
+#include <xen/lib.h>
+#include <xen/errno.h>
+#include <xen/sched.h>
+#include <xen/event.h>
+
+static void evtchn_check_pollers(struct domain *d, int port)
+{
+    struct vcpu *v;
+    int vcpuid;
+
+    /* Check if some VCPU might be polling for this event. */
+    if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) )
+        return;
+
+    /* Wake any interested (or potentially interested) pollers. */
+    for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus);
+          vcpuid < d->max_vcpus;
+          vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid+1) )
+    {
+        v = d->vcpu[vcpuid];
+        if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
+             test_and_clear_bit(vcpuid, d->poll_mask) )
+        {
+            v->poll_evtchn = 0;
+            vcpu_unblock(v);
+        }
+    }
+}
+
+static void evtchn_2l_set_pending(struct vcpu *v, struct evtchn *evtchn)
+{
+    struct domain *d = v->domain;
+    int port = evtchn->port;
+
+    /*
+     * The following bit operations must happen in strict order.
+     * NB. On x86, the atomic bit operations also act as memory barriers.
+     * There is therefore sufficiently strict ordering for this architecture --
+     * others may require explicit memory barriers.
+     */
+
+    if ( test_and_set_bit(port, &shared_info(d, evtchn_pending)) )
+        return;
+
+    if ( !test_bit        (port, &shared_info(d, evtchn_mask)) &&
+         !test_and_set_bit(port / BITS_PER_EVTCHN_WORD(d),
+                           &vcpu_info(v, evtchn_pending_sel)) )
+    {
+        vcpu_mark_events_pending(v);
+    }
+
+    evtchn_check_pollers(d, port);
+}
+
+static void evtchn_2l_clear_pending(struct domain *d, struct evtchn *evtchn)
+{
+    clear_bit(evtchn->port, &shared_info(d, evtchn_pending));
+}
+
+static void evtchn_2l_unmask(struct domain *d, struct evtchn *evtchn)
+{
+    struct vcpu *v = d->vcpu[evtchn->notify_vcpu_id];
+    int port = evtchn->port;
+
+    /*
+     * These operations must happen in strict order. Based on
+     * evtchn_2l_set_pending() above.
+     */
+    if ( test_and_clear_bit(port, &shared_info(d, evtchn_mask)) &&
+         test_bit          (port, &shared_info(d, evtchn_pending)) &&
+         !test_and_set_bit (port / BITS_PER_EVTCHN_WORD(d),
+                            &vcpu_info(v, evtchn_pending_sel)) )
+    {
+        vcpu_mark_events_pending(v);
+    }
+}
+
+static bool_t evtchn_2l_is_pending(struct domain *d,
+                                   const struct evtchn *evtchn)
+{
+    return test_bit(evtchn->port, &shared_info(d, evtchn_pending));
+}
+
+static bool_t evtchn_2l_is_masked(struct domain *d,
+                                  const struct evtchn *evtchn)
+{
+    return test_bit(evtchn->port, &shared_info(d, evtchn_mask));
+}
+
+struct evtchn_port_ops evtchn_port_ops_2l =
+{
+    .set_pending   = evtchn_2l_set_pending,
+    .clear_pending = evtchn_2l_clear_pending,
+    .unmask        = evtchn_2l_unmask,
+    .is_pending    = evtchn_2l_is_pending,
+    .is_masked     = evtchn_2l_is_masked,
+};
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 9c58b12..9d2aa5e 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -33,6 +33,7 @@
 #include <xen/multicall.h>
 #include <xen/cpu.h>
 #include <xen/preempt.h>
+#include <xen/event.h>
 #include <public/sched.h>
 #include <xsm/xsm.h>
 
@@ -693,7 +694,7 @@ static long do_poll(struct sched_poll *sched_poll)
             goto out;
 
         rc = 0;
-        if ( test_bit(port, &shared_info(d, evtchn_pending)) )
+        if ( evtchn_port_is_pending(d, evtchn_from_port(d, port)) )
             goto out;
     }
 
diff --git a/xen/include/xen/event.h b/xen/include/xen/event.h
index 71c3e92..0d23e3f 100644
--- a/xen/include/xen/event.h
+++ b/xen/include/xen/event.h
@@ -102,4 +102,44 @@ void notify_via_xen_event_channel(struct domain *ld, int lport);
         mb(); /* set blocked status /then/ caller does his work */      \
     } while ( 0 )
 
+/*
+ * Low-level event channel port ops.
+ */
+struct evtchn_port_ops {
+    void (*set_pending)(struct vcpu *v, struct evtchn *evtchn);
+    void (*clear_pending)(struct domain *d, struct evtchn *evtchn);
+    void (*unmask)(struct domain *d, struct evtchn *evtchn);
+    bool_t (*is_pending)(struct domain *d, const struct evtchn *evtchn);
+    bool_t (*is_masked)(struct domain *d, const struct evtchn *evtchn);
+};
+
+extern struct evtchn_port_ops evtchn_port_ops_2l;
+
+static inline void evtchn_port_set_pending(struct vcpu *v, struct evtchn *evtchn)
+{
+    v->domain->evtchn_port_ops->set_pending(v, evtchn);
+}
+
+static inline void evtchn_port_clear_pending(struct domain *d, struct evtchn *evtchn)
+{
+    d->evtchn_port_ops->clear_pending(d, evtchn);
+}
+
+static inline void evtchn_port_unmask(struct domain *d,
+                                      struct evtchn *evtchn)
+{
+    d->evtchn_port_ops->unmask(d, evtchn);
+}
+
+static inline bool_t evtchn_port_is_pending(struct domain *d,
+                                            const struct evtchn *evtchn)
+{
+    return d->evtchn_port_ops->is_pending(d, evtchn);
+}
+
+static inline bool_t evtchn_port_is_masked(struct domain *d, const struct evtchn *evtchn)
+{
+    return d->evtchn_port_ops->is_masked(d, evtchn);
+}
+
 #endif /* __XEN_EVENT_H__ */
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index e108436..0db73c0 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -81,6 +81,7 @@ struct evtchn
         } pirq;        /* state == ECS_PIRQ */
         u16 virq;      /* state == ECS_VIRQ */
     } u;
+    u32 port;
 #ifdef FLASK_ENABLE
     void *ssid;
 #endif
@@ -230,6 +231,8 @@ struct mem_event_per_domain
     struct mem_event_domain access;
 };
 
+struct evtchn_port_ops;
+
 struct domain
 {
     domid_t          domain_id;
@@ -262,6 +265,7 @@ struct domain
     /* Event channel information. */
     struct evtchn   *evtchn[NR_EVTCHN_BUCKETS];
     spinlock_t       event_lock;
+    struct evtchn_port_ops *evtchn_port_ops;
 
     struct grant_table *grant_table;
 
-- 
1.7.2.5

