[Xen-changelog] [xen-unstable] Create a generic callback mechanism for Xen-bound event channels
# HG changeset patch
# User Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
# Date 1323202232 0
# Node ID 1620291f0c4a413b7ea7546fee6802a56d703d8f
# Parent d35bedf334f2072172dfc367b8db7b8a2a88a7ad
Create a generic callback mechanism for Xen-bound event channels
For event channels for which Xen is the consumer, there is currently a
single hard-coded notification action. With this patch, event channel
creators may specify a generic callback (or no callback, which keeps the
default behaviour). Because few distinct callbacks are expected, they are
stored in a small lookup table rather than as a pointer per channel.
Signed-off-by: Adin Scannell <adin@xxxxxxxxxxx>
Signed-off-by: Keir Fraser <keir@xxxxxxx>
Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
Committed-by: Tim Deegan <tim@xxxxxxx>
---
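As a hedged illustration (not part of this changeset), a Xen-internal consumer could use the new third argument roughly as sketched below; my_subsystem_notification and my_subsystem_setup are hypothetical names chosen for the example:

#include <xen/event.h>
#include <xen/sched.h>

/* Hypothetical callback: invoked instead of the default wake-up when the
 * guest notifies this Xen-bound port. */
static void my_subsystem_notification(struct vcpu *v, unsigned int port)
{
    /* subsystem-specific handling of the notification goes here */
}

/* Hypothetical setup: allocate a Xen-bound port and attach the callback. */
static int my_subsystem_setup(struct domain *d, domid_t remote_domid)
{
    int port = alloc_unbound_xen_event_channel(d->vcpu[0], remote_domid,
                                                my_subsystem_notification);
    if ( port < 0 )
        return port;
    /* ... advertise 'port' to the remote domain ... */
    return 0;
}

Passing NULL instead of a function keeps the previous behaviour: default_xen_notification_fn() wakes a vcpu blocked in wait_on_xen_event_channel(), which is why the existing call sites below simply gain a NULL argument.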
diff -r d35bedf334f2 -r 1620291f0c4a xen/arch/ia64/vmx/vmx_init.c
--- a/xen/arch/ia64/vmx/vmx_init.c Tue Dec 06 20:10:32 2011 +0000
+++ b/xen/arch/ia64/vmx/vmx_init.c Tue Dec 06 20:10:32 2011 +0000
@@ -377,7 +377,7 @@
{
struct vmx_ioreq_page *iorp = &v->domain->arch.hvm_domain.ioreq;
- int rc = alloc_unbound_xen_event_channel(v, 0);
+ int rc = alloc_unbound_xen_event_channel(v, 0, NULL);
if (rc < 0)
return rc;
v->arch.arch_vmx.xen_port = rc;
diff -r d35bedf334f2 -r 1620291f0c4a xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Tue Dec 06 20:10:32 2011 +0000
+++ b/xen/arch/x86/hvm/hvm.c Tue Dec 06 20:10:32 2011 +0000
@@ -979,7 +979,7 @@
goto fail3;
/* Create ioreq event channel. */
- rc = alloc_unbound_xen_event_channel(v, 0);
+ rc = alloc_unbound_xen_event_channel(v, 0, NULL);
if ( rc < 0 )
goto fail4;
@@ -989,7 +989,7 @@
if ( v->vcpu_id == 0 )
{
/* Create bufioreq event channel. */
- rc = alloc_unbound_xen_event_channel(v, 0);
+ rc = alloc_unbound_xen_event_channel(v, 0, NULL);
if ( rc < 0 )
goto fail2;
v->domain->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_EVTCHN] = rc;
@@ -3591,7 +3591,8 @@
for_each_vcpu ( d, v )
{
int old_port, new_port;
- new_port = alloc_unbound_xen_event_channel(v, a.value);
+ new_port = alloc_unbound_xen_event_channel(
+ v, a.value, NULL);
if ( new_port < 0 )
{
rc = new_port;
diff -r d35bedf334f2 -r 1620291f0c4a xen/arch/x86/mm/mem_event.c
--- a/xen/arch/x86/mm/mem_event.c Tue Dec 06 20:10:32 2011 +0000
+++ b/xen/arch/x86/mm/mem_event.c Tue Dec 06 20:10:32 2011 +0000
@@ -93,7 +93,8 @@
/* Allocate event channel */
rc = alloc_unbound_xen_event_channel(d->vcpu[0],
- current->domain->domain_id);
+ current->domain->domain_id,
+ NULL);
if ( rc < 0 )
goto err;
diff -r d35bedf334f2 -r 1620291f0c4a xen/common/event_channel.c
--- a/xen/common/event_channel.c Tue Dec 06 20:10:32 2011 +0000
+++ b/xen/common/event_channel.c Tue Dec 06 20:10:32 2011 +0000
@@ -57,6 +57,51 @@
goto out; \
} while ( 0 )
+#define consumer_is_xen(e) (!!(e)->xen_consumer)
+
+/*
+ * The function alloc_unbound_xen_event_channel() allows an arbitrary
+ * notifier function to be specified. However, very few unique functions
+ * are specified in practice, so to prevent bloating the evtchn structure
+ * with a pointer, we stash them dynamically in a small lookup array which
+ * can be indexed by a small integer.
+ */
+static xen_event_channel_notification_t xen_consumers[8];
+
+/* Default notification action: wake up from wait_on_xen_event_channel(). */
+static void default_xen_notification_fn(struct vcpu *v, unsigned int port)
+{
+ /* Consumer needs notification only if blocked. */
+ if ( test_and_clear_bit(_VPF_blocked_in_xen, &v->pause_flags) )
+ vcpu_wake(v);
+}
+
+/*
+ * Given a notification function, return the value to stash in
+ * the evtchn->xen_consumer field.
+ */
+static uint8_t get_xen_consumer(xen_event_channel_notification_t fn)
+{
+ unsigned int i;
+
+ if ( fn == NULL )
+ fn = default_xen_notification_fn;
+
+ for ( i = 0; i < ARRAY_SIZE(xen_consumers); i++ )
+ {
+ if ( xen_consumers[i] == NULL )
+ xen_consumers[i] = fn;
+ if ( xen_consumers[i] == fn )
+ break;
+ }
+
+ BUG_ON(i >= ARRAY_SIZE(xen_consumers));
+ return i+1;
+}
+
+/* Get the notification function for a given Xen-bound event channel. */
+#define xen_notification_fn(e) (xen_consumers[(e)->xen_consumer-1])
+
static int evtchn_set_pending(struct vcpu *v, int port);
static int virq_is_global(int virq)
@@ -397,7 +442,7 @@
chn1 = evtchn_from_port(d1, port1);
/* Guest cannot close a Xen-attached event channel. */
- if ( unlikely(chn1->consumer_is_xen) )
+ if ( unlikely(consumer_is_xen(chn1)) )
{
rc = -EINVAL;
goto out;
@@ -537,7 +582,7 @@
lchn = evtchn_from_port(ld, lport);
/* Guest cannot send via a Xen-attached event channel. */
- if ( unlikely(lchn->consumer_is_xen) )
+ if ( unlikely(consumer_is_xen(lchn)) )
{
spin_unlock(&ld->event_lock);
return -EINVAL;
@@ -554,13 +599,8 @@
rport = lchn->u.interdomain.remote_port;
rchn = evtchn_from_port(rd, rport);
rvcpu = rd->vcpu[rchn->notify_vcpu_id];
- if ( rchn->consumer_is_xen )
- {
- /* Xen consumers need notification only if they are blocked. */
- if ( test_and_clear_bit(_VPF_blocked_in_xen,
- &rvcpu->pause_flags) )
- vcpu_wake(rvcpu);
- }
+ if ( consumer_is_xen(rchn) )
+ (*xen_notification_fn(rchn))(rvcpu, rport);
else
{
evtchn_set_pending(rvcpu, rport);
@@ -787,7 +827,7 @@
chn = evtchn_from_port(d, port);
/* Guest cannot re-bind a Xen-attached event channel. */
- if ( unlikely(chn->consumer_is_xen) )
+ if ( unlikely(consumer_is_xen(chn)) )
{
rc = -EINVAL;
goto out;
@@ -998,7 +1038,8 @@
int alloc_unbound_xen_event_channel(
- struct vcpu *local_vcpu, domid_t remote_domid)
+ struct vcpu *local_vcpu, domid_t remote_domid,
+ xen_event_channel_notification_t notification_fn)
{
struct evtchn *chn;
struct domain *d = local_vcpu->domain;
@@ -1011,7 +1052,7 @@
chn = evtchn_from_port(d, port);
chn->state = ECS_UNBOUND;
- chn->consumer_is_xen = 1;
+ chn->xen_consumer = get_xen_consumer(notification_fn);
chn->notify_vcpu_id = local_vcpu->vcpu_id;
chn->u.unbound.remote_domid = remote_domid;
@@ -1038,8 +1079,8 @@
BUG_ON(!port_is_valid(d, port));
chn = evtchn_from_port(d, port);
- BUG_ON(!chn->consumer_is_xen);
- chn->consumer_is_xen = 0;
+ BUG_ON(!consumer_is_xen(chn));
+ chn->xen_consumer = 0;
spin_unlock(&d->event_lock);
@@ -1063,7 +1104,7 @@
ASSERT(port_is_valid(ld, lport));
lchn = evtchn_from_port(ld, lport);
- ASSERT(lchn->consumer_is_xen);
+ ASSERT(consumer_is_xen(lchn));
if ( likely(lchn->state == ECS_INTERDOMAIN) )
{
@@ -1106,7 +1147,7 @@
/* Close all existing event channels. */
for ( i = 0; port_is_valid(d, i); i++ )
{
- evtchn_from_port(d, i)->consumer_is_xen = 0;
+ evtchn_from_port(d, i)->xen_consumer = 0;
(void)__evtchn_close(d, i);
}
@@ -1192,7 +1233,7 @@
printk(" v=%d", chn->u.virq);
break;
}
- printk(" x=%d\n", chn->consumer_is_xen);
+ printk(" x=%d\n", chn->xen_consumer);
}
spin_unlock(&d->event_lock);
diff -r d35bedf334f2 -r 1620291f0c4a xen/include/xen/event.h
--- a/xen/include/xen/event.h Tue Dec 06 20:10:32 2011 +0000
+++ b/xen/include/xen/event.h Tue Dec 06 20:10:32 2011 +0000
@@ -51,8 +51,11 @@
void evtchn_move_pirqs(struct vcpu *v);
/* Allocate/free a Xen-attached event channel port. */
+typedef void (*xen_event_channel_notification_t)(
+ struct vcpu *v, unsigned int port);
int alloc_unbound_xen_event_channel(
- struct vcpu *local_vcpu, domid_t remote_domid);
+ struct vcpu *local_vcpu, domid_t remote_domid,
+ xen_event_channel_notification_t notification_fn);
void free_xen_event_channel(
struct vcpu *local_vcpu, int port);
diff -r d35bedf334f2 -r 1620291f0c4a xen/include/xen/sched.h
--- a/xen/include/xen/sched.h Tue Dec 06 20:10:32 2011 +0000
+++ b/xen/include/xen/sched.h Tue Dec 06 20:10:32 2011 +0000
@@ -47,7 +47,7 @@
#define ECS_VIRQ 5 /* Channel is bound to a virtual IRQ line. */
#define ECS_IPI 6 /* Channel is bound to a virtual IPI line. */
u8 state; /* ECS_* */
- u8 consumer_is_xen; /* Consumed by Xen or by guest? */
+ u8 xen_consumer; /* Consumer in Xen, if any? (0 = send to guest) */
u16 notify_vcpu_id; /* VCPU for local delivery notification */
union {
struct {