[Xen-devel] [PATCH 2 of 5] Create a generic callback mechanism for Xen-bound event channels
 xen/arch/ia64/vmx/vmx_init.c |    2 +-
 xen/arch/x86/hvm/hvm.c       |    7 ++-
 xen/arch/x86/mm/mem_event.c  |    3 +-
 xen/common/event_channel.c   |   75 ++++++++++++++++++++++++++++++++++---------
 xen/include/xen/event.h      |    5 ++-
 xen/include/xen/sched.h      |    2 +-
 6 files changed, 70 insertions(+), 24 deletions(-)

For event channels for which Xen is the consumer, there currently is a
single action. With this patch, we allow event channel creators to
specify a generic callback (or no callback). Because the expectation is
that there will be few callbacks, they are stored in a small table.

Signed-off-by: Adin Scannell <adin@xxxxxxxxxxx>
Signed-off-by: Keir Fraser <keir@xxxxxxx>
Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>

diff -r 2a4ec2e2ae36 -r 4c19931b40d5 xen/arch/ia64/vmx/vmx_init.c
--- a/xen/arch/ia64/vmx/vmx_init.c
+++ b/xen/arch/ia64/vmx/vmx_init.c
@@ -377,7 +377,7 @@ vmx_vcpu_initialise(struct vcpu *v)
 {
     struct vmx_ioreq_page *iorp = &v->domain->arch.hvm_domain.ioreq;
 
-    int rc = alloc_unbound_xen_event_channel(v, 0);
+    int rc = alloc_unbound_xen_event_channel(v, 0, NULL);
     if (rc < 0)
         return rc;
     v->arch.arch_vmx.xen_port = rc;
diff -r 2a4ec2e2ae36 -r 4c19931b40d5 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -979,7 +979,7 @@ int hvm_vcpu_initialise(struct vcpu *v)
         goto fail3;
 
     /* Create ioreq event channel. */
-    rc = alloc_unbound_xen_event_channel(v, 0);
+    rc = alloc_unbound_xen_event_channel(v, 0, NULL);
     if ( rc < 0 )
         goto fail4;
 
@@ -989,7 +989,7 @@ int hvm_vcpu_initialise(struct vcpu *v)
     if ( v->vcpu_id == 0 )
     {
         /* Create bufioreq event channel. */
-        rc = alloc_unbound_xen_event_channel(v, 0);
+        rc = alloc_unbound_xen_event_channel(v, 0, NULL);
         if ( rc < 0 )
             goto fail2;
         v->domain->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_EVTCHN] = rc;
@@ -3561,7 +3561,8 @@ long do_hvm_op(unsigned long op, XEN_GUE
                 for_each_vcpu ( d, v )
                 {
                     int old_port, new_port;
-                    new_port = alloc_unbound_xen_event_channel(v, a.value);
+                    new_port = alloc_unbound_xen_event_channel(
+                        v, a.value, NULL);
                     if ( new_port < 0 )
                     {
                         rc = new_port;
diff -r 2a4ec2e2ae36 -r 4c19931b40d5 xen/arch/x86/mm/mem_event.c
--- a/xen/arch/x86/mm/mem_event.c
+++ b/xen/arch/x86/mm/mem_event.c
@@ -93,7 +93,8 @@ static int mem_event_enable(struct domai
 
     /* Allocate event channel */
     rc = alloc_unbound_xen_event_channel(d->vcpu[0],
-                                         current->domain->domain_id);
+                                         current->domain->domain_id,
+                                         NULL);
     if ( rc < 0 )
         goto err;
diff -r 2a4ec2e2ae36 -r 4c19931b40d5 xen/common/event_channel.c
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -57,6 +57,51 @@
         goto out;                                                   \
     } while ( 0 )
 
+#define consumer_is_xen(e) (!!(e)->xen_consumer)
+
+/*
+ * The function alloc_unbound_xen_event_channel() allows an arbitrary
+ * notifier function to be specified. However, very few unique functions
+ * are specified in practice, so to prevent bloating the evtchn structure
+ * with a pointer, we stash them dynamically in a small lookup array which
+ * can be indexed by a small integer.
+ */
+static xen_event_channel_notification_t xen_consumers[8];
+
+/* Default notification action: wake up from wait_on_xen_event_channel(). */
+static void default_xen_notification_fn(struct vcpu *v, unsigned int port)
+{
+    /* Consumer needs notification only if blocked. */
+    if ( test_and_clear_bit(_VPF_blocked_in_xen, &v->pause_flags) )
+        vcpu_wake(v);
+}
+
+/*
+ * Given a notification function, return the value to stash in
+ * the evtchn->xen_consumer field.
+ */
+static uint8_t get_xen_consumer(xen_event_channel_notification_t fn)
+{
+    unsigned int i;
+
+    if ( fn == NULL )
+        fn = default_xen_notification_fn;
+
+    for ( i = 0; i < ARRAY_SIZE(xen_consumers); i++ )
+    {
+        if ( xen_consumers[i] == NULL )
+            xen_consumers[i] = fn;
+        if ( xen_consumers[i] == fn )
+            break;
+    }
+
+    BUG_ON(i >= ARRAY_SIZE(xen_consumers));
+    return i+1;
+}
+
+/* Get the notification function for a given Xen-bound event channel. */
+#define xen_notification_fn(e) (xen_consumers[(e)->xen_consumer-1])
+
 static int evtchn_set_pending(struct vcpu *v, int port);
 
 static int virq_is_global(int virq)
@@ -397,7 +442,7 @@ static long __evtchn_close(struct domain
     chn1 = evtchn_from_port(d1, port1);
 
     /* Guest cannot close a Xen-attached event channel. */
-    if ( unlikely(chn1->consumer_is_xen) )
+    if ( unlikely(consumer_is_xen(chn1)) )
     {
         rc = -EINVAL;
         goto out;
@@ -537,7 +582,7 @@ int evtchn_send(struct domain *d, unsign
     lchn = evtchn_from_port(ld, lport);
 
     /* Guest cannot send via a Xen-attached event channel. */
-    if ( unlikely(lchn->consumer_is_xen) )
+    if ( unlikely(consumer_is_xen(lchn)) )
     {
         spin_unlock(&ld->event_lock);
         return -EINVAL;
@@ -554,13 +599,8 @@ int evtchn_send(struct domain *d, unsign
         rport = lchn->u.interdomain.remote_port;
         rchn  = evtchn_from_port(rd, rport);
         rvcpu = rd->vcpu[rchn->notify_vcpu_id];
-        if ( rchn->consumer_is_xen )
-        {
-            /* Xen consumers need notification only if they are blocked. */
-            if ( test_and_clear_bit(_VPF_blocked_in_xen,
-                                    &rvcpu->pause_flags) )
-                vcpu_wake(rvcpu);
-        }
+        if ( consumer_is_xen(rchn) )
+            (*xen_notification_fn(rchn))(rvcpu, rport);
         else
         {
             evtchn_set_pending(rvcpu, rport);
@@ -787,7 +827,7 @@ long evtchn_bind_vcpu(unsigned int port,
     chn = evtchn_from_port(d, port);
 
     /* Guest cannot re-bind a Xen-attached event channel. */
-    if ( unlikely(chn->consumer_is_xen) )
+    if ( unlikely(consumer_is_xen(chn)) )
     {
         rc = -EINVAL;
         goto out;
@@ -998,7 +1038,8 @@ long do_event_channel_op(int cmd, XEN_GU
 
 
 int alloc_unbound_xen_event_channel(
-    struct vcpu *local_vcpu, domid_t remote_domid)
+    struct vcpu *local_vcpu, domid_t remote_domid,
+    xen_event_channel_notification_t notification_fn)
 {
     struct evtchn *chn;
     struct domain *d = local_vcpu->domain;
@@ -1011,7 +1052,7 @@ int alloc_unbound_xen_event_channel(
     chn = evtchn_from_port(d, port);
 
     chn->state = ECS_UNBOUND;
-    chn->consumer_is_xen = 1;
+    chn->xen_consumer = get_xen_consumer(notification_fn);
     chn->notify_vcpu_id = local_vcpu->vcpu_id;
     chn->u.unbound.remote_domid = remote_domid;
 
@@ -1038,8 +1079,8 @@ void free_xen_event_channel(
     BUG_ON(!port_is_valid(d, port));
     chn = evtchn_from_port(d, port);
-    BUG_ON(!chn->consumer_is_xen);
-    chn->consumer_is_xen = 0;
+    BUG_ON(!consumer_is_xen(chn));
+    chn->xen_consumer = 0;
 
     spin_unlock(&d->event_lock);
 
@@ -1063,7 +1104,7 @@ void notify_via_xen_event_channel(struct
     ASSERT(port_is_valid(ld, lport));
     lchn = evtchn_from_port(ld, lport);
-    ASSERT(lchn->consumer_is_xen);
+    ASSERT(consumer_is_xen(lchn));
 
     if ( likely(lchn->state == ECS_INTERDOMAIN) )
     {
@@ -1106,7 +1147,7 @@ void evtchn_destroy(struct domain *d)
     /* Close all existing event channels. */
     for ( i = 0; port_is_valid(d, i); i++ )
     {
-        evtchn_from_port(d, i)->consumer_is_xen = 0;
+        evtchn_from_port(d, i)->xen_consumer = 0;
         (void)__evtchn_close(d, i);
     }
 
@@ -1192,7 +1233,7 @@ static void domain_dump_evtchn_info(stru
             printk(" v=%d", chn->u.virq);
             break;
         }
-        printk(" x=%d\n", chn->consumer_is_xen);
+        printk(" x=%d\n", chn->xen_consumer);
     }
 
     spin_unlock(&d->event_lock);
diff -r 2a4ec2e2ae36 -r 4c19931b40d5 xen/include/xen/event.h
--- a/xen/include/xen/event.h
+++ b/xen/include/xen/event.h
@@ -51,8 +51,11 @@ int evtchn_unmask(unsigned int port);
 void evtchn_move_pirqs(struct vcpu *v);
 
 /* Allocate/free a Xen-attached event channel port. */
+typedef void (*xen_event_channel_notification_t)(
+    struct vcpu *v, unsigned int port);
 int alloc_unbound_xen_event_channel(
-    struct vcpu *local_vcpu, domid_t remote_domid);
+    struct vcpu *local_vcpu, domid_t remote_domid,
+    xen_event_channel_notification_t notification_fn);
 void free_xen_event_channel(
     struct vcpu *local_vcpu, int port);
diff -r 2a4ec2e2ae36 -r 4c19931b40d5 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -47,7 +47,7 @@ struct evtchn
 #define ECS_VIRQ         5 /* Channel is bound to a virtual IRQ line. */
 #define ECS_IPI          6 /* Channel is bound to a virtual IPI line. */
     u8  state;             /* ECS_* */
-    u8  consumer_is_xen;   /* Consumed by Xen or by guest? */
+    u8  xen_consumer;      /* Consumer in Xen, if any? (0 = send to guest) */
     u16 notify_vcpu_id;    /* VCPU for local delivery notification */
     union {
         struct {
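For context, here is a minimal sketch of how an in-hypervisor consumer might use
the extended interface. The names my_notification_fn and my_consumer_init are
hypothetical and not part of this patch; only the three-argument
alloc_unbound_xen_event_channel() and the xen_event_channel_notification_t
typedef come from the changes above.

    /* Illustrative sketch only -- not part of the patch. */
    #include <xen/event.h>
    #include <xen/lib.h>
    #include <xen/sched.h>

    /*
     * Hypothetical callback: invoked from evtchn_send() when the remote
     * domain signals the port, instead of the default "wake a vcpu blocked
     * in wait_on_xen_event_channel()" behaviour.
     */
    static void my_notification_fn(struct vcpu *v, unsigned int port)
    {
        printk(XENLOG_DEBUG "port %u signalled for d%dv%d\n",
               port, v->domain->domain_id, v->vcpu_id);
    }

    /*
     * Hypothetical setup: allocate an unbound Xen-attached channel on @v
     * that @remote_domid may later bind to. Passing NULL instead of the
     * function pointer preserves the pre-patch wake-up behaviour.
     */
    static int my_consumer_init(struct vcpu *v, domid_t remote_domid)
    {
        int port = alloc_unbound_xen_event_channel(v, remote_domid,
                                                   my_notification_fn);

        if ( port < 0 )
            return port;

        /* ... advertise 'port' to the remote domain ... */
        return 0;
    }

Because get_xen_consumer() interns each distinct function pointer into the small
xen_consumers[] table, registering the same callback for many channels occupies
only one slot; the BUG_ON fires only if more than eight distinct callbacks are
ever registered.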