[Xen-devel] [PATCH v2] xen/evtchn: Alter the alloc/free xen event channel functions to take a domain
The resource behind an event channel is domain centric rather than vcpu
centric.  This change allows mem_event_disable() to avoid arbitrarily
referencing d->vcpu[0] just to pass the domain.

No functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CC: Keir Fraser <keir@xxxxxxx>
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>

---
v2: Adjust alloc_unbound_xen_event_channel() as well (suggested by Jan)
---
 xen/arch/x86/hvm/hvm.c     |   20 +++++++++++---------
 xen/common/event_channel.c |   19 ++++++++-----------
 xen/common/mem_event.c     |    4 ++--
 xen/include/xen/event.h    |    5 ++---
 4 files changed, 23 insertions(+), 25 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index fd2314e..6a7d8a9 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -627,7 +627,7 @@ static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
 
     spin_lock(&s->lock);
 
-    rc = alloc_unbound_xen_event_channel(v, s->domid, NULL);
+    rc = alloc_unbound_xen_event_channel(v->domain, v->vcpu_id, s->domid, NULL);
     if ( rc < 0 )
         goto fail2;
 
@@ -637,7 +637,8 @@ static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
     {
         struct domain *d = s->domain;
 
-        rc = alloc_unbound_xen_event_channel(v, s->domid, NULL);
+        rc = alloc_unbound_xen_event_channel(v->domain, v->vcpu_id,
+                                             s->domid, NULL);
         if ( rc < 0 )
             goto fail3;
 
@@ -658,7 +659,7 @@ static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
     return 0;
 
  fail3:
-    free_xen_event_channel(v, sv->ioreq_evtchn);
+    free_xen_event_channel(v->domain, sv->ioreq_evtchn);
 
  fail2:
     spin_unlock(&s->lock);
@@ -685,9 +686,9 @@ static void hvm_ioreq_server_remove_vcpu(struct hvm_ioreq_server *s,
         list_del(&sv->list_entry);
 
         if ( v->vcpu_id == 0 && s->bufioreq.va != NULL )
-            free_xen_event_channel(v, s->bufioreq_evtchn);
+            free_xen_event_channel(v->domain, s->bufioreq_evtchn);
 
-        free_xen_event_channel(v, sv->ioreq_evtchn);
+        free_xen_event_channel(v->domain, sv->ioreq_evtchn);
 
         xfree(sv);
         break;
@@ -712,9 +713,9 @@ static void hvm_ioreq_server_remove_all_vcpus(struct hvm_ioreq_server *s)
         list_del(&sv->list_entry);
 
         if ( v->vcpu_id == 0 && s->bufioreq.va != NULL )
-            free_xen_event_channel(v, s->bufioreq_evtchn);
+            free_xen_event_channel(v->domain, s->bufioreq_evtchn);
 
-        free_xen_event_channel(v, sv->ioreq_evtchn);
+        free_xen_event_channel(v->domain, sv->ioreq_evtchn);
 
         xfree(sv);
     }
@@ -1338,13 +1339,14 @@ static int hvm_replace_event_channel(struct vcpu *v, domid_t remote_domid,
 {
     int old_port, new_port;
 
-    new_port = alloc_unbound_xen_event_channel(v, remote_domid, NULL);
+    new_port = alloc_unbound_xen_event_channel(v->domain, v->vcpu_id,
+                                               remote_domid, NULL);
     if ( new_port < 0 )
         return new_port;
 
     /* xchg() ensures that only we call free_xen_event_channel(). */
     old_port = xchg(p_port, new_port);
-    free_xen_event_channel(v, old_port);
+    free_xen_event_channel(v->domain, old_port);
 
     return 0;
 }
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 4a52f69..b756d7b 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -1139,42 +1139,39 @@ long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
 
 
 int alloc_unbound_xen_event_channel(
-    struct vcpu *local_vcpu, domid_t remote_domid,
+    struct domain *ld, unsigned int lvcpu, domid_t remote_domid,
     xen_event_channel_notification_t notification_fn)
 {
     struct evtchn *chn;
-    struct domain *d = local_vcpu->domain;
     int            port, rc;
 
-    spin_lock(&d->event_lock);
+    spin_lock(&ld->event_lock);
 
-    rc = get_free_port(d);
+    rc = get_free_port(ld);
     if ( rc < 0 )
         goto out;
     port = rc;
-    chn = evtchn_from_port(d, port);
+    chn = evtchn_from_port(ld, port);
 
-    rc = xsm_evtchn_unbound(XSM_TARGET, d, chn, remote_domid);
+    rc = xsm_evtchn_unbound(XSM_TARGET, ld, chn, remote_domid);
     if ( rc )
         goto out;
 
     chn->state = ECS_UNBOUND;
     chn->xen_consumer = get_xen_consumer(notification_fn);
-    chn->notify_vcpu_id = local_vcpu->vcpu_id;
+    chn->notify_vcpu_id = lvcpu;
     chn->u.unbound.remote_domid = remote_domid;
 
  out:
-    spin_unlock(&d->event_lock);
+    spin_unlock(&ld->event_lock);
 
     return rc < 0 ? rc : port;
 }
 
-void free_xen_event_channel(
-    struct vcpu *local_vcpu, int port)
+void free_xen_event_channel(struct domain *d, int port)
 {
     struct evtchn *chn;
-    struct domain *d = local_vcpu->domain;
 
     spin_lock(&d->event_lock);
 
diff --git a/xen/common/mem_event.c b/xen/common/mem_event.c
index 16ebdb5..0cb2334 100644
--- a/xen/common/mem_event.c
+++ b/xen/common/mem_event.c
@@ -81,7 +81,7 @@ static int mem_event_enable(
     med->blocked = 0;
 
     /* Allocate event channel */
-    rc = alloc_unbound_xen_event_channel(d->vcpu[0],
+    rc = alloc_unbound_xen_event_channel(d, 0,
                                          current->domain->domain_id,
                                          notification_fn);
     if ( rc < 0 )
@@ -221,7 +221,7 @@ static int mem_event_disable(struct domain *d, struct mem_event_domain *med)
         }
 
         /* Free domU's event channel and leave the other one unbound */
-        free_xen_event_channel(d->vcpu[0], med->xen_port);
+        free_xen_event_channel(d, med->xen_port);
 
         /* Unblock all vCPUs */
         for_each_vcpu ( d, v )
diff --git a/xen/include/xen/event.h b/xen/include/xen/event.h
index 2b63f9a..690f865 100644
--- a/xen/include/xen/event.h
+++ b/xen/include/xen/event.h
@@ -58,10 +58,9 @@ void evtchn_move_pirqs(struct vcpu *v);
 typedef void (*xen_event_channel_notification_t)(
     struct vcpu *v, unsigned int port);
 int alloc_unbound_xen_event_channel(
-    struct vcpu *local_vcpu, domid_t remote_domid,
+    struct domain *ld, unsigned int lvcpu, domid_t remote_domid,
     xen_event_channel_notification_t notification_fn);
-void free_xen_event_channel(
-    struct vcpu *local_vcpu, int port);
+void free_xen_event_channel(struct domain *d, int port);
 
 /* Query if event channel is in use by the guest */
 int guest_enabled_event(struct vcpu *v, uint32_t virq);
--
1.7.10.4
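
For readers following along, a minimal sketch (not part of the patch) of how a Xen-internal consumer would use the reworked interface. The example_bind_port()/example_unbind_port() helpers and the choice of vcpu 0 for notifications are hypothetical; only the alloc_unbound_xen_event_channel() and free_xen_event_channel() signatures come from the hunks above.

/* Minimal sketch, assuming the post-patch prototypes in xen/include/xen/event.h.
 * The example_* helpers are hypothetical and only illustrate the new calling
 * convention: callers pass the owning domain (plus a notification vcpu id on
 * allocation) instead of a struct vcpu. */
#include <xen/event.h>   /* alloc_unbound_xen_event_channel(), free_xen_event_channel() */
#include <xen/sched.h>   /* struct domain */

static int example_bind_port(struct domain *d, domid_t remote_domid,
                             int *p_port)
{
    /* Deliver notifications via vcpu 0; any valid vcpu id of d would do.
     * alloc_unbound_xen_event_channel() returns the port or a negative error. */
    int port = alloc_unbound_xen_event_channel(d, 0, remote_domid, NULL);

    if ( port < 0 )
        return port;

    *p_port = port;
    return 0;
}

static void example_unbind_port(struct domain *d, int port)
{
    /* Freeing is keyed by the owning domain and the port alone. */
    free_xen_event_channel(d, port);
}

Compare mem_event_enable() and mem_event_disable() in the hunks above, which now pass (d, 0) and d respectively instead of reaching for d->vcpu[0].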