[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH 2/4] x86/hvm: take a reference on ioreq server emulating domain
When an ioreq server is created the code currently stores the id of the emulating domain, but does not take a reference on that domain. This patch modifies the code to hold a reference for the lifetime of the ioreq server. NOTE: For the default server only it is theoretically possible for the emulating domain to change during the lifetime of the ioreq server (if a tools domain modifies HVM_PARAM_DM_DOMAIN) so the domain references are swizzled in this case. Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx> --- Cc: Jan Beulich <jbeulich@xxxxxxxx> Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> --- xen/arch/x86/hvm/ioreq.c | 102 +++++++++++++++++++++++++++------------ xen/include/asm-x86/hvm/domain.h | 4 +- 2 files changed, 73 insertions(+), 33 deletions(-) diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c index ee5f47de65..8d1ff46146 100644 --- a/xen/arch/x86/hvm/ioreq.c +++ b/xen/arch/x86/hvm/ioreq.c @@ -218,7 +218,7 @@ static void hvm_unmap_ioreq_page(struct hvm_ioreq_server *s, bool buf) static int hvm_map_ioreq_page( struct hvm_ioreq_server *s, bool buf, unsigned long gfn) { - struct domain *d = s->domain; + struct domain *d = s->target; struct hvm_ioreq_page *iorp = buf ? 
&s->bufioreq : &s->ioreq; struct page_info *page; void *va; @@ -305,6 +305,7 @@ static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s, bool is_default, struct vcpu *v) { struct hvm_ioreq_vcpu *sv; + domid_t domid; int rc; sv = xzalloc(struct hvm_ioreq_vcpu); @@ -315,7 +316,9 @@ static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s, spin_lock(&s->lock); - rc = alloc_unbound_xen_event_channel(v->domain, v->vcpu_id, s->domid, + domid = s->emulator->domain_id; + + rc = alloc_unbound_xen_event_channel(v->domain, v->vcpu_id, domid, NULL); if ( rc < 0 ) goto fail2; @@ -324,9 +327,9 @@ static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s, if ( v->vcpu_id == 0 && s->bufioreq.va != NULL ) { - struct domain *d = s->domain; + struct domain *d = s->target; - rc = alloc_unbound_xen_event_channel(v->domain, 0, s->domid, NULL); + rc = alloc_unbound_xen_event_channel(v->domain, 0, domid, NULL); if ( rc < 0 ) goto fail3; @@ -434,7 +437,7 @@ static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s, bool is_default, bool handle_bufioreq) { - struct domain *d = s->domain; + struct domain *d = s->target; unsigned long ioreq_gfn = gfn_x(INVALID_GFN); unsigned long bufioreq_gfn = gfn_x(INVALID_GFN); int rc; @@ -471,7 +474,7 @@ static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s, static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s, bool is_default) { - struct domain *d = s->domain; + struct domain *d = s->target; bool handle_bufioreq = !!s->bufioreq.va; if ( handle_bufioreq ) @@ -521,7 +524,7 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s, if ( rc ) goto fail; - s->range[i] = rangeset_new(s->domain, name, + s->range[i] = rangeset_new(s->target, name, RANGESETF_prettyprint_hex); xfree(name); @@ -545,7 +548,7 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s, static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s, bool is_default) { - struct domain *d = 
s->domain; + struct domain *d = s->target; struct hvm_ioreq_vcpu *sv; bool handle_bufioreq = !!s->bufioreq.va; @@ -576,7 +579,7 @@ static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s, static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s, bool is_default) { - struct domain *d = s->domain; + struct domain *d = s->target; bool handle_bufioreq = !!s->bufioreq.va; spin_lock(&s->lock); @@ -598,6 +601,20 @@ static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s, spin_unlock(&s->lock); } +static int get_default_emulator(domid_t domid, struct domain **emulator) +{ + bool ref; + + *emulator = rcu_lock_domain_by_id(domid); + if ( !*emulator ) + return -ESRCH; + + ref = !!get_domain(*emulator); /* fails if domain is dying */ + rcu_unlock_domain(*emulator); + + return ref ? 0 : -EACCES; +} + static int hvm_ioreq_server_init(struct hvm_ioreq_server *s, struct domain *d, bool is_default, int bufioreq_handling, ioservid_t id) @@ -606,11 +623,23 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s, int rc; s->id = id; - s->domain = d; + s->target = d; - s->domid = is_default ? 
- d->arch.hvm_domain.params[HVM_PARAM_DM_DOMAIN] : - current->domain->domain_id; + if ( is_default ) + { + domid_t domid = d->arch.hvm_domain.params[HVM_PARAM_DM_DOMAIN]; + + rc = get_default_emulator(domid, &s->emulator); + if ( rc ) + return rc; + } else { + struct domain *currd = current->domain; + + if ( !get_domain(currd) ) + return -EACCES; + + s->emulator = currd; + } spin_lock_init(&s->lock); INIT_LIST_HEAD(&s->ioreq_vcpu_list); @@ -654,6 +683,8 @@ static void hvm_ioreq_server_deinit(struct hvm_ioreq_server *s, hvm_ioreq_server_remove_all_vcpus(s); hvm_ioreq_server_unmap_pages(s, is_default); hvm_ioreq_server_free_rangesets(s, is_default); + + put_domain(s->emulator); } static ioservid_t next_ioservid(struct domain *d) @@ -1125,35 +1156,46 @@ int hvm_set_dm_domain(struct domain *d, domid_t domid) domain_pause(d); spin_lock(&s->lock); - if ( s->domid != domid ) + if ( domid != s->emulator->domain_id ) { - struct hvm_ioreq_vcpu *sv; + struct domain *emulator; - list_for_each_entry ( sv, - &s->ioreq_vcpu_list, - list_entry ) + rc = get_default_emulator(domid, &emulator); + if ( !rc ) { - struct vcpu *v = sv->vcpu; + struct hvm_ioreq_vcpu *sv; - if ( v->vcpu_id == 0 ) + list_for_each_entry ( sv, + &s->ioreq_vcpu_list, + list_entry ) { + struct vcpu *v = sv->vcpu; + + if ( v->vcpu_id == 0 ) + { + rc = hvm_replace_event_channel(v, domid, + &s->bufioreq_evtchn); + if ( rc ) + break; + + d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_EVTCHN] = + s->bufioreq_evtchn; + } + rc = hvm_replace_event_channel(v, domid, - &s->bufioreq_evtchn); + &sv->ioreq_evtchn); if ( rc ) break; - d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_EVTCHN] = - s->bufioreq_evtchn; + hvm_update_ioreq_evtchn(s, sv); } - rc = hvm_replace_event_channel(v, domid, &sv->ioreq_evtchn); - if ( rc ) - break; - - hvm_update_ioreq_evtchn(s, sv); + if ( !rc ) + { + put_domain(s->emulator); + s->emulator = emulator; + } } - - s->domid = domid; } spin_unlock(&s->lock); diff --git 
a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h index 7f128c05ff..6e03d024c8 100644 --- a/xen/include/asm-x86/hvm/domain.h +++ b/xen/include/asm-x86/hvm/domain.h @@ -53,13 +53,11 @@ struct hvm_ioreq_vcpu { struct hvm_ioreq_server { struct list_head list_entry; - struct domain *domain; + struct domain *target, *emulator; /* Lock to serialize toolstack modifications */ spinlock_t lock; - /* Domain id of emulating domain */ - domid_t domid; ioservid_t id; struct hvm_ioreq_page ioreq; struct list_head ioreq_vcpu_list; -- 2.11.0 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxxxxxxxxx https://lists.xenproject.org/mailman/listinfo/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |