[Xen-devel] [PATCH v5 5/9] ioreq-server: on-demand creation of ioreq server
This patch only creates the ioreq server when the legacy HVM parameters
are read (by an emulator).

A lock is introduced to protect access to the ioreq server by multiple
emulator/tool invocations should such an eventuality arise. The guest is
protected by creation of the ioreq server only being done whilst the
domain is paused.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Cc: Keir Fraser <keir@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c           | 265 +++++++++++++++++++++++++++-----------
 xen/include/asm-x86/hvm/domain.h |   1 +
 2 files changed, 194 insertions(+), 72 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 1684705..bc073b5 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -389,40 +389,38 @@ void hvm_do_resume(struct vcpu *v)
 {
     struct domain *d = v->domain;
     struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
-    ioreq_t *p;
 
     check_wakeup_from_wait();
 
     if ( is_hvm_vcpu(v) )
         pt_restore_timer(v);
 
-    if ( !s )
-        goto check_inject_trap;
-
-    /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
-    p = get_ioreq(s, v);
-    while ( p->state != STATE_IOREQ_NONE )
+    if ( s )
     {
-        switch ( p->state )
+        ioreq_t *p = get_ioreq(s, v);
+
+        while ( p->state != STATE_IOREQ_NONE )
         {
-        case STATE_IORESP_READY: /* IORESP_READY -> NONE */
-            rmb(); /* see IORESP_READY /then/ read contents of ioreq */
-            hvm_io_assist(p);
-            break;
-        case STATE_IOREQ_READY:  /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
-        case STATE_IOREQ_INPROCESS:
-            wait_on_xen_event_channel(p->vp_eport,
-                                      (p->state != STATE_IOREQ_READY) &&
-                                      (p->state != STATE_IOREQ_INPROCESS));
-            break;
-        default:
-            gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n", p->state);
-            domain_crash(v->domain);
-            return; /* bail */
+            switch ( p->state )
+            {
+            case STATE_IORESP_READY: /* IORESP_READY -> NONE */
+                rmb(); /* see IORESP_READY /then/ read contents of ioreq */
+                hvm_io_assist(p);
+                break;
+            case STATE_IOREQ_READY:  /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
+            case STATE_IOREQ_INPROCESS:
+                wait_on_xen_event_channel(p->vp_eport,
+                                          (p->state != STATE_IOREQ_READY) &&
+                                          (p->state != STATE_IOREQ_INPROCESS));
+                break;
+            default:
+                gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n", p->state);
+                domain_crash(d);
+                return; /* bail */
+            }
         }
     }
 
-  check_inject_trap:
     /* Inject pending hw/sw trap */
     if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
     {
@@ -653,13 +651,70 @@ static void hvm_ioreq_server_remove_vcpu(struct hvm_ioreq_server *s,
     spin_unlock(&s->lock);
 }
 
-static int hvm_create_ioreq_server(struct domain *d, domid_t domid)
+static void hvm_ioreq_server_remove_all_vcpus(struct hvm_ioreq_server *s)
 {
-    struct hvm_ioreq_server *s;
+    struct hvm_ioreq_vcpu *sv, *next;
 
-    s = xzalloc(struct hvm_ioreq_server);
-    if ( !s )
-        return -ENOMEM;
+    spin_lock(&s->lock);
+
+    list_for_each_entry_safe ( sv,
+                               next,
+                               &s->ioreq_vcpu_list,
+                               list_entry )
+    {
+        struct vcpu *v = sv->vcpu;
+
+        list_del_init(&sv->list_entry);
+
+        if ( v->vcpu_id == 0 )
+            free_xen_event_channel(v, s->bufioreq_evtchn);
+
+        free_xen_event_channel(v, sv->ioreq_evtchn);
+
+        xfree(sv);
+    }
+
+    spin_unlock(&s->lock);
+}
+
+static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s)
+{
+    struct domain *d = s->domain;
+    unsigned long pfn;
+    int rc;
+
+    pfn = d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN];
+    rc = hvm_map_ioreq_page(s, 0, pfn);
+    if ( rc )
+        goto fail1;
+
+    pfn = d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN];
+    rc = hvm_map_ioreq_page(s, 1, pfn);
+    if ( rc )
+        goto fail2;
+
+    return 0;
+
+fail2:
+    hvm_unmap_ioreq_page(s, 0);
+
+fail1:
+    return rc;
+}
+
+static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s)
+{
+    hvm_unmap_ioreq_page(s, 1);
+    hvm_unmap_ioreq_page(s, 0);
+}
+
+static int hvm_ioreq_server_init(struct hvm_ioreq_server *s, struct domain *d,
+                                 domid_t domid)
+{
+    struct vcpu *v;
+    int rc;
+
+    gdprintk(XENLOG_DEBUG, "%s %d\n", __func__, domid);
 
     s->domain = d;
     s->domid = domid;
@@ -668,49 +723,95 @@ static int hvm_create_ioreq_server(struct domain *d, domid_t domid)
 
     INIT_LIST_HEAD(&s->ioreq_vcpu_list);
     spin_lock_init(&s->bufioreq_lock);
 
-    d->arch.hvm_domain.ioreq_server = s;
+    rc = hvm_ioreq_server_map_pages(s);
+    if ( rc )
+        return rc;
+
+    for_each_vcpu ( d, v )
+    {
+        rc = hvm_ioreq_server_add_vcpu(s, v);
+        if ( rc )
+            goto fail;
+    }
+
     return 0;
+
+ fail:
+    hvm_ioreq_server_remove_all_vcpus(s);
+    hvm_ioreq_server_unmap_pages(s);
+
+    return rc;
 }
 
-static void hvm_destroy_ioreq_server(struct domain *d)
+static void hvm_ioreq_server_deinit(struct hvm_ioreq_server *s)
 {
-    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
+    gdprintk(XENLOG_DEBUG, "%s %d\n", __func__, s->domid);
 
-    hvm_unmap_ioreq_page(s, 1);
-    hvm_unmap_ioreq_page(s, 0);
-
-    xfree(s);
+    hvm_ioreq_server_remove_all_vcpus(s);
+    hvm_ioreq_server_unmap_pages(s);
 }
 
-static int hvm_set_ioreq_pfn(struct domain *d, bool_t buf,
-                             unsigned long pfn)
+static int hvm_create_ioreq_server(struct domain *d, domid_t domid)
 {
-    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
+    struct hvm_ioreq_server *s;
     int rc;
 
-    spin_lock(&s->lock);
+    rc = -ENOMEM;
+    s = xzalloc(struct hvm_ioreq_server);
+    if ( !s )
+        goto fail1;
+
+    domain_pause(d);
+    spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
+
+    rc = -EEXIST;
+    if ( d->arch.hvm_domain.ioreq_server != NULL )
+        goto fail2;
 
-    rc = hvm_map_ioreq_page(s, buf, pfn);
+    rc = hvm_ioreq_server_init(s, d, domid);
     if ( rc )
-        goto fail;
+        goto fail3;
 
-    if (!buf) {
-        struct hvm_ioreq_vcpu *sv;
+    d->arch.hvm_domain.ioreq_server = s;
 
-        list_for_each_entry ( sv,
-                              &s->ioreq_vcpu_list,
-                              list_entry )
-            hvm_update_ioreq_evtchn(s, sv);
-    }
+    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
+    domain_unpause(d);
 
-    spin_unlock(&s->lock);
     return 0;
 
- fail:
-    spin_unlock(&s->lock);
+ fail3:
+ fail2:
+    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
+    domain_unpause(d);
+
+    xfree(s);
+ fail1:
     return rc;
 }
 
+static void hvm_destroy_ioreq_server(struct domain *d)
+{
+    struct hvm_ioreq_server *s;
+
+    domain_pause(d);
+    spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
+
+    s = d->arch.hvm_domain.ioreq_server;
+    if ( !s )
+        goto done;
+
+    d->arch.hvm_domain.ioreq_server = NULL;
+
+    hvm_ioreq_server_deinit(s);
+
+ done:
+    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
+    domain_unpause(d);
+
+    if ( s )
+        xfree(s);
+}
+
 static int hvm_replace_event_channel(struct vcpu *v, domid_t remote_domid,
                                      evtchn_port_t *p_port)
 {
@@ -728,9 +829,15 @@ static int hvm_replace_event_channel(struct vcpu *v, domid_t remote_domid,
 
 static int hvm_set_dm_domain(struct domain *d, domid_t domid)
 {
-    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
+    struct hvm_ioreq_server *s;
     int rc = 0;
 
+    spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
+
+    s = d->arch.hvm_domain.ioreq_server;
+    if ( !s )
+        goto done;
+
     domain_pause(d);
     spin_lock(&s->lock);
 
@@ -766,12 +873,13 @@ static int hvm_set_dm_domain(struct domain *d, domid_t domid)
     spin_unlock(&s->lock);
     domain_unpause(d);
 
+ done:
+    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
     return rc;
 }
 
 int hvm_domain_initialise(struct domain *d)
 {
-    domid_t domid;
     int rc;
 
     if ( !hvm_enabled )
@@ -797,6 +905,7 @@ int hvm_domain_initialise(struct domain *d)
 
     }
 
+    spin_lock_init(&d->arch.hvm_domain.ioreq_server_lock);
     spin_lock_init(&d->arch.hvm_domain.irq_lock);
     spin_lock_init(&d->arch.hvm_domain.uc_lock);
 
@@ -837,21 +946,14 @@ int hvm_domain_initialise(struct domain *d)
 
     rtc_init(d);
 
-    domid = d->arch.hvm_domain.params[HVM_PARAM_DM_DOMAIN];
-    rc = hvm_create_ioreq_server(d, domid);
-    if ( rc != 0 )
-        goto fail2;
-
     register_portio_handler(d, 0xe9, 1, hvm_print_line);
 
     rc = hvm_funcs.domain_initialise(d);
     if ( rc != 0 )
-        goto fail3;
+        goto fail2;
 
     return 0;
 
- fail3:
-    hvm_destroy_ioreq_server(d);
  fail2:
     rtc_deinit(d);
     stdvga_deinit(d);
@@ -1508,7 +1610,7 @@ int hvm_vcpu_initialise(struct vcpu *v)
 {
     int rc;
     struct domain *d = v->domain;
-    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
+    struct hvm_ioreq_server *s;
 
     hvm_asid_flush_vcpu(v);
 
@@ -1551,7 +1653,14 @@ int hvm_vcpu_initialise(struct vcpu *v)
          && (rc = nestedhvm_vcpu_initialise(v)) < 0 ) /* teardown: nestedhvm_vcpu_destroy */
         goto fail5;
 
-    rc = hvm_ioreq_server_add_vcpu(s, v);
+    spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
+
+    s = d->arch.hvm_domain.ioreq_server;
+    if ( s )
+        rc = hvm_ioreq_server_add_vcpu(s, v);
+
+    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
+
     if ( rc != 0 )
         goto fail6;
 
@@ -1588,9 +1697,15 @@ int hvm_vcpu_initialise(struct vcpu *v)
 void hvm_vcpu_destroy(struct vcpu *v)
 {
     struct domain *d = v->domain;
-    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
+    struct hvm_ioreq_server *s;
+
+    spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
 
-    hvm_ioreq_server_remove_vcpu(s, v);
+    s = d->arch.hvm_domain.ioreq_server;
+    if ( s )
+        hvm_ioreq_server_remove_vcpu(s, v);
+
+    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
 
     nestedhvm_vcpu_destroy(v);
 
@@ -4404,12 +4519,6 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
 
         switch ( a.index )
         {
-        case HVM_PARAM_IOREQ_PFN:
-            rc = hvm_set_ioreq_pfn(d, 0, a.value);
-            break;
-        case HVM_PARAM_BUFIOREQ_PFN:
-            rc = hvm_set_ioreq_pfn(d, 1, a.value);
-            break;
         case HVM_PARAM_CALLBACK_IRQ:
             hvm_set_callback_via(d, a.value);
             hvm_latch_shinfo_size(d);
@@ -4455,7 +4564,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
             domctl_lock_release();
             break;
         case HVM_PARAM_DM_DOMAIN:
-            /* Not reflexive, as we must domain_pause(). */
+            /* Not reflexive, as we may need to domain_pause(). */
             rc = -EPERM;
             if ( curr_d == d )
                 break;
@@ -4561,6 +4670,18 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
         case HVM_PARAM_ACPI_S_STATE:
             a.value = d->arch.hvm_domain.is_s3_suspended ? 3 : 0;
             break;
+        case HVM_PARAM_IOREQ_PFN:
+        case HVM_PARAM_BUFIOREQ_PFN:
+        case HVM_PARAM_BUFIOREQ_EVTCHN: {
+            domid_t domid;
+
+            /* May need to create server */
+            domid = d->arch.hvm_domain.params[HVM_PARAM_DM_DOMAIN];
+            rc = hvm_create_ioreq_server(d, domid);
+            if ( rc != 0 && rc != -EEXIST )
+                goto param_fail;
+            /*FALLTHRU*/
+        }
         default:
             a.value = d->arch.hvm_domain.params[a.index];
             break;
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 92dc5fb..cd885de 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -65,6 +65,7 @@ struct hvm_ioreq_server {
 
 struct hvm_domain {
     struct hvm_ioreq_server *ioreq_server;
+    spinlock_t               ioreq_server_lock;
 
     struct pl_time           pl_time;
-- 
1.7.10.4
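
For context, a minimal emulator-side sketch of how the on-demand creation
above is triggered: with this patch applied, the ioreq server comes into
existence the first time one of HVM_PARAM_IOREQ_PFN, HVM_PARAM_BUFIOREQ_PFN
or HVM_PARAM_BUFIOREQ_EVTCHN is read via HVMOP_get_param, so an emulator's
startup sequence needs no change. This sketch is illustrative only, not part
of the series, and assumes the libxenctrl interface of this era
(xc_interface_open()/xc_get_hvm_param() with an unsigned long out-parameter);
get_ioreq_params() is a hypothetical helper.

/*
 * Illustrative only (not part of this patch): an emulator/toolstack
 * fetching the legacy ioreq params.  The first HVMOP_get_param to reach
 * Xen takes the hvm_create_ioreq_server() path above; later readers see
 * -EEXIST internally and simply get the stored param values back.
 */
#include <stdio.h>
#include <xenctrl.h>
#include <xen/hvm/params.h>

static int get_ioreq_params(xc_interface *xch, domid_t domid)
{
    unsigned long ioreq_pfn, bufioreq_pfn, bufioreq_evtchn;
    int rc;

    /* Any one of these reads is enough to create the server ... */
    rc = xc_get_hvm_param(xch, domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn);
    if ( rc < 0 )
        return rc;

    /* ... so these two find it already in place. */
    rc = xc_get_hvm_param(xch, domid, HVM_PARAM_BUFIOREQ_PFN, &bufioreq_pfn);
    if ( rc < 0 )
        return rc;

    rc = xc_get_hvm_param(xch, domid, HVM_PARAM_BUFIOREQ_EVTCHN,
                          &bufioreq_evtchn);
    if ( rc < 0 )
        return rc;

    printf("ioreq pfn %#lx, buffered pfn %#lx, buffered evtchn %lu\n",
           ioreq_pfn, bufioreq_pfn, bufioreq_evtchn);

    return 0;
}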