[Xen-devel] [PATCH v3 4/6] ioreq-server: on-demand creation of ioreq server
This patch only creates the ioreq server when the legacy HVM parameters
are touched by an emulator. It also lays some groundwork for supporting
multiple IOREQ servers. For instance, it introduces ioreq server
reference counting, which is not strictly necessary at this stage but
will become so when ioreq servers can be destroyed prior to the domain
dying.

There is a significant change in the layout of the special pages
reserved in xc_hvm_build_x86.c. This is so that we can 'grow' them
downwards without moving pages such as the xenstore page when building
a domain that can support more than one emulator.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
 tools/libxc/xc_hvm_build_x86.c |   40 +++++--
 xen/arch/x86/hvm/hvm.c         |  240 +++++++++++++++++++++++++---------------
 2 files changed, 176 insertions(+), 104 deletions(-)
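To see what the new layout buys, here is a minimal standalone sketch (an
illustration, not part of the patch) that evaluates the new macros. The
constants mirror the xc_hvm_build_x86.c hunk below, with NR_SPECIAL_PAGES
parenthesised so it expands safely inside the reserved_mem_pgstart
expression:

#include <stdio.h>

#define SPECIALPAGE_PAGING   0
#define SPECIALPAGE_ACCESS   1
#define SPECIALPAGE_SHARING  2
#define SPECIALPAGE_XENSTORE 3
#define SPECIALPAGE_IDENT_PT 4
#define SPECIALPAGE_CONSOLE  5
#define SPECIALPAGE_IOREQ    6
#define NR_SPECIAL_PAGES     (SPECIALPAGE_IOREQ + 2) /* ioreq server needs 2 pages */
#define special_pfn(x)       (0xff000u - 1 - (x))

int main(void)
{
    /* Pages are indexed downwards from 0xfefff, so adding further
     * ioreq pages later only lowers the bottom of the block; fixed
     * pages such as xenstore keep their pfns. */
    printf("xenstore pfn:  %#x\n", special_pfn(SPECIALPAGE_XENSTORE));  /* 0xfeffc */
    printf("ioreq pfn:     %#x\n", special_pfn(SPECIALPAGE_IOREQ));     /* 0xfeff9 */
    printf("buf ioreq pfn: %#x\n", special_pfn(SPECIALPAGE_IOREQ) - 1); /* 0xfeff8 */
    printf("reserved_mem_pgstart: %#x\n",
           special_pfn(0) - NR_SPECIAL_PAGES);                          /* 0xfeff7 */
    return 0;
}

Under the old scheme the block was indexed upwards from 0xff000 minus
NR_SPECIAL_PAGES, so adding a page renumbered every existing special
page; indexing downwards from 0xff000 keeps existing pfns stable and
only lowers reserved_mem_pgstart.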
diff --git a/tools/libxc/xc_hvm_build_x86.c b/tools/libxc/xc_hvm_build_x86.c
index dd3b522..b65e702 100644
--- a/tools/libxc/xc_hvm_build_x86.c
+++ b/tools/libxc/xc_hvm_build_x86.c
@@ -41,13 +41,12 @@
 #define SPECIALPAGE_PAGING   0
 #define SPECIALPAGE_ACCESS   1
 #define SPECIALPAGE_SHARING  2
-#define SPECIALPAGE_BUFIOREQ 3
-#define SPECIALPAGE_XENSTORE 4
-#define SPECIALPAGE_IOREQ    5
-#define SPECIALPAGE_IDENT_PT 6
-#define SPECIALPAGE_CONSOLE  7
-#define NR_SPECIAL_PAGES     8
-#define special_pfn(x) (0xff000u - NR_SPECIAL_PAGES + (x))
+#define SPECIALPAGE_XENSTORE 3
+#define SPECIALPAGE_IDENT_PT 4
+#define SPECIALPAGE_CONSOLE  5
+#define SPECIALPAGE_IOREQ    6
+#define NR_SPECIAL_PAGES     (SPECIALPAGE_IOREQ + 2) /* ioreq server needs 2 pages */
+#define special_pfn(x) (0xff000u - 1 - (x))
 
 #define VGA_HOLE_SIZE (0x20)
 
@@ -114,7 +113,7 @@ static void build_hvm_info(void *hvm_info_page, uint64_t mem_size,
     /* Memory parameters. */
     hvm_info->low_mem_pgend = lowmem_end >> PAGE_SHIFT;
     hvm_info->high_mem_pgend = highmem_end >> PAGE_SHIFT;
-    hvm_info->reserved_mem_pgstart = special_pfn(0);
+    hvm_info->reserved_mem_pgstart = special_pfn(0) - NR_SPECIAL_PAGES;
 
     /* Finish with the checksum. */
     for ( i = 0, sum = 0; i < hvm_info->length; i++ )
@@ -473,6 +472,23 @@ static int setup_guest(xc_interface *xch,
     munmap(hvm_info_page, PAGE_SIZE);
 
     /* Allocate and clear special pages. */
+
+    DPRINTF("%d SPECIAL PAGES:\n", NR_SPECIAL_PAGES);
+    DPRINTF("  PAGING:   %"PRI_xen_pfn"\n",
+            (xen_pfn_t)special_pfn(SPECIALPAGE_PAGING));
+    DPRINTF("  ACCESS:   %"PRI_xen_pfn"\n",
+            (xen_pfn_t)special_pfn(SPECIALPAGE_ACCESS));
+    DPRINTF("  SHARING:  %"PRI_xen_pfn"\n",
+            (xen_pfn_t)special_pfn(SPECIALPAGE_SHARING));
+    DPRINTF("  STORE:    %"PRI_xen_pfn"\n",
+            (xen_pfn_t)special_pfn(SPECIALPAGE_XENSTORE));
+    DPRINTF("  IDENT_PT: %"PRI_xen_pfn"\n",
+            (xen_pfn_t)special_pfn(SPECIALPAGE_IDENT_PT));
+    DPRINTF("  CONSOLE:  %"PRI_xen_pfn"\n",
+            (xen_pfn_t)special_pfn(SPECIALPAGE_CONSOLE));
+    DPRINTF("  IOREQ:    %"PRI_xen_pfn"\n",
+            (xen_pfn_t)special_pfn(SPECIALPAGE_IOREQ));
+
     for ( i = 0; i < NR_SPECIAL_PAGES; i++ )
     {
         xen_pfn_t pfn = special_pfn(i);
@@ -488,10 +504,6 @@ static int setup_guest(xc_interface *xch,
 
     xc_set_hvm_param(xch, dom, HVM_PARAM_STORE_PFN,
                      special_pfn(SPECIALPAGE_XENSTORE));
-    xc_set_hvm_param(xch, dom, HVM_PARAM_BUFIOREQ_PFN,
-                     special_pfn(SPECIALPAGE_BUFIOREQ));
-    xc_set_hvm_param(xch, dom, HVM_PARAM_IOREQ_PFN,
-                     special_pfn(SPECIALPAGE_IOREQ));
     xc_set_hvm_param(xch, dom, HVM_PARAM_CONSOLE_PFN,
                      special_pfn(SPECIALPAGE_CONSOLE));
     xc_set_hvm_param(xch, dom, HVM_PARAM_PAGING_RING_PFN,
@@ -500,6 +512,10 @@ static int setup_guest(xc_interface *xch,
                      special_pfn(SPECIALPAGE_ACCESS));
     xc_set_hvm_param(xch, dom, HVM_PARAM_SHARING_RING_PFN,
                      special_pfn(SPECIALPAGE_SHARING));
+    xc_set_hvm_param(xch, dom, HVM_PARAM_IOREQ_PFN,
+                     special_pfn(SPECIALPAGE_IOREQ));
+    xc_set_hvm_param(xch, dom, HVM_PARAM_BUFIOREQ_PFN,
+                     special_pfn(SPECIALPAGE_IOREQ) - 1);
 
     /*
      * Identity-map page table is required for running with CR0.PG=0 when
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index bbf9577..22b2a2c 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -366,22 +366,9 @@ bool_t hvm_io_pending(struct vcpu *v)
     return ( p->state != STATE_IOREQ_NONE );
 }
 
-void hvm_do_resume(struct vcpu *v)
+static void hvm_wait_on_io(struct domain *d, ioreq_t *p)
 {
-    struct domain *d = v->domain;
-    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
-    ioreq_t *p;
-
-    check_wakeup_from_wait();
-
-    if ( is_hvm_vcpu(v) )
-        pt_restore_timer(v);
-
-    if ( !s )
-        goto check_inject_trap;
-
     /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
-    p = get_ioreq(s, v->vcpu_id);
     while ( p->state != STATE_IOREQ_NONE )
     {
         switch ( p->state )
@@ -397,12 +384,29 @@ void hvm_do_resume(struct vcpu *v)
             break;
         default:
             gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n", p->state);
-            domain_crash(v->domain);
+            domain_crash(d);
             return; /* bail */
         }
     }
+}
+
+void hvm_do_resume(struct vcpu *v)
+{
+    struct domain *d = v->domain;
+    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
+
+    check_wakeup_from_wait();
+
+    if ( is_hvm_vcpu(v) )
+        pt_restore_timer(v);
+
+    if ( s )
+    {
+        ioreq_t *p = get_ioreq(s, v->vcpu_id);
+
+        hvm_wait_on_io(d, p);
+    }
 
-    check_inject_trap:
     /* Inject pending hw/sw trap */
     if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
     {
@@ -411,11 +415,13 @@ void hvm_do_resume(struct vcpu *v)
     }
 }
 
-static void hvm_init_ioreq_page(
-    struct domain *d, struct hvm_ioreq_page *iorp)
+static void hvm_init_ioreq_page(struct hvm_ioreq_server *s, bool_t buf)
 {
+    struct hvm_ioreq_page *iorp;
+
+    iorp = buf ? &s->buf_ioreq : &s->ioreq;
+
     spin_lock_init(&iorp->lock);
-    domain_pause(d);
 }
 
 void destroy_ring_for_helper(
@@ -431,16 +437,13 @@ void destroy_ring_for_helper(
     }
 }
 
-static void hvm_destroy_ioreq_page(
-    struct domain *d, struct hvm_ioreq_page *iorp)
+static void hvm_destroy_ioreq_page(struct hvm_ioreq_server *s, bool_t buf)
 {
-    spin_lock(&iorp->lock);
+    struct hvm_ioreq_page *iorp;
 
-    ASSERT(d->is_dying);
+    iorp = buf ? &s->buf_ioreq : &s->ioreq;
 
     destroy_ring_for_helper(&iorp->va, iorp->page);
-
-    spin_unlock(&iorp->lock);
 }
 
 int prepare_ring_for_helper(
@@ -487,9 +490,11 @@ int prepare_ring_for_helper(
     return 0;
 }
 
-static int hvm_set_ioreq_page(
-    struct domain *d, struct hvm_ioreq_page *iorp, unsigned long gmfn)
+static int hvm_set_ioreq_page(struct hvm_ioreq_server *s, bool_t buf,
+                              unsigned long gmfn)
 {
+    struct domain *d = s->domain;
+    struct hvm_ioreq_page *iorp;
     struct page_info *page;
     void *va;
     int rc;
@@ -497,22 +502,17 @@ static int hvm_set_ioreq_page(
     if ( (rc = prepare_ring_for_helper(d, gmfn, &page, &va)) )
         return rc;
 
-    spin_lock(&iorp->lock);
+    iorp = buf ? &s->buf_ioreq : &s->ioreq;
 
     if ( (iorp->va != NULL) || d->is_dying )
    {
-        destroy_ring_for_helper(&iorp->va, iorp->page);
-        spin_unlock(&iorp->lock);
+        destroy_ring_for_helper(&va, page);
         return -EINVAL;
     }
 
     iorp->va = va;
     iorp->page = page;
 
-    spin_unlock(&iorp->lock);
-
-    domain_unpause(d);
-
     return 0;
 }
 
@@ -606,29 +606,88 @@ static void hvm_ioreq_server_remove_vcpu(struct hvm_ioreq_server *s, struct vcpu
     free_xen_event_channel(v, v->arch.hvm_vcpu.ioreq_evtchn);
 }
 
-static int hvm_create_ioreq_server(struct domain *d)
+static int hvm_create_ioreq_server(struct domain *d, domid_t domid)
 {
     struct hvm_ioreq_server *s;
+    unsigned long pfn;
+    struct vcpu *v;
+    int rc;
+
+    if ( d->arch.hvm_domain.ioreq_server != NULL )
+        return -EEXIST;
+    gdprintk(XENLOG_DEBUG, "%s: %d\n", __func__, d->domain_id);
+
+    rc = -ENOMEM;
     s = xzalloc(struct hvm_ioreq_server);
     if ( !s )
-        return -ENOMEM;
+        goto fail_alloc;
 
     s->domain = d;
+    s->domid = domid;
+
+    /* Initialize shared pages */
+    pfn = d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN];
+
+    hvm_init_ioreq_page(s, 0);
+    if ( (rc = hvm_set_ioreq_page(s, 0, pfn)) < 0 )
+        goto fail_set_ioreq;
 
-    hvm_init_ioreq_page(d, &s->ioreq);
-    hvm_init_ioreq_page(d, &s->buf_ioreq);
+    pfn = d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN];
+
+    hvm_init_ioreq_page(s, 1);
+    if ( (rc = hvm_set_ioreq_page(s, 1, pfn)) < 0 )
+        goto fail_set_buf_ioreq;
+
+    domain_pause(d);
+
+    for_each_vcpu ( d, v )
+    {
+        if ( (rc = hvm_ioreq_server_add_vcpu(s, v)) < 0 )
+            goto fail_add_vcpu;
+    }
 
     d->arch.hvm_domain.ioreq_server = s;
+
+    domain_unpause(d);
+
     return 0;
+
+ fail_add_vcpu:
+    for_each_vcpu ( d, v )
+        hvm_ioreq_server_remove_vcpu(s, v);
+    domain_unpause(d);
+    hvm_destroy_ioreq_page(s, 1);
+ fail_set_buf_ioreq:
+    hvm_destroy_ioreq_page(s, 0);
+ fail_set_ioreq:
+    xfree(s);
+ fail_alloc:
+    return rc;
 }
 
 static void hvm_destroy_ioreq_server(struct domain *d)
 {
-    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
+    struct hvm_ioreq_server *s;
+    struct vcpu *v;
+
+    gdprintk(XENLOG_DEBUG, "%s: %d\n", __func__, d->domain_id);
+
+    s = d->arch.hvm_domain.ioreq_server;
+    if ( !s )
+        return;
+
+    domain_pause(d);
+
+    d->arch.hvm_domain.ioreq_server = NULL;
+
+    for_each_vcpu ( d, v )
+        hvm_ioreq_server_remove_vcpu(s, v);
+
+    domain_unpause(d);
 
-    hvm_destroy_ioreq_page(d, &s->ioreq);
-    hvm_destroy_ioreq_page(d, &s->buf_ioreq);
+    hvm_destroy_ioreq_page(s, 1);
+    hvm_destroy_ioreq_page(s, 0);
 
     xfree(s);
 }
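For readers less familiar with this error-handling style: the creation
path above uses the usual goto-unwind idiom, where each fail label
undoes only the steps that succeeded before the failure, in reverse
order of acquisition. A minimal standalone sketch of the idiom (an
illustration with made-up names, not Xen code):

#include <stdlib.h>

struct two_pages { void *ioreq, *buf_ioreq; };

static int create(struct two_pages *p)
{
    int rc = -1;

    p->ioreq = malloc(4096);
    if ( p->ioreq == NULL )
        goto fail_ioreq;          /* nothing acquired yet */

    p->buf_ioreq = malloc(4096);
    if ( p->buf_ioreq == NULL )
        goto fail_buf_ioreq;      /* undo the first step only */

    return 0;

 fail_buf_ioreq:
    free(p->ioreq);
 fail_ioreq:
    return rc;
}

static void destroy(struct two_pages *p)
{
    /* Teardown mirrors the error path: release in reverse order. */
    free(p->buf_ioreq);
    free(p->ioreq);
}

int main(void)
{
    struct two_pages p;

    if ( create(&p) == 0 )
        destroy(&p);
    return 0;
}

Note that hvm_destroy_ioreq_server() follows the same discipline: it
clears the server pointer and detaches the vcpus under domain_pause()
before freeing the pages, so no vcpu can race with a half-torn-down
server.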
@@ -648,14 +707,22 @@ static int hvm_replace_event_channel(struct vcpu *v, domid_t remote_domid,
     return 0;
 }
 
-static int hvm_set_ioreq_server_domid(struct hvm_ioreq_server *s, domid_t domid)
+static int hvm_set_ioreq_server_domid(struct domain *d, domid_t domid)
 {
-    struct domain *d = s->domain;
+    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
     struct vcpu *v;
     int rc;
 
     domain_pause(d);
 
+    rc = -ENOENT;
+    if ( !s )
+        goto done;
+
+    rc = 0;
+    if ( s->domid == domid )
+        goto done;
+
     for_each_vcpu ( d, v )
     {
         rc = hvm_replace_event_channel(v, domid, &v->arch.hvm_vcpu.ioreq_evtchn);
@@ -682,23 +749,6 @@ static int hvm_set_ioreq_server_domid(struct hvm_ioreq_server *s, domid_t domid)
     return rc;
 }
 
-static int hvm_set_ioreq_server_pfn(struct hvm_ioreq_server *s, unsigned long pfn)
-{
-    int rc;
-
-    rc = hvm_set_ioreq_page(s->domain, &s->ioreq, pfn);
-    if ( rc )
-        return rc;
-
-    hvm_update_ioreq_server_evtchn(s);
-    return 0;
-}
-
-static int hvm_set_ioreq_server_buf_pfn(struct hvm_ioreq_server *s, unsigned long pfn)
-{
-    return hvm_set_ioreq_page(s->domain, &s->buf_ioreq, pfn);
-}
-
 int hvm_domain_initialise(struct domain *d)
 {
     int rc;
@@ -766,20 +816,14 @@ int hvm_domain_initialise(struct domain *d)
 
     rtc_init(d);
 
-    rc = hvm_create_ioreq_server(d);
-    if ( rc != 0 )
-        goto fail2;
-
     register_portio_handler(d, 0xe9, 1, hvm_print_line);
 
     rc = hvm_funcs.domain_initialise(d);
     if ( rc != 0 )
-        goto fail3;
+        goto fail2;
 
     return 0;
 
- fail3:
-    hvm_destroy_ioreq_server(d);
  fail2:
     rtc_deinit(d);
     stdvga_deinit(d);
@@ -1478,9 +1522,12 @@ int hvm_vcpu_initialise(struct vcpu *v)
          && (rc = nestedhvm_vcpu_initialise(v)) < 0 ) /* teardown: nestedhvm_vcpu_destroy */
         goto fail5;
 
-    rc = hvm_ioreq_server_add_vcpu(s, v);
-    if ( rc != 0 )
-        goto fail6;
+    if ( s )
+    {
+        rc = hvm_ioreq_server_add_vcpu(s, v);
+        if ( rc != 0 )
+            goto fail6;
+    }
 
     if ( v->vcpu_id == 0 )
     {
@@ -1518,7 +1565,8 @@ void hvm_vcpu_destroy(struct vcpu *v)
     struct domain *d = v->domain;
     struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
 
-    hvm_ioreq_server_remove_vcpu(s, v);
+    if ( s )
+        hvm_ioreq_server_remove_vcpu(s, v);
 
     nestedhvm_vcpu_destroy(v);
@@ -1644,19 +1692,12 @@ bool_t hvm_has_dm(struct domain *d)
     return !!d->arch.hvm_domain.ioreq_server;
 }
 
-bool_t hvm_send_assist_req(struct vcpu *v, const ioreq_t *proto_p)
+static bool_t hvm_send_assist_req_to_server(struct hvm_ioreq_server *s,
+                                            struct vcpu *v,
+                                            const ioreq_t *proto_p)
 {
     struct domain *d = v->domain;
-    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
-    ioreq_t *p;
-
-    if ( unlikely(!vcpu_start_shutdown_deferral(v)) )
-        return 0; /* implicitly bins the i/o operation */
-
-    if ( !s )
-        return 0;
-
-    p = get_ioreq(s, v->vcpu_id);
+    ioreq_t *p = get_ioreq(s, v->vcpu_id);
 
     if ( unlikely(p->state != STATE_IOREQ_NONE) )
     {
@@ -1687,6 +1728,20 @@ bool_t hvm_send_assist_req(struct vcpu *v, const ioreq_t *proto_p)
     return 1;
 }
 
+bool_t hvm_send_assist_req(struct vcpu *v, const ioreq_t *p)
+{
+    struct domain *d = v->domain;
+    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
+
+    if ( unlikely(!vcpu_start_shutdown_deferral(v)) )
+        return 0;
+
+    if ( !s )
+        return 0;
+
+    return hvm_send_assist_req_to_server(s, v, p);
+}
+
 void hvm_hlt(unsigned long rflags)
 {
     struct vcpu *curr = current;
@@ -4318,14 +4373,6 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
 
         switch ( a.index )
         {
-        case HVM_PARAM_IOREQ_PFN:
-            rc = hvm_set_ioreq_server_pfn(d->arch.hvm_domain.ioreq_server,
-                                          a.value);
-            break;
-        case HVM_PARAM_BUFIOREQ_PFN:
-            rc = hvm_set_ioreq_server_buf_pfn(d->arch.hvm_domain.ioreq_server,
-                                              a.value);
-            break;
         case HVM_PARAM_CALLBACK_IRQ:
             hvm_set_callback_via(d, a.value);
            hvm_latch_shinfo_size(d);
@@ -4379,8 +4426,9 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
             if ( a.value == DOMID_SELF )
                 a.value = curr_d->domain_id;
 
-            rc = hvm_set_ioreq_server_domid(d->arch.hvm_domain.ioreq_server,
-                                            a.value);
+            rc = hvm_create_ioreq_server(d, a.value);
+            if ( rc == -EEXIST )
+                rc = hvm_set_ioreq_server_domid(d, a.value);
             break;
         case HVM_PARAM_ACPI_S_STATE:
             /* Not reflexive, as we must domain_pause(). */
@@ -4478,6 +4526,14 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
         case HVM_PARAM_ACPI_S_STATE:
             a.value = d->arch.hvm_domain.is_s3_suspended ? 3 : 0;
             break;
+        case HVM_PARAM_IOREQ_PFN:
+        case HVM_PARAM_BUFIOREQ_PFN:
+        case HVM_PARAM_BUFIOREQ_EVTCHN:
+            /* May need to create server */
+            rc = hvm_create_ioreq_server(d, curr_d->domain_id);
+            if ( rc != 0 && rc != -EEXIST )
+                goto param_fail;
+            /*FALLTHRU*/
         default:
             a.value = d->arch.hvm_domain.params[a.index];
             break;
-- 
1.7.10.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
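A note for anyone experimenting with this series: on-demand creation
means an emulator only has to read the legacy parameters to instantiate
the server. Below is a minimal sketch of such a client, assuming the
libxc API of this era (xc_interface_open() and xc_get_hvm_param()); the
toy program is an illustration, not part of the series.

#include <stdio.h>
#include <stdlib.h>
#include <xenctrl.h>
#include <xen/hvm/params.h>

int main(int argc, char **argv)
{
    xc_interface *xch;
    domid_t dom;
    unsigned long ioreq_pfn, bufioreq_pfn;

    if ( argc < 2 )
        return 1;

    dom = (domid_t)atoi(argv[1]);
    xch = xc_interface_open(NULL, NULL, 0);
    if ( !xch )
        return 1;

    /*
     * With this patch, reading the legacy params is what instantiates
     * the ioreq server: the HVM_PARAM_IOREQ_PFN / HVM_PARAM_BUFIOREQ_PFN
     * get cases in do_hvm_op() call hvm_create_ioreq_server() if no
     * server exists yet, using the calling domain as the server domid.
     */
    xc_get_hvm_param(xch, dom, HVM_PARAM_IOREQ_PFN, &ioreq_pfn);
    xc_get_hvm_param(xch, dom, HVM_PARAM_BUFIOREQ_PFN, &bufioreq_pfn);

    printf("ioreq pfn %#lx, buffered ioreq pfn %#lx\n",
           ioreq_pfn, bufioreq_pfn);

    xc_interface_close(xch);
    return 0;
}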