[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v2 12/12] xen/domain: Allocate d->vcpu[] in domain_create()
For ARM, the call to arch_domain_create() needs to have completed before domain_max_vcpus() will return the correct upper bound. For each arch's dom0, drop the temporary max_vcpus parameter, and allocation of dom0->vcpu. With d->max_vcpus now correctly configured before evtchn_init(), the poll mask can be constructed suitably for the domain, rather than for the worst-case setting. Due to the evtchn_init() fixes, it no longer calls domain_max_vcpus(), and ARM's two implementations of vgic_max_vcpus() no longer need to work around the out-of-order call. From this point on, d->max_vcpus and d->vcpu[] are valid for any domain which can be looked up by domid. The XEN_DOMCTL_max_vcpus hypercall is modified to reject any call attempt with max != d->max_vcpus, which does match the older semantics (not that it is obvious from the code). The logic to allocate d->vcpu[] is dropped, but at this point the hypercall still needs making to allocate each vcpu. Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> --- CC: Jan Beulich <JBeulich@xxxxxxxx> CC: Stefano Stabellini <sstabellini@xxxxxxxxxx> CC: Julien Grall <julien.grall@xxxxxxx> CC: Wei Liu <wei.liu2@xxxxxxxxxx> v2: * Allocate in domain_create() rather than arch_domain_create(). * Retain domain_max_vcpus(). 
--- xen/arch/arm/domain_build.c | 8 +------- xen/arch/arm/setup.c | 2 +- xen/arch/arm/vgic.c | 11 +---------- xen/arch/arm/vgic/vgic.c | 22 +--------------------- xen/arch/x86/dom0_build.c | 8 +------- xen/arch/x86/setup.c | 2 +- xen/common/domain.c | 18 ++++++++++++++++++ xen/common/domctl.c | 39 +-------------------------------------- xen/common/event_channel.c | 3 +-- xen/include/xen/domain.h | 2 +- 10 files changed, 27 insertions(+), 88 deletions(-) diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c index f4a1225..6f45e56 100644 --- a/xen/arch/arm/domain_build.c +++ b/xen/arch/arm/domain_build.c @@ -72,14 +72,8 @@ unsigned int __init dom0_max_vcpus(void) return opt_dom0_max_vcpus; } -struct vcpu *__init alloc_dom0_vcpu0(struct domain *dom0, - unsigned int max_vcpus) +struct vcpu *__init alloc_dom0_vcpu0(struct domain *dom0) { - dom0->vcpu = xzalloc_array(struct vcpu *, max_vcpus); - if ( !dom0->vcpu ) - return NULL; - dom0->max_vcpus = max_vcpus; - return alloc_vcpu(dom0, 0, 0); } diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c index 72e42e8..a3e1ef7 100644 --- a/xen/arch/arm/setup.c +++ b/xen/arch/arm/setup.c @@ -863,7 +863,7 @@ void __init start_xen(unsigned long boot_phys_offset, dom0_cfg.max_vcpus = dom0_max_vcpus(); dom0 = domain_create(0, &dom0_cfg, true); - if ( IS_ERR(dom0) || (alloc_dom0_vcpu0(dom0, dom0_cfg.max_vcpus) == NULL) ) + if ( IS_ERR(dom0) || (alloc_dom0_vcpu0(dom0) == NULL) ) panic("Error creating domain 0"); if ( construct_dom0(dom0) != 0) diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c index 7a2c455..5a4f082 100644 --- a/xen/arch/arm/vgic.c +++ b/xen/arch/arm/vgic.c @@ -669,16 +669,7 @@ void vgic_free_virq(struct domain *d, unsigned int virq) unsigned int vgic_max_vcpus(const struct domain *d) { - /* - * Since evtchn_init would call domain_max_vcpus for poll_mask - * allocation when the vgic_ops haven't been initialised yet, - * we return MAX_VIRT_CPUS if d->arch.vgic.handler is null. 
- */ - if ( !d->arch.vgic.handler ) - return MAX_VIRT_CPUS; - else - return min_t(unsigned int, MAX_VIRT_CPUS, - d->arch.vgic.handler->max_vcpus); + return min_t(unsigned int, MAX_VIRT_CPUS, d->arch.vgic.handler->max_vcpus); } /* diff --git a/xen/arch/arm/vgic/vgic.c b/xen/arch/arm/vgic/vgic.c index 832632a..4124817 100644 --- a/xen/arch/arm/vgic/vgic.c +++ b/xen/arch/arm/vgic/vgic.c @@ -951,27 +951,7 @@ void vgic_sync_hardware_irq(struct domain *d, unsigned int vgic_max_vcpus(const struct domain *d) { - unsigned int vgic_vcpu_limit; - - switch ( d->arch.vgic.version ) - { - case GIC_INVALID: - /* - * Since evtchn_init would call domain_max_vcpus for poll_mask - * allocation before the VGIC has been initialised, we need to - * return some safe value in this case. As this is for allocation - * purposes, go with the maximum value. - */ - vgic_vcpu_limit = MAX_VIRT_CPUS; - break; - case GIC_V2: - vgic_vcpu_limit = VGIC_V2_MAX_CPUS; - break; - default: - BUG(); - } - - return min_t(unsigned int, MAX_VIRT_CPUS, vgic_vcpu_limit); + return min_t(unsigned int, MAX_VIRT_CPUS, d->arch.vgic.handler->max_vcpus); } #ifdef CONFIG_GICV3 diff --git a/xen/arch/x86/dom0_build.c b/xen/arch/x86/dom0_build.c index b42eac3..423fdec 100644 --- a/xen/arch/x86/dom0_build.c +++ b/xen/arch/x86/dom0_build.c @@ -199,17 +199,11 @@ unsigned int __init dom0_max_vcpus(void) return max_vcpus; } -struct vcpu *__init alloc_dom0_vcpu0(struct domain *dom0, - unsigned int max_vcpus) +struct vcpu *__init alloc_dom0_vcpu0(struct domain *dom0) { dom0->node_affinity = dom0_nodes; dom0->auto_node_affinity = !dom0_nr_pxms; - dom0->vcpu = xzalloc_array(struct vcpu *, max_vcpus); - if ( !dom0->vcpu ) - return NULL; - dom0->max_vcpus = max_vcpus; - return dom0_setup_vcpu(dom0, 0, cpumask_last(&dom0_cpus) /* so it wraps around to first pcpu */); } diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c index 46dcc71..532aca7 100644 --- a/xen/arch/x86/setup.c +++ b/xen/arch/x86/setup.c @@ -1693,7 +1693,7 @@ void 
__init noreturn __start_xen(unsigned long mbi_p) /* Create initial domain 0. */ dom0 = domain_create(get_initial_domain_id(), &dom0_cfg, !pv_shim); - if ( IS_ERR(dom0) || (alloc_dom0_vcpu0(dom0, dom0_cfg.max_vcpus) == NULL) ) + if ( IS_ERR(dom0) || (alloc_dom0_vcpu0(dom0) == NULL) ) panic("Error creating domain 0"); /* Grab the DOM0 command line. */ diff --git a/xen/common/domain.c b/xen/common/domain.c index 0c44f27..902276d 100644 --- a/xen/common/domain.c +++ b/xen/common/domain.c @@ -339,6 +339,19 @@ struct domain *domain_create(domid_t domid, if ( !is_idle_domain(d) ) { + /* Check d->max_vcpus and allocate d->vcpu[]. */ + err = -EINVAL; + if ( config->max_vcpus < 1 || + config->max_vcpus > domain_max_vcpus(d) ) + goto fail; + + err = -ENOMEM; + d->vcpu = xzalloc_array(struct vcpu *, config->max_vcpus); + if ( !d->vcpu ) + goto fail; + + d->max_vcpus = config->max_vcpus; + watchdog_domain_init(d); init_status |= INIT_watchdog; @@ -423,6 +436,11 @@ struct domain *domain_create(domid_t domid, sched_destroy_domain(d); + if ( d->max_vcpus ) + { + d->max_vcpus = 0; + XFREE(d->vcpu); + } if ( init_status & INIT_arch ) arch_domain_destroy(d); if ( init_status & INIT_gnttab ) diff --git a/xen/common/domctl.c b/xen/common/domctl.c index 58e51b2..ee0983d 100644 --- a/xen/common/domctl.c +++ b/xen/common/domctl.c @@ -554,16 +554,9 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) ret = -EINVAL; if ( (d == current->domain) || /* no domain_pause() */ - (max > domain_max_vcpus(d)) ) + (max != d->max_vcpus) ) /* max_vcpus set up in createdomain */ break; - /* Until Xenoprof can dynamically grow its vcpu-s array... */ - if ( d->xenoprof ) - { - ret = -EAGAIN; - break; - } - /* Needed, for example, to ensure writable p.t. state is synced. */ domain_pause(d); @@ -581,38 +574,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) } } - /* We cannot reduce maximum VCPUs. 
*/ - ret = -EINVAL; - if ( (max < d->max_vcpus) && (d->vcpu[max] != NULL) ) - goto maxvcpu_out; - - /* - * For now don't allow increasing the vcpu count from a non-zero - * value: This code and all readers of d->vcpu would otherwise need - * to be converted to use RCU, but at present there's no tools side - * code path that would issue such a request. - */ - ret = -EBUSY; - if ( (d->max_vcpus > 0) && (max > d->max_vcpus) ) - goto maxvcpu_out; - ret = -ENOMEM; online = cpupool_domain_cpumask(d); - if ( max > d->max_vcpus ) - { - struct vcpu **vcpus; - - BUG_ON(d->vcpu != NULL); - BUG_ON(d->max_vcpus != 0); - - if ( (vcpus = xzalloc_array(struct vcpu *, max)) == NULL ) - goto maxvcpu_out; - - /* Install vcpu array /then/ update max_vcpus. */ - d->vcpu = vcpus; - smp_wmb(); - d->max_vcpus = max; - } for ( i = 0; i < max; i++ ) { diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c index 41cbbae..381f30e 100644 --- a/xen/common/event_channel.c +++ b/xen/common/event_channel.c @@ -1303,8 +1303,7 @@ int evtchn_init(struct domain *d, unsigned int max_port) evtchn_from_port(d, 0)->state = ECS_RESERVED; #if MAX_VIRT_CPUS > BITS_PER_LONG - d->poll_mask = xzalloc_array(unsigned long, - BITS_TO_LONGS(domain_max_vcpus(d))); + d->poll_mask = xzalloc_array(unsigned long, BITS_TO_LONGS(d->max_vcpus)); if ( !d->poll_mask ) { free_evtchn_bucket(d, d->evtchn); diff --git a/xen/include/xen/domain.h b/xen/include/xen/domain.h index 651205d..ce31999 100644 --- a/xen/include/xen/domain.h +++ b/xen/include/xen/domain.h @@ -17,7 +17,7 @@ struct vcpu *alloc_vcpu( struct domain *d, unsigned int vcpu_id, unsigned int cpu_id); unsigned int dom0_max_vcpus(void); -struct vcpu *alloc_dom0_vcpu0(struct domain *dom0, unsigned int max_vcpus); +struct vcpu *alloc_dom0_vcpu0(struct domain *dom0); int vcpu_reset(struct vcpu *); int vcpu_up(struct vcpu *v); -- 2.1.4 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxxxxxxxxx 
https://lists.xenproject.org/mailman/listinfo/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |