[Xen-changelog] [xen-unstable] x86: make the dom0_max_vcpus option more flexible
# HG changeset patch
# User David Vrabel <david.vrabel@xxxxxxxxxx>
# Date 1347359185 -7200
# Node ID d9d4c7ed2fd2683d1928b36a2a07c6a519273d13
# Parent  0a9a4549e6b91dc030c6a3cfdc0963f7959f5692
x86: make the dom0_max_vcpus option more flexible

The dom0_max_vcpus command line option only allows the exact number of
VCPUs for dom0 to be set.  It is not possible to say "up to N VCPUs
but no more than the number physically present."

Allow a range for the option to set a minimum number of VCPUs, and a
maximum which does not exceed the number of PCPUs.

For example, with "dom0_max_vcpus=4-8":

    PCPUs  Dom0 VCPUs
      2        4
      4        4
      6        6
      8        8
     10        8

Existing command lines with "dom0_max_vcpus=N" still work as before
(and are equivalent to dom0_max_vcpus=N-N).

Signed-off-by: David Vrabel <david.vrabel@xxxxxxxxxx>
Committed-by: Jan Beulich <jbeulich@xxxxxxxx>
---

diff -r 0a9a4549e6b9 -r d9d4c7ed2fd2 docs/misc/xen-command-line.markdown
--- a/docs/misc/xen-command-line.markdown       Tue Sep 11 10:57:36 2012 +0200
+++ b/docs/misc/xen-command-line.markdown       Tue Sep 11 12:26:25 2012 +0200
@@ -378,10 +378,33 @@ Specify the bit width of the DMA heap.
 Specify a list of IO ports to be excluded from dom0 access.
 
 ### dom0\_max\_vcpus
-> `= <integer>`
 
-Specify the maximum number of vcpus to give to dom0.  This defaults
-to the number of pcpus on the host.
+Either:
+
+> `= <integer>`.
+
+The number of VCPUs to give to dom0.  This number of VCPUs can be more
+than the number of PCPUs on the host.  The default is the number of
+PCPUs.
+
+Or:
+
+> `= <min>-<max>` where `<min>` and `<max>` are integers.
+
+Gives dom0 a number of VCPUs equal to the number of PCPUs, but always
+at least `<min>` and no more than `<max>`.  Using `<min>` may give
+more VCPUs than PCPUs.  `<min>` or `<max>` may be omitted and the
+defaults of 1 and unlimited respectively are used instead.
+
+For example, with `dom0_max_vcpus=4-8`:
+
+     Number of
+      PCPUs | Dom0 VCPUs
+          2 |     4
+          4 |     4
+          6 |     6
+          8 |     8
+         10 |     8
 
 ### dom0\_mem
 > `= List of ( min:<size> | max:<size> | <size> )`

diff -r 0a9a4549e6b9 -r d9d4c7ed2fd2 xen/arch/x86/domain_build.c
--- a/xen/arch/x86/domain_build.c       Tue Sep 11 10:57:36 2012 +0200
+++ b/xen/arch/x86/domain_build.c       Tue Sep 11 12:26:25 2012 +0200
@@ -82,20 +82,40 @@ static void __init parse_dom0_mem(const
 }
 custom_param("dom0_mem", parse_dom0_mem);
 
-static unsigned int __initdata opt_dom0_max_vcpus;
-integer_param("dom0_max_vcpus", opt_dom0_max_vcpus);
+static unsigned int __initdata opt_dom0_max_vcpus_min = 1;
+static unsigned int __initdata opt_dom0_max_vcpus_max = UINT_MAX;
+
+static void __init parse_dom0_max_vcpus(const char *s)
+{
+    if (*s == '-')                   /* -M */
+        opt_dom0_max_vcpus_max = simple_strtoul(s + 1, &s, 0);
+    else                             /* N, N-, or N-M */
+    {
+        opt_dom0_max_vcpus_min = simple_strtoul(s, &s, 0);
+        if (*s++ == '\0')            /* N */
+            opt_dom0_max_vcpus_max = opt_dom0_max_vcpus_min;
+        else if (*s != '\0')         /* N-M */
+            opt_dom0_max_vcpus_max = simple_strtoul(s, &s, 0);
+    }
+}
+custom_param("dom0_max_vcpus", parse_dom0_max_vcpus);
 
 struct vcpu *__init alloc_dom0_vcpu0(void)
 {
-    if ( opt_dom0_max_vcpus == 0 )
-        opt_dom0_max_vcpus = num_cpupool_cpus(cpupool0);
-    if ( opt_dom0_max_vcpus > MAX_VIRT_CPUS )
-        opt_dom0_max_vcpus = MAX_VIRT_CPUS;
+    unsigned max_vcpus;
 
-    dom0->vcpu = xzalloc_array(struct vcpu *, opt_dom0_max_vcpus);
+    max_vcpus = num_cpupool_cpus(cpupool0);
+    if ( opt_dom0_max_vcpus_min > max_vcpus )
+        max_vcpus = opt_dom0_max_vcpus_min;
+    if ( opt_dom0_max_vcpus_max < max_vcpus )
+        max_vcpus = opt_dom0_max_vcpus_max;
+    if ( max_vcpus > MAX_VIRT_CPUS )
+        max_vcpus = MAX_VIRT_CPUS;
+
+    dom0->vcpu = xzalloc_array(struct vcpu *, max_vcpus);
     if ( !dom0->vcpu )
         return NULL;
-    dom0->max_vcpus = opt_dom0_max_vcpus;
+    dom0->max_vcpus = max_vcpus;
 
     return alloc_vcpu(dom0, 0, 0);
 }
@@ -185,11 +205,11 @@ static unsigned long __init compute_dom0
     unsigned long max_pages = dom0_max_nrpages;
 
     /* Reserve memory for further dom0 vcpu-struct allocations... */
-    avail -= (opt_dom0_max_vcpus - 1UL)
+    avail -= (d->max_vcpus - 1UL)
             << get_order_from_bytes(sizeof(struct vcpu));
 
     /* ...and compat_l4's, if needed. */
     if ( is_pv_32on64_domain(d) )
-        avail -= opt_dom0_max_vcpus - 1;
+        avail -= d->max_vcpus - 1;
 
     /* Reserve memory for iommu_dom0_init() (rough estimate). */
     if ( iommu_enabled )
@@ -889,10 +909,10 @@ int __init construct_dom0(
     for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ )
         shared_info(d, vcpu_info[i].evtchn_upcall_mask) = 1;
 
-    printk("Dom0 has maximum %u VCPUs\n", opt_dom0_max_vcpus);
+    printk("Dom0 has maximum %u VCPUs\n", d->max_vcpus);
 
     cpu = cpumask_first(cpupool0->cpu_valid);
-    for ( i = 1; i < opt_dom0_max_vcpus; i++ )
+    for ( i = 1; i < d->max_vcpus; i++ )
     {
         cpu = cpumask_cycle(cpu, cpupool0->cpu_valid);
         (void)alloc_vcpu(d, i, cpu);
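For readers tracing the semantics, the PCPUs/VCPUs table above can be
reproduced outside the hypervisor.  The following standalone C sketch
(not part of the patch) mirrors parse_dom0_max_vcpus() and the clamping
in alloc_dom0_vcpu0().  Its assumptions: the standard strtoul() stands
in for Xen's simple_strtoul(), MAX_VIRT_CPUS is given an illustrative
value, and a plain integer argument replaces num_cpupool_cpus(cpupool0).

/*
 * Standalone sketch (NOT Xen code) of the dom0_max_vcpus range
 * semantics introduced by the patch above.
 */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_VIRT_CPUS 256               /* illustrative; arch-specific in Xen */

static unsigned int opt_min = 1;        /* default <min> */
static unsigned int opt_max = UINT_MAX; /* default <max>: unlimited */

/* Same structure as parse_dom0_max_vcpus() in the patch. */
static void parse_range(const char *s)
{
    char *e;

    if (*s == '-')                   /* -M */
        opt_max = strtoul(s + 1, &e, 0);
    else                             /* N, N-, or N-M */
    {
        opt_min = strtoul(s, &e, 0);
        s = e;
        if (*s++ == '\0')            /* N: exact count, i.e. N-N */
            opt_max = opt_min;
        else if (*s != '\0')         /* N-M */
            opt_max = strtoul(s, &e, 0);
    }
}

/*
 * Same clamping as alloc_dom0_vcpu0(): start from the PCPU count,
 * raise to <min>, cap at <max>, never exceed MAX_VIRT_CPUS.
 */
static unsigned int dom0_vcpus(unsigned int pcpus)
{
    unsigned int v = pcpus;

    if (opt_min > v)
        v = opt_min;
    if (opt_max < v)
        v = opt_max;
    if (v > MAX_VIRT_CPUS)
        v = MAX_VIRT_CPUS;
    return v;
}

int main(int argc, char **argv)
{
    static const unsigned int pcpus[] = { 2, 4, 6, 8, 10 };
    unsigned int i;

    parse_range(argc > 1 ? argv[1] : "4-8");

    printf("PCPUs  Dom0 VCPUs\n");
    for (i = 0; i < sizeof(pcpus) / sizeof(pcpus[0]); i++)
        printf("%5u  %10u\n", pcpus[i], dom0_vcpus(pcpus[i]));
    return 0;
}

Built with any C compiler and run without arguments, this prints the
same table as the commit message; arguments such as "4-", "-8", or "6"
exercise the open-ended and exact forms (an exact "N" behaves like
"N-N", matching the compatibility note above).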