
Re: [Xen-devel] [PATCH RFC V2 24/45] xen: let vcpu_create() select processor



>>> On 06.05.19 at 08:56, <jgross@xxxxxxxx> wrote:
> --- a/xen/common/schedule.c
> +++ b/xen/common/schedule.c
> @@ -314,14 +314,42 @@ static struct sched_item *sched_alloc_item(struct vcpu *v)
>      return NULL;
>  }
>  
> -int sched_init_vcpu(struct vcpu *v, unsigned int processor)
> +static unsigned int sched_select_initial_cpu(struct vcpu *v)
> +{
> +    struct domain *d = v->domain;

const (perhaps also the function parameter)?
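
I.e. just to spell it out, something like this (assuming nothing in the
body requires a non-const struct domain *):

    static unsigned int sched_select_initial_cpu(const struct vcpu *v)
    {
        const struct domain *d = v->domain;
        ...
    }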

> +    nodeid_t node;
> +    cpumask_t cpus;

To be honest, I'm not happy to see new on-stack instances of
cpumask_t appear. Seeing ...

> +    cpumask_clear(&cpus);
> +    for_each_node_mask ( node, d->node_affinity )
> +        cpumask_or(&cpus, &cpus, &node_to_cpumask(node));
> +    cpumask_and(&cpus, &cpus, cpupool_domain_cpumask(d));
> +    if ( cpumask_empty(&cpus) )
> +        cpumask_copy(&cpus, cpupool_domain_cpumask(d));

... that you use this fallback anyway, is there any issue with it also
serving the case where zalloc_cpumask_var() fails?
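
Roughly what I'm thinking of, as an (entirely untested) sketch only:

    static unsigned int sched_select_initial_cpu(struct vcpu *v)
    {
        struct domain *d = v->domain;
        const cpumask_t *cpus = cpupool_domain_cpumask(d);
        cpumask_var_t mask;
        nodeid_t node;
        unsigned int cpu;

        /*
         * Narrow down to node affinity only if the scratch mask can be
         * allocated; otherwise the cpupool mask (the fallback above) gets
         * used as is.
         */
        if ( zalloc_cpumask_var(&mask) )
        {
            for_each_node_mask ( node, d->node_affinity )
                cpumask_or(mask, mask, &node_to_cpumask(node));
            cpumask_and(mask, mask, cpupool_domain_cpumask(d));
            if ( !cpumask_empty(mask) )
                cpus = mask;
        }

        if ( v->vcpu_id == 0 )
            cpu = cpumask_first(cpus);
        else
        {
            /* We can rely on the previous vcpu being available. */
            ASSERT(!is_idle_domain(d));
            cpu = cpumask_cycle(d->vcpu[v->vcpu_id - 1]->processor, cpus);
        }

        free_cpumask_var(mask);

        return cpu;
    }

That way the on-stack instance goes away, and an allocation failure simply
degrades to the cpupool-wide mask you fall back to anyway.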

> +    if ( v->vcpu_id == 0 )
> +        return cpumask_first(&cpus);
> +
> +    /* We can rely on previous vcpu being available. */
> +    ASSERT(!is_idle_domain(d));
> +
> +    return cpumask_cycle(d->vcpu[v->vcpu_id - 1]->processor, &cpus);
> +}
> +
> +int sched_init_vcpu(struct vcpu *v)
>  {
>      struct domain *d = v->domain;
>      struct sched_item *item;
> +    unsigned int processor;
>  
>      if ( (item = sched_alloc_item(v)) == NULL )
>          return 1;
>  
> +    if ( is_idle_domain(d) )
> +        processor = v->vcpu_id;
> +    else
> +        processor = sched_select_initial_cpu(v);
> +
>      sched_set_res(item, per_cpu(sched_res, processor));
>  
>      /* Initialise the per-vcpu timers. */
> @@ -1673,7 +1701,7 @@ static int cpu_schedule_up(unsigned int cpu)
>          return 0;
>  
>      if ( idle_vcpu[cpu] == NULL )
> -        vcpu_create(idle_vcpu[0]->domain, cpu, cpu);
> +        vcpu_create(idle_vcpu[0]->domain, cpu);
>      else
>      {
>          struct vcpu *idle = idle_vcpu[cpu];
> @@ -1867,7 +1895,7 @@ void __init scheduler_init(void)
>      BUG_ON(nr_cpu_ids > ARRAY_SIZE(idle_vcpu));
>      idle_domain->vcpu = idle_vcpu;
>      idle_domain->max_vcpus = nr_cpu_ids;
> -    if ( vcpu_create(idle_domain, 0, 0) == NULL )
> +    if ( vcpu_create(idle_domain, 0) == NULL )
>          BUG();
>      this_cpu(sched_res)->curr = idle_vcpu[0]->sched_item;
>      this_cpu(sched_res)->sched_priv = sched_alloc_pdata(&ops, 0);
> diff --git a/xen/include/asm-x86/dom0_build.h b/xen/include/asm-x86/dom0_build.h
> index 33a5483739..3eb4b036e1 100644
> --- a/xen/include/asm-x86/dom0_build.h
> +++ b/xen/include/asm-x86/dom0_build.h
> @@ -11,8 +11,7 @@ extern unsigned int dom0_memflags;
>  unsigned long dom0_compute_nr_pages(struct domain *d,
>                                      struct elf_dom_parms *parms,
>                                      unsigned long initrd_len);
> -struct vcpu *dom0_setup_vcpu(struct domain *d, unsigned int vcpu_id,
> -                             unsigned int cpu);
> +struct vcpu *dom0_setup_vcpu(struct domain *d, unsigned int vcpu_id);
>  int dom0_setup_permissions(struct domain *d);
>  
>  int dom0_construct_pv(struct domain *d, const module_t *image,
> diff --git a/xen/include/xen/domain.h b/xen/include/xen/domain.h
> index d1bfc82f57..a6e929685c 100644
> --- a/xen/include/xen/domain.h
> +++ b/xen/include/xen/domain.h
> @@ -13,8 +13,7 @@ typedef union {
>      struct compat_vcpu_guest_context *cmp;
>  } vcpu_guest_context_u __attribute__((__transparent_union__));
>  
> -struct vcpu *vcpu_create(
> -    struct domain *d, unsigned int vcpu_id, unsigned int cpu_id);
> +struct vcpu *vcpu_create(struct domain *d, unsigned int vcpu_id);
>  
>  unsigned int dom0_max_vcpus(void);
>  struct vcpu *alloc_dom0_vcpu0(struct domain *dom0);
> diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
> index da117365af..8052f98780 100644
> --- a/xen/include/xen/sched.h
> +++ b/xen/include/xen/sched.h
> @@ -663,7 +663,7 @@ void __domain_crash(struct domain *d);
>  void noreturn asm_domain_crash_synchronous(unsigned long addr);
>  
>  void scheduler_init(void);
> -int  sched_init_vcpu(struct vcpu *v, unsigned int processor);
> +int  sched_init_vcpu(struct vcpu *v);
>  void sched_destroy_vcpu(struct vcpu *v);
>  int  sched_init_domain(struct domain *d, int poolid);
>  void sched_destroy_domain(struct domain *d);
> -- 
> 2.16.4



