
Re: [Xen-devel] [PATCH] Adjust number of domains in cpupools when destroying domain



On Wed, Nov 12, 2014 at 12:10:02PM +0100, Juergen Gross wrote:
> Commit bac6334b51d9bcfe57ecf4a4cb5288348fcf044a (move domain to
> cpupool0 before destroying it) introduced an error in the accounting
> of cpupools regarding the number of domains. The number of domains
> is nor adjusted when a domain is moved to cpupool0 in kill_domain().

s/nor/not/
> 
> Correct this by introducing a cpupool function doing the move
> instead of open coding it by calling sched_move_domain().
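
FWIW, the rule the new helper enforces is easy to model stand-alone.
The sketch below is simplified stub code, not actual Xen code -- the
names just mirror the patch: sched_move_domain() only switches the
scheduler, so the caller has to move the n_dom count along with the
domain, which is the step domain_kill() was missing.

    #include <assert.h>
    #include <stdio.h>

    /* Minimal stand-in types; the real ones live in xen/sched.h. */
    struct cpupool { int n_dom; };
    struct domain  { struct cpupool *cpupool; };

    /* Stub: pretend the scheduler move always succeeds. */
    static int sched_move_domain(struct domain *d, struct cpupool *c)
    {
        d->cpupool = c;
        return 0;
    }

    /* Same shape as cpupool_move_domain_locked() in the patch. */
    static int move_domain_locked(struct domain *d, struct cpupool *c)
    {
        int ret;

        d->cpupool->n_dom--;            /* old pool loses the domain */
        ret = sched_move_domain(d, c);
        if ( ret )
            d->cpupool->n_dom++;        /* move failed: restore old count */
        else
            c->n_dom++;                 /* move done: new pool gains it */

        return ret;
    }

    int main(void)
    {
        struct cpupool pool0 = { 0 }, pool1 = { 1 };
        struct domain dom = { &pool1 };

        move_domain_locked(&dom, &pool0);
        /* A bare sched_move_domain() would leave both counts stale. */
        assert(pool0.n_dom == 1 && pool1.n_dom == 0);
        printf("pool0.n_dom=%d pool1.n_dom=%d\n", pool0.n_dom, pool1.n_dom);
        return 0;
    }

With the helper doing both halves under cpupool_lock, callers can't
forget the accounting again.
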
> 
> Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
> Tested-by: Dietmar Hahn <dietmar.hahn@xxxxxxxxxxxxxx>
> Reviewed-by: Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>
> ---
>  xen/common/cpupool.c    | 47 +++++++++++++++++++++++++++++++++--------------
>  xen/common/domain.c     |  2 +-
>  xen/include/xen/sched.h |  1 +
>  3 files changed, 35 insertions(+), 15 deletions(-)
> 
> diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c
> index 73249d3..a758a8b 100644
> --- a/xen/common/cpupool.c
> +++ b/xen/common/cpupool.c
> @@ -225,6 +225,35 @@ static int cpupool_destroy(struct cpupool *c)
>  }
>  
>  /*
> + * Move domain to another cpupool
> + */
> +static int cpupool_move_domain_locked(struct domain *d, struct cpupool *c)
> +{
> +    int ret;
> +
> +    d->cpupool->n_dom--;
> +    ret = sched_move_domain(d, c);
> +    if ( ret )
> +        d->cpupool->n_dom++;
> +    else
> +        c->n_dom++;
> +
> +    return ret;
> +}

Missing blank line ("\n") between the two functions?

> +int cpupool_move_domain(struct domain *d, struct cpupool *c)
> +{
> +    int ret;
> +
> +    spin_lock(&cpupool_lock);
> +
> +    ret = cpupool_move_domain_locked(d, c);
> +
> +    spin_unlock(&cpupool_lock);
> +
> +    return ret;
> +}
> +
> +/*
>   * assign a specific cpu to a cpupool
>   * cpupool_lock must be held
>   */
> @@ -338,14 +367,9 @@ static int cpupool_unassign_cpu(struct cpupool *c, unsigned int cpu)
>                  ret = -EBUSY;
>                  break;
>              }
> -            c->n_dom--;
> -            ret = sched_move_domain(d, cpupool0);
> +            ret = cpupool_move_domain_locked(d, cpupool0);
>              if ( ret )
> -            {
> -                c->n_dom++;
>                  break;
> -            }
> -            cpupool0->n_dom++;
>          }
>          rcu_read_unlock(&domlist_read_lock);
>          if ( ret )
> @@ -613,16 +637,11 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
>                          d->domain_id, op->cpupool_id);
>          ret = -ENOENT;
>          spin_lock(&cpupool_lock);
> +
>          c = cpupool_find_by_id(op->cpupool_id);
>          if ( (c != NULL) && cpumask_weight(c->cpu_valid) )
> -        {
> -            d->cpupool->n_dom--;
> -            ret = sched_move_domain(d, c);
> -            if ( ret )
> -                d->cpupool->n_dom++;
> -            else
> -                c->n_dom++;
> -        }
> +            ret = cpupool_move_domain_locked(d, c);
> +
>          spin_unlock(&cpupool_lock);
>          cpupool_dprintk("cpupool move_domain(dom=%d)->pool=%d ret %d\n",
>                          d->domain_id, op->cpupool_id, ret);
> diff --git a/xen/common/domain.c b/xen/common/domain.c
> index a3f51ec..4a62c1d 100644
> --- a/xen/common/domain.c
> +++ b/xen/common/domain.c
> @@ -621,7 +621,7 @@ int domain_kill(struct domain *d)
>                  rc = -EAGAIN;
>              break;
>          }
> -        if ( sched_move_domain(d, cpupool0) )
> +        if ( cpupool_move_domain(d, cpupool0) )
>              return -EAGAIN;
>          for_each_vcpu ( d, v )
>              unmap_vcpu_info(v);
> diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
> index c5157e6..46fc6e3 100644
> --- a/xen/include/xen/sched.h
> +++ b/xen/include/xen/sched.h
> @@ -871,6 +871,7 @@ struct cpupool *cpupool_get_by_id(int poolid);
>  void cpupool_put(struct cpupool *pool);
>  int cpupool_add_domain(struct domain *d, int poolid);
>  void cpupool_rm_domain(struct domain *d);
> +int cpupool_move_domain(struct domain *d, struct cpupool *c);
>  int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op);
>  void schedule_dump(struct cpupool *c);
>  extern void dump_runq(unsigned char key);
> -- 
> 2.1.2
