Re: [Xen-devel] [PATCH v1 1/4] xen: enabling XL to set per-VCPU parameters of a domain for RTDS scheduler



>>> On 07.05.15 at 19:05, <lichong659@xxxxxxxxx> wrote:
> --- a/xen/common/sched_rt.c
> +++ b/xen/common/sched_rt.c
> @@ -1085,6 +1085,9 @@ rt_dom_cntl(
>      struct list_head *iter;
>      unsigned long flags;
>      int rc = 0;
> +    xen_domctl_sched_rtds_params_t *local_sched;
> +    int vcpu_index=0;
> +    int i;

unsigned int
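
I.e., presumably

    unsigned int vcpu_index = 0, i;

since both are used only as non-negative counters.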

> @@ -1110,6 +1113,67 @@ rt_dom_cntl(
>          }
>          spin_unlock_irqrestore(&prv->lock, flags);
>          break;
> +    case XEN_DOMCTL_SCHEDOP_getvcpuinfo:
> +        op->u.rtds.nr_vcpus = 0;
> +        spin_lock_irqsave(&prv->lock, flags);
> +        list_for_each( iter, &sdom->vcpu )
> +            vcpu_index++;
> +        spin_unlock_irqrestore(&prv->lock, flags);
> +        op->u.rtds.nr_vcpus = vcpu_index;

Does dropping the lock here and re-acquiring it below really work
race-free? The count taken under the first lock may be stale by the
time the array is filled under the second one.
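
One race-free shape this could take (just a sketch, reusing the names
from the patch; retrying the allocation would be a friendlier
alternative to failing, and the filling loop below would then continue
under this same lock):

    spin_lock_irqsave(&prv->lock, flags);
    /* Re-count under the re-acquired lock: vCPUs may have come or
     * gone while the lock was dropped for the allocation. */
    i = 0;
    list_for_each( iter, &sdom->vcpu )
        i++;
    if ( i != vcpu_index )
    {
        spin_unlock_irqrestore(&prv->lock, flags);
        xfree(local_sched);
        return -EAGAIN;
    }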

> +        local_sched = xzalloc_array(xen_domctl_sched_rtds_params_t,
> +                vcpu_index);
> +        if( local_sched == NULL )
> +        {
> +            return -ENOMEM;
> +        }

Pointless braces.
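
I.e. simply:

    if ( local_sched == NULL )
        return -ENOMEM;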

> +        vcpu_index = 0;
> +        spin_lock_irqsave(&prv->lock, flags);
> +        list_for_each( iter, &sdom->vcpu )
> +        {
> +            struct rt_vcpu *svc = list_entry(iter, struct rt_vcpu, sdom_elem);
> +
> +            local_sched[vcpu_index].budget = svc->budget / MICROSECS(1);
> +            local_sched[vcpu_index].period = svc->period / MICROSECS(1);
> +            local_sched[vcpu_index].index = vcpu_index;

What use is this index to the caller? I think you rather want to
report the vCPU number. That matters especially for the get/set use
case: unless these indexes can never change, indexes passed back into
the set operation risk having become stale by the time the hypervisor
processes the request.
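
Something along these lines (a sketch; svc->vcpu->vcpu_id being the
stable per-domain vCPU number sched_rt.c already uses elsewhere):

    /* Report the stable vCPU number rather than the position in the
     * scheduler's list, which the caller cannot rely on. */
    local_sched[vcpu_index].index = svc->vcpu->vcpu_id;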

> +            vcpu_index++;
> +        }
> +        spin_unlock_irqrestore(&prv->lock, flags);
> +        copy_to_guest(op->u.rtds.vcpus, local_sched, vcpu_index);
> +        xfree(local_sched);
> +        rc = 0;
> +        break;
> +    case XEN_DOMCTL_SCHEDOP_putvcpuinfo:
> +        local_sched = xzalloc_array(xen_domctl_sched_rtds_params_t,
> +                op->u.rtds.nr_vcpus);

While using xzalloc_array() above is warranted for security reasons
(that array gets copied out to the guest, so no uninitialized
hypervisor memory may leak through it), I don't see why you couldn't
use xmalloc_array() here.
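
I.e. (sketch):

    /* All nr_vcpus elements get overwritten by the copy_from_guest()
     * below, so pre-zeroing the allocation buys nothing here. */
    local_sched = xmalloc_array(xen_domctl_sched_rtds_params_t,
                                op->u.rtds.nr_vcpus);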

> +        if( local_sched == NULL )
> +        {
> +            return -ENOMEM;
> +        }
> +        copy_from_guest(local_sched, op->u.rtds.vcpus, op->u.rtds.nr_vcpus);
> +
> +        for( i = 0; i < op->u.rtds.nr_vcpus; i++ )
> +        {
> +            vcpu_index = 0;
> +            spin_lock_irqsave(&prv->lock, flags);
> +            list_for_each( iter, &sdom->vcpu )
> +            {
> +                struct rt_vcpu *svc = list_entry(iter, struct rt_vcpu, sdom_elem);
> +                if ( local_sched[i].index == vcpu_index )
> +                {
> +                    if ( local_sched[i].period <= 0 || local_sched[i].budget <= 0 )
> +                         return -EINVAL;
> +
> +                    svc->period = MICROSECS(local_sched[i].period);
> +                    svc->budget = MICROSECS(local_sched[i].budget);
> +                    break;
> +                }
> +                vcpu_index++;
> +            }
> +            spin_unlock_irqrestore(&prv->lock, flags);
> +        }

Considering a maximum-size guest, these two nested loops could
require a couple of million iterations. That's too much without any
preemption checks in the middle.
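
A minimal way to bound the work would be a preemption point on the
outer loop (a sketch; the continuation plumbing needed to actually
restart the domctl at the right iteration is not shown):

    for ( i = 0; i < op->u.rtds.nr_vcpus; i++ )
    {
        if ( hypercall_preempt_check() )
        {
            /* Too much pending work; assumes the caller arranges a
             * hypercall continuation so the loop can be resumed. */
            rc = -ERESTART;
            break;
        }
        /* ... per-vCPU update as above ... */
    }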

> --- a/xen/common/schedule.c
> +++ b/xen/common/schedule.c
> @@ -1093,7 +1093,9 @@ long sched_adjust(struct domain *d, struct xen_domctl_scheduler_op *op)
>  
>      if ( (op->sched_id != DOM2OP(d)->sched_id) ||
>           ((op->cmd != XEN_DOMCTL_SCHEDOP_putinfo) &&
> -          (op->cmd != XEN_DOMCTL_SCHEDOP_getinfo)) )
> +          (op->cmd != XEN_DOMCTL_SCHEDOP_getinfo) &&
> +          (op->cmd != XEN_DOMCTL_SCHEDOP_putvcpuinfo) &&
> +          (op->cmd != XEN_DOMCTL_SCHEDOP_getvcpuinfo)) )

Imo this should become a switch now.
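
I.e. something along the lines of (sketch):

    if ( op->sched_id != DOM2OP(d)->sched_id )
        return -EINVAL;

    switch ( op->cmd )
    {
    case XEN_DOMCTL_SCHEDOP_putinfo:
    case XEN_DOMCTL_SCHEDOP_getinfo:
    case XEN_DOMCTL_SCHEDOP_putvcpuinfo:
    case XEN_DOMCTL_SCHEDOP_getvcpuinfo:
        break;
    default:
        return -EINVAL;
    }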

> @@ -351,9 +361,12 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_vcpus_t);
>  #define XEN_SCHEDULER_ARINC653 7
>  #define XEN_SCHEDULER_RTDS     8
>  
> -/* Set or get info? */
> +/* Set or get info */

Pointless change (and if you really mean to do it, then please do it
such that the resulting comment matches our coding style).

> @@ -373,8 +386,11 @@ struct xen_domctl_scheduler_op {
>              uint16_t weight;
>          } credit2;
>          struct xen_domctl_sched_rtds {
> -            uint32_t period;
> -            uint32_t budget;
> +            uint64_t period;
> +            uint64_t budget;

This widening of the fields seems both unrelated and unnecessary.

Jan

