
RE: [PATCH 05/12] evtchn/sched: reject poll requests for unusable ports



> -----Original Message-----
> From: Xen-devel <xen-devel-bounces@xxxxxxxxxxxxxxxxxxxx> On Behalf Of Jan Beulich
> Sent: 28 September 2020 11:59
> To: xen-devel@xxxxxxxxxxxxxxxxxxxx
> Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>; George Dunlap <George.Dunlap@xxxxxxxxxxxxx>; Ian Jackson <iwj@xxxxxxxxxxxxxx>; Julien Grall <julien@xxxxxxx>; Wei Liu <wl@xxxxxxx>; Stefano Stabellini <sstabellini@xxxxxxxxxx>; Dario Faggioli <dfaggioli@xxxxxxxx>
> Subject: [PATCH 05/12] evtchn/sched: reject poll requests for unusable ports
> 
> Both before and after XSA-342 there has been an asymmetry in how ports
> that aren't really usable get treated in do_poll(): Ones beyond a
> certain boundary (max_evtchns originally, valid_evtchns subsequently)
> were refused with -EINVAL, while lower ones were accepted despite there
> potentially being no way to wake the vCPU from its polling state again.
> Arrange to also honor evtchn_usable() output in the decision.
> 
> Requested-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

Reviewed-by: Paul Durrant <paul@xxxxxxx>
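
For the benefit of archive readers: with this change a guest polling an
in-range but unusable port now gets -EINVAL back from SCHEDOP_poll instead
of being allowed to enter a poll it may never be woken from. As a minimal
illustrative sketch (not code from this patch; the poll_one_port() helper,
the header name and the hypercall wrapper are assumed to follow the usual
Xen guest interface and may differ per OS), the affected guest-side call
looks roughly like:

    /* Illustrative only: header and wrapper names follow the usual
     * Linux-style guest interface and may need adjusting elsewhere. */
    #include <xen/interface/sched.h>  /* struct sched_poll, SCHEDOP_poll */

    static long poll_one_port(evtchn_port_t port, uint64_t timeout_ns)
    {
        struct sched_poll poll = {
            .nr_ports = 1,
            .timeout  = timeout_ns,   /* 0 == no timeout */
        };

        set_xen_guest_handle(poll.ports, &port);

        /*
         * Previously an in-range but unusable port was accepted here and
         * the polling vCPU could block with no way to be woken again;
         * with this patch the hypercall fails with -EINVAL instead.
         */
        return HYPERVISOR_sched_op(SCHEDOP_poll, &poll);
    }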

> 
> --- a/xen/common/sched/core.c
> +++ b/xen/common/sched/core.c
> @@ -1427,13 +1427,13 @@ static long do_poll(struct sched_poll *s
>          if ( __copy_from_guest_offset(&port, sched_poll->ports, i, 1) )
>              goto out;
> 
> -        rc = -EINVAL;
> -        if ( !port_is_valid(d, port) )
> -            goto out;
> -
> -        rc = 0;
> -        if ( evtchn_port_is_pending(d, port) )
> +        rc = evtchn_port_poll(d, port);
> +        if ( rc )
> +        {
> +            if ( rc > 0 )
> +                rc = 0;
>              goto out;
> +        }
>      }
> 
>      if ( sched_poll->nr_ports == 1 )
> --- a/xen/include/xen/event.h
> +++ b/xen/include/xen/event.h
> @@ -240,19 +240,6 @@ static inline bool evtchn_is_pending(con
>      return evtchn_usable(evtchn) && d->evtchn_port_ops->is_pending(d, evtchn);
>  }
> 
> -static inline bool evtchn_port_is_pending(struct domain *d, evtchn_port_t port)
> -{
> -    struct evtchn *evtchn = evtchn_from_port(d, port);
> -    bool rc;
> -    unsigned long flags;
> -
> -    spin_lock_irqsave(&evtchn->lock, flags);
> -    rc = evtchn_is_pending(d, evtchn);
> -    spin_unlock_irqrestore(&evtchn->lock, flags);
> -
> -    return rc;
> -}
> -
>  static inline bool evtchn_is_masked(const struct domain *d,
>                                      const struct evtchn *evtchn)
>  {
> @@ -279,6 +266,24 @@ static inline bool evtchn_is_busy(const
>             d->evtchn_port_ops->is_busy(d, evtchn);
>  }
> 
> +static inline int evtchn_port_poll(struct domain *d, evtchn_port_t port)
> +{
> +    int rc = -EINVAL;
> +
> +    if ( port_is_valid(d, port) )
> +    {
> +        struct evtchn *evtchn = evtchn_from_port(d, port);
> +        unsigned long flags;
> +
> +        spin_lock_irqsave(&evtchn->lock, flags);
> +        if ( evtchn_usable(evtchn) )
> +            rc = evtchn_is_pending(d, evtchn);
> +        spin_unlock_irqrestore(&evtchn->lock, flags);
> +    }
> +
> +    return rc;
> +}
> +
>  static inline int evtchn_port_set_priority(struct domain *d,
>                                             struct evtchn *evtchn,
>                                             unsigned int priority)
> 
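
One further note, in case it isn't obvious from the do_poll() hunk above:
evtchn_port_poll() is tri-state, returning a negative errno for an invalid
or unusable port, 0 for a usable but not pending port, and a positive
value for a pending one. A hypothetical caller (sketch only, not part of
the patch) would consume it as:

    int rc = evtchn_port_poll(d, port);

    if ( rc < 0 )
        return rc;    /* invalid or unusable port: propagate -EINVAL */
    if ( rc > 0 )
        return 0;     /* pending: the poll succeeds immediately */
    /* rc == 0: usable but not pending, keep waiting */

which is exactly the folding that the "if ( rc > 0 ) rc = 0;" in do_poll()
performs.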