
RE: [PATCH 3/3] x86/HVM: fold both instances of looking up a hvm_ioreq_vcpu with a request pending



> -----Original Message-----
> From: Jan Beulich <jbeulich@xxxxxxxx>
> Sent: 15 July 2020 13:05
> To: xen-devel@xxxxxxxxxxxxxxxxxxxx
> Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>; Roger Pau Monné 
> <roger.pau@xxxxxxxxxx>; Wei Liu
> <wl@xxxxxxx>; Paul Durrant <paul@xxxxxxx>
> Subject: [PATCH 3/3] x86/HVM: fold both instances of looking up a 
> hvm_ioreq_vcpu with a request
> pending
> 
> It seems pretty likely that the "break" in the loop getting replaced in
> handle_hvm_io_completion() was meant to exit both nested loops at the
> same time. Re-purpose what has been hvm_io_pending() to hand back the
> struct hvm_ioreq_vcpu instance found, and use it to replace the two
> nested loops.
> 
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

Yes, much neater...

Reviewed-by: Paul Durrant <paul@xxxxxxx>

> 
> --- a/xen/arch/x86/hvm/ioreq.c
> +++ b/xen/arch/x86/hvm/ioreq.c
> @@ -81,7 +81,8 @@ static ioreq_t *get_ioreq(struct hvm_ior
>      return &p->vcpu_ioreq[v->vcpu_id];
>  }
> 
> -bool hvm_io_pending(struct vcpu *v)
> +static struct hvm_ioreq_vcpu *get_pending_vcpu(const struct vcpu *v,
> +                                               struct hvm_ioreq_server **srvp)
>  {
>      struct domain *d = v->domain;
>      struct hvm_ioreq_server *s;
> @@ -96,11 +97,20 @@ bool hvm_io_pending(struct vcpu *v)
>                                list_entry )
>          {
>              if ( sv->vcpu == v && sv->pending )
> -                return true;
> +            {
> +                if ( srvp )
> +                    *srvp = s;
> +                return sv;
> +            }
>          }
>      }
> 
> -    return false;
> +    return NULL;
> +}
> +
> +bool hvm_io_pending(struct vcpu *v)
> +{
> +    return get_pending_vcpu(v, NULL);
>  }
> 
>  static bool hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
> @@ -165,8 +175,8 @@ bool handle_hvm_io_completion(struct vcp
>      struct domain *d = v->domain;
>      struct hvm_vcpu_io *vio = &v->arch.hvm.hvm_io;
>      struct hvm_ioreq_server *s;
> +    struct hvm_ioreq_vcpu *sv;
>      enum hvm_io_completion io_completion;
> -    unsigned int id;
> 
>      if ( has_vpci(d) && vpci_process_pending(v) )
>      {
> @@ -174,23 +184,9 @@ bool handle_hvm_io_completion(struct vcp
>          return false;
>      }
> 
> -    FOR_EACH_IOREQ_SERVER(d, id, s)
> -    {
> -        struct hvm_ioreq_vcpu *sv;
> -
> -        list_for_each_entry ( sv,
> -                              &s->ioreq_vcpu_list,
> -                              list_entry )
> -        {
> -            if ( sv->vcpu == v && sv->pending )
> -            {
> -                if ( !hvm_wait_for_io(sv, get_ioreq(s, v)) )
> -                    return false;
> -
> -                break;
> -            }
> -        }
> -    }
> +    sv = get_pending_vcpu(v, &s);
> +    if ( sv && !hvm_wait_for_io(sv, get_ioreq(s, v)) )
> +        return false;
> 
>      vio->io_req.state = hvm_ioreq_needs_completion(&vio->io_req) ?
>          STATE_IORESP_READY : STATE_IOREQ_NONE;
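
For readers skimming the archive, a minimal standalone sketch of the pattern the patch applies (toy types and names are hypothetical, not the Xen code): a single lookup helper walks the nested lists, returns the matching per-vCPU entry, and optionally hands back the owning server through an out-parameter, so the boolean check and the completion path can share one loop.

/*
 * Illustrative sketch only (hypothetical structures, not the Xen code):
 * one helper replaces two nested loops, returning the matching per-vCPU
 * entry and, when requested, its owning server via an out-parameter.
 */
#include <stddef.h>
#include <stdbool.h>

struct toy_vcpu_entry {
    const void *vcpu;               /* vCPU this entry belongs to */
    bool pending;                   /* request outstanding? */
    struct toy_vcpu_entry *next;
};

struct toy_server {
    struct toy_vcpu_entry *vcpus;   /* per-vCPU entries */
    struct toy_server *next;
};

/* Find the pending entry for @vcpu; report its server through @srvp if set. */
static struct toy_vcpu_entry *find_pending(struct toy_server *servers,
                                           const void *vcpu,
                                           struct toy_server **srvp)
{
    struct toy_server *s;

    for ( s = servers; s; s = s->next )
    {
        struct toy_vcpu_entry *e;

        for ( e = s->vcpus; e; e = e->next )
        {
            if ( e->vcpu == vcpu && e->pending )
            {
                if ( srvp )
                    *srvp = s;
                return e;
            }
        }
    }

    return NULL;
}

/* Boolean caller needs no server pointer, mirroring hvm_io_pending(). */
static bool any_pending(struct toy_server *servers, const void *vcpu)
{
    return find_pending(servers, vcpu, NULL) != NULL;
}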