
Re: [Xen-devel] [PATCH v2 05/11] ioreq: add internal ioreq initialization support



> -----Original Message-----
> From: Roger Pau Monne <roger.pau@xxxxxxxxxx>
> Sent: 03 September 2019 17:14
> To: xen-devel@xxxxxxxxxxxxxxxxxxxx
> Cc: Roger Pau Monne <roger.pau@xxxxxxxxxx>; Jan Beulich <jbeulich@xxxxxxxx>; 
> Andrew Cooper
> <Andrew.Cooper3@xxxxxxxxxx>; Wei Liu <wl@xxxxxxx>; Paul Durrant 
> <Paul.Durrant@xxxxxxxxxx>
> Subject: [PATCH v2 05/11] ioreq: add internal ioreq initialization support
> 
> Add support for internal ioreq servers to initialization and
> deinitialization routines, prevent some functions from being executed
> against internal ioreq servers and add guards to only allow internal
> callers to modify internal ioreq servers. External callers (ie: from
> hypercalls) are only allowed to deal with external ioreq servers.
> 
> Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
> ---
> Changes since v1:
>  - Do not pass an 'internal' parameter to most functions, and instead
>    use the id to key whether an ioreq server is internal or external.
>  - Prevent enabling an internal server without a handler.
> ---
>  xen/arch/x86/hvm/dm.c            |  17 ++-
>  xen/arch/x86/hvm/ioreq.c         | 173 +++++++++++++++++++------------
>  xen/include/asm-x86/hvm/domain.h |   5 +-
>  xen/include/asm-x86/hvm/ioreq.h  |   8 +-
>  4 files changed, 135 insertions(+), 68 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
> index c2fca9f729..6a3682e58c 100644
> --- a/xen/arch/x86/hvm/dm.c
> +++ b/xen/arch/x86/hvm/dm.c
> @@ -417,7 +417,7 @@ static int dm_op(const struct dmop_args *op_args)
>              break;
> 
>          rc = hvm_create_ioreq_server(d, data->handle_bufioreq,
> -                                     &data->id);
> +                                     &data->id, false);
>          break;
>      }
> 
> @@ -450,6 +450,9 @@ static int dm_op(const struct dmop_args *op_args)
>          rc = -EINVAL;
>          if ( data->pad )
>              break;
> +        rc = -EPERM;
> +        if ( hvm_ioreq_is_internal(data->id) )
> +            break;
> 
>          rc = hvm_map_io_range_to_ioreq_server(d, data->id, data->type,
>                                                data->start, data->end);
> @@ -464,6 +467,9 @@ static int dm_op(const struct dmop_args *op_args)
>          rc = -EINVAL;
>          if ( data->pad )
>              break;
> +        rc = -EPERM;
> +        if ( hvm_ioreq_is_internal(data->id) )
> +            break;
> 
>          rc = hvm_unmap_io_range_from_ioreq_server(d, data->id, data->type,
>                                                    data->start, data->end);
> @@ -481,6 +487,9 @@ static int dm_op(const struct dmop_args *op_args)
>          rc = -EOPNOTSUPP;
>          if ( !hap_enabled(d) )
>              break;
> +        rc = -EPERM;
> +        if ( hvm_ioreq_is_internal(data->id) )
> +            break;
> 
>          if ( first_gfn == 0 )
>              rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
> @@ -528,6 +537,9 @@ static int dm_op(const struct dmop_args *op_args)
>          rc = -EINVAL;
>          if ( data->pad )
>              break;
> +        rc = -EPERM;
> +        if ( hvm_ioreq_is_internal(data->id) )
> +            break;
> 
>          rc = hvm_set_ioreq_server_state(d, data->id, !!data->enabled);
>          break;
> @@ -541,6 +553,9 @@ static int dm_op(const struct dmop_args *op_args)
>          rc = -EINVAL;
>          if ( data->pad )
>              break;
> +        rc = -EPERM;
> +        if ( hvm_ioreq_is_internal(data->id) )
> +            break;
> 
>          rc = hvm_destroy_ioreq_server(d, data->id);
>          break;
> diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
> index 95492bc111..dbc5e6b4c5 100644
> --- a/xen/arch/x86/hvm/ioreq.c
> +++ b/xen/arch/x86/hvm/ioreq.c
> @@ -59,10 +59,11 @@ static struct hvm_ioreq_server *get_ioreq_server(const struct domain *d,
>  /*
>   * Iterate over all possible ioreq servers.
>   *
> - * NOTE: The iteration is backwards such that more recently created
> - *       ioreq servers are favoured in hvm_select_ioreq_server().
> - *       This is a semantic that previously existed when ioreq servers
> - *       were held in a linked list.
> + * NOTE: The iteration is backwards such that internal and more recently
> + *       created external ioreq servers are favoured in
> + *       hvm_select_ioreq_server().
> + *       This is a semantic that previously existed for external servers when
> + *       ioreq servers were held in a linked list.
>   */
>  #define FOR_EACH_IOREQ_SERVER(d, id, s) \
>      for ( (id) = MAX_NR_IOREQ_SERVERS; (id) != 0; ) \
> @@ -70,6 +71,12 @@ static struct hvm_ioreq_server *get_ioreq_server(const struct domain *d,
>              continue; \
>          else
> 
> +#define FOR_EACH_EXTERNAL_IOREQ_SERVER(d, id, s) \
> +    for ( (id) = MAX_NR_EXTERNAL_IOREQ_SERVERS; (id) != 0; ) \
> +        if ( !(s = GET_IOREQ_SERVER(d, --(id))) ) \
> +            continue; \
> +        else
> +
>  static ioreq_t *get_ioreq(struct hvm_ioreq_server *s, struct vcpu *v)
>  {
>      shared_iopage_t *p = s->ioreq.va;
> @@ -86,7 +93,7 @@ bool hvm_io_pending(struct vcpu *v)
>      struct hvm_ioreq_server *s;
>      unsigned int id;
> 
> -    FOR_EACH_IOREQ_SERVER(d, id, s)
> +    FOR_EACH_EXTERNAL_IOREQ_SERVER(d, id, s)
>      {
>          struct hvm_ioreq_vcpu *sv;
> 
> @@ -190,7 +197,7 @@ bool handle_hvm_io_completion(struct vcpu *v)
>          return false;
>      }
> 
> -    FOR_EACH_IOREQ_SERVER(d, id, s)
> +    FOR_EACH_EXTERNAL_IOREQ_SERVER(d, id, s)
>      {
>          struct hvm_ioreq_vcpu *sv;
> 
> @@ -430,7 +437,7 @@ bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
> 
>      spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
> 
> -    FOR_EACH_IOREQ_SERVER(d, id, s)
> +    FOR_EACH_EXTERNAL_IOREQ_SERVER(d, id, s)
>      {
>          if ( (s->ioreq.page == page) || (s->bufioreq.page == page) )
>          {
> @@ -688,7 +695,7 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
>      return rc;
>  }
> 
> -static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s)
> +static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s, bool internal)
>  {
>      struct hvm_ioreq_vcpu *sv;
> 
> @@ -697,29 +704,40 @@ static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s)
>      if ( s->enabled )
>          goto done;
> 
> -    hvm_remove_ioreq_gfn(s, false);
> -    hvm_remove_ioreq_gfn(s, true);
> +    if ( !internal )
> +    {
> +        hvm_remove_ioreq_gfn(s, false);
> +        hvm_remove_ioreq_gfn(s, true);
> 
> -    s->enabled = true;
> +        list_for_each_entry ( sv,
> +                              &s->ioreq_vcpu_list,
> +                              list_entry )
> +            hvm_update_ioreq_evtchn(s, sv);
> +    }
> +    else if ( !s->handler )
> +    {
> +        ASSERT_UNREACHABLE();
> +        goto done;
> +    }
> 
> -    list_for_each_entry ( sv,
> -                          &s->ioreq_vcpu_list,
> -                          list_entry )
> -        hvm_update_ioreq_evtchn(s, sv);
> +    s->enabled = true;
> 
>    done:
>      spin_unlock(&s->lock);
>  }
> 
> -static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s)
> +static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s, bool internal)
>  {
>      spin_lock(&s->lock);
> 
>      if ( !s->enabled )
>          goto done;
> 
> -    hvm_add_ioreq_gfn(s, true);
> -    hvm_add_ioreq_gfn(s, false);
> +    if ( !internal )
> +    {
> +        hvm_add_ioreq_gfn(s, true);
> +        hvm_add_ioreq_gfn(s, false);
> +    }
> 
>      s->enabled = false;
> 
> @@ -736,33 +754,39 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
>      int rc;
> 
>      s->target = d;
> -
> -    get_knownalive_domain(currd);
> -    s->emulator = currd;
> -
>      spin_lock_init(&s->lock);
> -    INIT_LIST_HEAD(&s->ioreq_vcpu_list);
> -    spin_lock_init(&s->bufioreq_lock);
> -
> -    s->ioreq.gfn = INVALID_GFN;
> -    s->bufioreq.gfn = INVALID_GFN;
> 
>      rc = hvm_ioreq_server_alloc_rangesets(s, id);
>      if ( rc )
>          return rc;
> 
> -    s->bufioreq_handling = bufioreq_handling;
> -
> -    for_each_vcpu ( d, v )
> +    if ( !hvm_ioreq_is_internal(id) )
>      {
> -        rc = hvm_ioreq_server_add_vcpu(s, v);
> -        if ( rc )
> -            goto fail_add;
> +        get_knownalive_domain(currd);
> +
> +        s->emulator = currd;
> +        INIT_LIST_HEAD(&s->ioreq_vcpu_list);
> +        spin_lock_init(&s->bufioreq_lock);
> +
> +        s->ioreq.gfn = INVALID_GFN;
> +        s->bufioreq.gfn = INVALID_GFN;
> +
> +        s->bufioreq_handling = bufioreq_handling;
> +
> +        for_each_vcpu ( d, v )
> +        {
> +            rc = hvm_ioreq_server_add_vcpu(s, v);
> +            if ( rc )
> +                goto fail_add;
> +        }
>      }
> +    else
> +        s->handler = NULL;

The struct is zeroed out, so initializing the handler is not necessary.
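
i.e. (an untested sketch, assuming the server struct keeps being
zero-allocated with xzalloc() in hvm_create_ioreq_server()) the end of the
hunk could simply be:

            for_each_vcpu ( d, v )
            {
                rc = hvm_ioreq_server_add_vcpu(s, v);
                if ( rc )
                    goto fail_add;
            }
        }
        /* No 'else' needed: s->handler is already NULL in the zeroed struct. */

        return 0;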

> 
>      return 0;
> 
>   fail_add:
> +    ASSERT(!hvm_ioreq_is_internal(id));
>      hvm_ioreq_server_remove_all_vcpus(s);
>      hvm_ioreq_server_unmap_pages(s);
> 

I think it would be worthwhile repeating that ASSERT in the called functions,
and in other functions that only operate on external ioreq servers, e.g.
hvm_ioreq_server_add_vcpu(), hvm_ioreq_server_map_pages(), etc.
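
Since those helpers only take the server pointer, that would mean either
passing the id down or having the server struct record whether it is
internal; e.g. (an untested sketch, assuming an extra id parameter is
acceptable):

    static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
                                         ioservid_t id, struct vcpu *v)
    {
        /* vcpu lists and event channels only exist for external servers. */
        ASSERT(!hvm_ioreq_is_internal(id));

        /* ... existing body unchanged ... */
    }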

> @@ -772,30 +796,34 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
>      return rc;
>  }
> 
> -static void hvm_ioreq_server_deinit(struct hvm_ioreq_server *s)
> +static void hvm_ioreq_server_deinit(struct hvm_ioreq_server *s, bool internal)
>  {
>      ASSERT(!s->enabled);
> -    hvm_ioreq_server_remove_all_vcpus(s);
> -
> -    /*
> -     * NOTE: It is safe to call both hvm_ioreq_server_unmap_pages() and
> -     *       hvm_ioreq_server_free_pages() in that order.
> -     *       This is because the former will do nothing if the pages
> -     *       are not mapped, leaving the page to be freed by the latter.
> -     *       However if the pages are mapped then the former will set
> -     *       the page_info pointer to NULL, meaning the latter will do
> -     *       nothing.
> -     */
> -    hvm_ioreq_server_unmap_pages(s);
> -    hvm_ioreq_server_free_pages(s);
> 
>      hvm_ioreq_server_free_rangesets(s);
> 
> -    put_domain(s->emulator);
> +    if ( !internal )

Perhaps 'if ( internal ) return;' here, so as to avoid indenting the code
below and thus shrink the diff.
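
i.e. (untested):

    static void hvm_ioreq_server_deinit(struct hvm_ioreq_server *s, bool internal)
    {
        ASSERT(!s->enabled);

        hvm_ioreq_server_free_rangesets(s);

        /* Internal servers have no vcpu state, pages or emulator reference. */
        if ( internal )
            return;

        hvm_ioreq_server_remove_all_vcpus(s);

        /*
         * NOTE: it is safe to call hvm_ioreq_server_unmap_pages() followed by
         *       hvm_ioreq_server_free_pages(); see the comment in the hunk below.
         */
        hvm_ioreq_server_unmap_pages(s);
        hvm_ioreq_server_free_pages(s);

        put_domain(s->emulator);
    }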

> +    {
> +        hvm_ioreq_server_remove_all_vcpus(s);
> +
> +        /*
> +         * NOTE: It is safe to call both hvm_ioreq_server_unmap_pages() and
> +         *       hvm_ioreq_server_free_pages() in that order.
> +         *       This is because the former will do nothing if the pages
> +         *       are not mapped, leaving the page to be freed by the latter.
> +         *       However if the pages are mapped then the former will set
> +         *       the page_info pointer to NULL, meaning the latter will do
> +         *       nothing.
> +         */
> +        hvm_ioreq_server_unmap_pages(s);
> +        hvm_ioreq_server_free_pages(s);
> +
> +        put_domain(s->emulator);
> +    }
>  }
> 
>  int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
> -                            ioservid_t *id)
> +                            ioservid_t *id, bool internal)
>  {
>      struct hvm_ioreq_server *s;
>      unsigned int i;
> @@ -811,7 +839,9 @@ int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
>      domain_pause(d);
>      spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
> 
> -    for ( i = 0; i < MAX_NR_IOREQ_SERVERS; i++ )
> +    for ( i = (internal ? MAX_NR_EXTERNAL_IOREQ_SERVERS : 0);
> +          i < (internal ? MAX_NR_IOREQ_SERVERS : MAX_NR_EXTERNAL_IOREQ_SERVERS);
> +          i++ )
>      {
>          if ( !GET_IOREQ_SERVER(d, i) )
>              break;
> @@ -821,6 +851,9 @@ int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
>      if ( i >= MAX_NR_IOREQ_SERVERS )
>          goto fail;
> 
> +    ASSERT((internal &&
> +            i >= MAX_NR_EXTERNAL_IOREQ_SERVERS && i < MAX_NR_IOREQ_SERVERS) ||
> +           (!internal && i < MAX_NR_EXTERNAL_IOREQ_SERVERS));
>      /*
>       * It is safe to call set_ioreq_server() prior to
>       * hvm_ioreq_server_init() since the target domain is paused.
> @@ -864,20 +897,21 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
>          goto out;
> 
>      rc = -EPERM;
> -    if ( s->emulator != current->domain )
> +    /* NB: internal servers cannot be destroyed. */
> +    if ( hvm_ioreq_is_internal(id) || s->emulator != current->domain )

Shouldn't the test of hvm_ioreq_is_internal(id) simply be an ASSERT? This 
function should only be called from a dm_op(), right?
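
i.e. (a sketch, relying on the dm.c changes earlier in this patch to reject
internal ids before getting here):

    rc = -EPERM;
    /* Internal servers are filtered out by dm_op() before we get here. */
    ASSERT(!hvm_ioreq_is_internal(id));
    if ( s->emulator != current->domain )
        goto out;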

>          goto out;
> 
>      domain_pause(d);
> 
>      p2m_set_ioreq_server(d, 0, id);
> 
> -    hvm_ioreq_server_disable(s);
> +    hvm_ioreq_server_disable(s, hvm_ioreq_is_internal(id));
> 
>      /*
>       * It is safe to call hvm_ioreq_server_deinit() prior to
>       * set_ioreq_server() since the target domain is paused.
>       */
> -    hvm_ioreq_server_deinit(s);
> +    hvm_ioreq_server_deinit(s, hvm_ioreq_is_internal(id));
>      set_ioreq_server(d, id, NULL);
> 
>      domain_unpause(d);
> @@ -909,7 +943,8 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
>          goto out;
> 
>      rc = -EPERM;
> -    if ( s->emulator != current->domain )
> +    /* NB: don't allow fetching information from internal ioreq servers. */
> +    if ( hvm_ioreq_is_internal(id) || s->emulator != current->domain )

Again here, and several places below.

  Paul

>          goto out;
> 
>      if ( ioreq_gfn || bufioreq_gfn )
> @@ -956,7 +991,7 @@ int hvm_get_ioreq_server_frame(struct domain *d, ioservid_t id,
>          goto out;
> 
>      rc = -EPERM;
> -    if ( s->emulator != current->domain )
> +    if ( hvm_ioreq_is_internal(id) || s->emulator != current->domain )
>          goto out;
> 
>      rc = hvm_ioreq_server_alloc_pages(s);
> @@ -1010,7 +1045,7 @@ int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
>          goto out;
> 
>      rc = -EPERM;
> -    if ( s->emulator != current->domain )
> +    if ( !hvm_ioreq_is_internal(id) && s->emulator != current->domain )
>          goto out;
> 
>      switch ( type )
> @@ -1068,7 +1103,7 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
>          goto out;
> 
>      rc = -EPERM;
> -    if ( s->emulator != current->domain )
> +    if ( !hvm_ioreq_is_internal(id) && s->emulator != current->domain )
>          goto out;
> 
>      switch ( type )
> @@ -1128,6 +1163,14 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
>      if ( !s )
>          goto out;
> 
> +    /*
> +     * NB: do not support mapping internal ioreq servers to memory types, as
> +     * the current internal ioreq servers don't need this feature and it's not
> +     * been tested.
> +     */
> +    rc = -EINVAL;
> +    if ( hvm_ioreq_is_internal(id) )
> +        goto out;
>      rc = -EPERM;
>      if ( s->emulator != current->domain )
>          goto out;
> @@ -1163,15 +1206,15 @@ int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
>          goto out;
> 
>      rc = -EPERM;
> -    if ( s->emulator != current->domain )
> +    if ( !hvm_ioreq_is_internal(id) && s->emulator != current->domain )
>          goto out;
> 
>      domain_pause(d);
> 
>      if ( enabled )
> -        hvm_ioreq_server_enable(s);
> +        hvm_ioreq_server_enable(s, hvm_ioreq_is_internal(id));
>      else
> -        hvm_ioreq_server_disable(s);
> +        hvm_ioreq_server_disable(s, hvm_ioreq_is_internal(id));
> 
>      domain_unpause(d);
> 
> @@ -1190,7 +1233,7 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
> 
>      spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
> 
> -    FOR_EACH_IOREQ_SERVER(d, id, s)
> +    FOR_EACH_EXTERNAL_IOREQ_SERVER(d, id, s)
>      {
>          rc = hvm_ioreq_server_add_vcpu(s, v);
>          if ( rc )
> @@ -1202,7 +1245,7 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
>      return 0;
> 
>   fail:
> -    while ( id++ != MAX_NR_IOREQ_SERVERS )
> +    while ( id++ != MAX_NR_EXTERNAL_IOREQ_SERVERS )
>      {
>          s = GET_IOREQ_SERVER(d, id);
> 
> @@ -1224,7 +1267,7 @@ void hvm_all_ioreq_servers_remove_vcpu(struct domain *d, struct vcpu *v)
> 
>      spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
> 
> -    FOR_EACH_IOREQ_SERVER(d, id, s)
> +    FOR_EACH_EXTERNAL_IOREQ_SERVER(d, id, s)
>          hvm_ioreq_server_remove_vcpu(s, v);
> 
>      spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
> @@ -1241,13 +1284,13 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
> 
>      FOR_EACH_IOREQ_SERVER(d, id, s)
>      {
> -        hvm_ioreq_server_disable(s);
> +        hvm_ioreq_server_disable(s, hvm_ioreq_is_internal(id));
> 
>          /*
>           * It is safe to call hvm_ioreq_server_deinit() prior to
>           * set_ioreq_server() since the target domain is being destroyed.
>           */
> -        hvm_ioreq_server_deinit(s);
> +        hvm_ioreq_server_deinit(s, hvm_ioreq_is_internal(id));
>          set_ioreq_server(d, id, NULL);
> 
>          xfree(s);
> diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
> index 9fbe83f45a..9f92838b6e 100644
> --- a/xen/include/asm-x86/hvm/domain.h
> +++ b/xen/include/asm-x86/hvm/domain.h
> @@ -97,7 +97,10 @@ struct hvm_pi_ops {
>      void (*vcpu_block)(struct vcpu *);
>  };
> 
> -#define MAX_NR_IOREQ_SERVERS 8
> +#define MAX_NR_EXTERNAL_IOREQ_SERVERS 8
> +#define MAX_NR_INTERNAL_IOREQ_SERVERS 1
> +#define MAX_NR_IOREQ_SERVERS \
> +    (MAX_NR_EXTERNAL_IOREQ_SERVERS + MAX_NR_INTERNAL_IOREQ_SERVERS)
> 
>  struct hvm_domain {
>      /* Guest page range used for non-default ioreq servers */
> diff --git a/xen/include/asm-x86/hvm/ioreq.h b/xen/include/asm-x86/hvm/ioreq.h
> index 65491c48d2..c3917aa74d 100644
> --- a/xen/include/asm-x86/hvm/ioreq.h
> +++ b/xen/include/asm-x86/hvm/ioreq.h
> @@ -24,7 +24,7 @@ bool handle_hvm_io_completion(struct vcpu *v);
>  bool is_ioreq_server_page(struct domain *d, const struct page_info *page);
> 
>  int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
> -                            ioservid_t *id);
> +                            ioservid_t *id, bool internal);
>  int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id);
>  int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
>                                unsigned long *ioreq_gfn,
> @@ -54,6 +54,12 @@ unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered);
> 
>  void hvm_ioreq_init(struct domain *d);
> 
> +static inline bool hvm_ioreq_is_internal(unsigned int id)
> +{
> +    ASSERT(id < MAX_NR_IOREQ_SERVERS);
> +    return id >= MAX_NR_EXTERNAL_IOREQ_SERVERS;
> +}
> +
>  #endif /* __ASM_X86_HVM_IOREQ_H__ */
> 
>  /*
> --
> 2.22.0

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel