
Re: [Xen-devel] [PATCH v1 12/12] tmem: Batch and squash XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_[FLAGS, NPAGES, UUID] in one sub-call: XEN_SYSCTL_TMEM_OP_GET_POOLS.



>>> On 28.09.16 at 11:42, <konrad.wilk@xxxxxxxxxx> wrote:
> @@ -1592,6 +1594,7 @@ static int do_tmem_new_pool(domid_t this_cli_id,
>  
>  out:
>      tmem_client_info("pool_id=%d\n", d_poolid);
> +    client->info.nr_pools ++;

Stray blank before the ++?
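I.e. presumably this is meant to be

    client->info.nr_pools++;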

> --- a/xen/common/tmem_control.c
> +++ b/xen/common/tmem_control.c
> @@ -276,6 +276,8 @@ static int __tmemc_set_var(struct client *client,
>      if ( info.maxpools > MAX_POOLS_PER_DOMAIN )
>          return -ERANGE;
>  
> +    /* Ignore info.nr_pools. */
> +
>      if ( info.weight != client->info.weight )
>      {
>          old_weight = client->info.weight;
> @@ -342,46 +344,63 @@ static int tmemc_get_client_info(int cli_id,
>      return 0;
>  }
>  
> -static int tmemc_save_subop(int cli_id, uint32_t pool_id, uint32_t subop,
> -                            XEN_GUEST_HANDLE_PARAM(void) buf, uint32_t arg)
> +static int tmemc_get_pool(int cli_id,
> +                          XEN_GUEST_HANDLE_PARAM(xen_sysctl_tmem_pool_info_t) pools,
> +                          uint32_t len)
>  {
>      struct client *client = tmem_client_from_cli_id(cli_id);
> -    struct tmem_pool *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
> -                   ? NULL : client->pools[pool_id];
> -    int rc = -1;
> +    unsigned int i, idx;
> +    int rc = 0;
> +    unsigned int nr = len / sizeof(xen_sysctl_tmem_pool_info_t);
> +
> +    if ( len % sizeof(xen_sysctl_tmem_pool_info_t) )
> +        return -EINVAL;
> +
> +    if ( nr >= MAX_POOLS_PER_DOMAIN )
> +        return -E2BIG;

>= seems to be off by one here: a caller supplying space for exactly
MAX_POOLS_PER_DOMAIN entries is valid and shouldn't be rejected.
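I.e. presumably

    if ( nr > MAX_POOLS_PER_DOMAIN )
        return -E2BIG;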

> +    if ( !guest_handle_okay(pools, nr) )
> +        return -EINVAL;
>  
> -    switch(subop)
> +    if ( !client )
> +        return -EINVAL;
> +
> +    for ( idx = 0, i = 0; i < MAX_POOLS_PER_DOMAIN; i++ )
>      {
> -    case XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_FLAGS:
> -         if ( pool == NULL )
> -             break;
> -         rc = (pool->persistent ? TMEM_POOL_PERSIST : 0) |
> +        struct tmem_pool *pool = client->pools[i];
> +        xen_sysctl_tmem_pool_info_t out;
> +
> +        if ( pool == NULL )
> +            continue;
> +
> +        out.flags.raw = (pool->persistent ? TMEM_POOL_PERSIST : 0) |
>                (pool->shared ? TMEM_POOL_SHARED : 0) |
>                (POOL_PAGESHIFT << TMEM_POOL_PAGESIZE_SHIFT) |
>                (TMEM_SPEC_VERSION << TMEM_POOL_VERSION_SHIFT);
> -        break;
> -    case XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_NPAGES:
> -         if ( pool == NULL )
> -             break;
> -        rc = _atomic_read(pool->pgp_count);
> -        break;
> -    case XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_UUID:
> -         if ( pool == NULL )
> -             break;
> -        rc = 0;
> -        if ( copy_to_guest(guest_handle_cast(buf, void), pool->uuid, 2) )
> +        out.n_pages = _atomic_read(pool->pgp_count);
> +        out.uuid[0] = pool->uuid[0];
> +        out.uuid[1] = pool->uuid[1];
> +        out.id = i;
> +
> +        /* N.B. 'idx' != 'i'. */
> +        if ( __copy_to_guest_offset(pools, idx, &out, 1) )
> +        {
>              rc = -EFAULT;
> -        break;
> -    default:
> -        rc = -1;
> +            break;
> +        }
> +        idx ++;

Stray blank again.
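I.e.

    idx++;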

> --- a/xen/include/public/sysctl.h
> +++ b/xen/include/public/sysctl.h
> @@ -760,9 +760,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cat_op_t);
>  #define XEN_SYSCTL_TMEM_OP_SAVE_BEGIN             10
>  #define XEN_SYSCTL_TMEM_OP_GET_CLIENT_INFO        11
>  #define XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO        12
> -#define XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_FLAGS    16
> -#define XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_NPAGES   17
> -#define XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_UUID     18
> +#define XEN_SYSCTL_TMEM_OP_GET_POOLS              16

At the latest here, where you re-assign a meaning to an existing op
value, bumping XEN_SYSCTL_INTERFACE_VERSION becomes unavoidable.
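Something along these lines (the numbers below are purely illustrative;
the actual bump of course needs to start from whatever value is
currently in the tree):

    /* xen/include/public/sysctl.h */
    -#define XEN_SYSCTL_INTERFACE_VERSION 0x0000000D
    +#define XEN_SYSCTL_INTERFACE_VERSION 0x0000000E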

> @@ -803,6 +802,31 @@ struct tmem_client {
>  typedef struct tmem_client xen_sysctl_tmem_client_t;
>  DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tmem_client_t);
>  
> +/*
> + * XEN_SYSCTL_TMEM_OP_GET_POOLS uses the 'pool' array in
> + * xen_sysctl_tmem_op with this structure. The hypercall will
> + * return the number of entries in 'pool' or a negative value
> + * if an error was encountered.
> + */
> +struct tmem_pool_info {
> +    union {
> +        uint32_t raw;
> +        struct {
> +            uint32_t persist:1,    /* See TMEM_POOL_PERSIST. */
> +                     shared:1,     /* See TMEM_POOL_SHARED. */
> +                     rsv:2,
> +                     pagebits:8,   /* TMEM_POOL_PAGESIZE_[SHIFT,MASK]. */
> +                     rsv2:12,
> +                     version:8;    /* TMEM_POOL_VERSION_[SHIFT,MASK]. */
> +        } u;
> +    } flags;
> +    uint32_t id;                  /* Less than tmem_client.maxpools. */
> +    uint64_t n_pages;
> +    uint64_t uuid[2];

uint64_aligned_t, please, so that the structure layout (and padding)
doesn't differ between 32-bit and 64-bit tool stacks.
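I.e.

    uint64_aligned_t n_pages;
    uint64_aligned_t uuid[2];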

Jan
