
Re: [Xen-devel] [PATCH 1/4] xen/tmem: Switch to using bool



On Wed, Jun 28, 2017 at 12:16:19PM +0100, Andrew Cooper wrote:
>  * Drop redundant initialisers
>  * Style corrections while changing client_over_quota()
>  * Drop all write-only bools from do_tmem_op()

s/write-only/useless write-only/
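By "write-only" I mean locals that are assigned on various paths but never
read back, so the stores are dead and the variables carry no information.
A minimal sketch of the pattern (hypothetical code, not the tmem source):

#include <stdbool.h>

static int do_op(int cmd)
{
    bool succ = false, flush = false;   /* written below, never read */
    int rc = 0;

    switch ( cmd )
    {
    case 0:
        rc = 1;
        succ = true;    /* dead store */
        break;
    case 1:
        flush = true;   /* dead store */
        rc = 2;
        break;
    }

    return rc;          /* succ/flush never consumed, so drop them */
}

int main(void)
{
    return do_op(0) == 1 ? 0 : 1;
}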

> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>

Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
> ---
> CC: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
> ---
>  xen/common/tmem.c          | 39 ++++++++++++++++-----------------------
>  xen/common/tmem_control.c  | 16 ++++++++--------
>  xen/common/tmem_xen.c      | 12 ++++++------
>  xen/include/xen/tmem_xen.h | 24 ++++++++++++------------
>  4 files changed, 42 insertions(+), 49 deletions(-)
> 
> diff --git a/xen/common/tmem.c b/xen/common/tmem.c
> index 306dda6..c955cf7 100644
> --- a/xen/common/tmem.c
> +++ b/xen/common/tmem.c
> @@ -71,7 +71,7 @@ struct tmem_page_descriptor {
>      pagesize_t size; /* 0 == PAGE_SIZE (pfp), -1 == data invalid,
>                      else compressed data (cdata). */
>      uint32_t index;
> -    bool_t eviction_attempted;  /* CHANGE TO lifetimes? (settable). */
> +    bool eviction_attempted;  /* CHANGE TO lifetimes? (settable). */
>      union {
>          struct page_info *pfp;  /* Page frame pointer. */
>          char *cdata; /* Compressed data. */
> @@ -884,39 +884,41 @@ static void client_flush(struct client *client)
>      client_free(client);
>  }
>  
> -static bool_t client_over_quota(struct client *client)
> +static bool client_over_quota(const struct client *client)
>  {
>      int total = _atomic_read(tmem_global.client_weight_total);
>  
>      ASSERT(client != NULL);
>      if ( (total == 0) || (client->info.weight == 0) ||
>            (client->eph_count == 0) )
> -        return 0;
> -    return ( ((tmem_global.eph_count*100L) / client->eph_count ) >
> -             ((total*100L) / client->info.weight) );
> +        return false;
> +
> +    return (((tmem_global.eph_count * 100L) / client->eph_count) >
> +            ((total * 100L) / client->info.weight));
>  }
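For anyone reading along, the * 100L scaling keeps the whole comparison in
integer arithmetic with two extra digits of precision. A standalone
sanity-check of the expression, with made-up numbers rather than real tmem
state:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
    long global_eph = 300, client_eph = 100;    /* ephemeral page counts */
    long total_weight = 40, client_weight = 10; /* weight bookkeeping */

    bool over = ((global_eph * 100L) / client_eph) >
                ((total_weight * 100L) / client_weight);

    /* 300 vs 400 -> not over quota. */
    printf("over_quota = %d\n", over);
    return 0;
}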
>  
>  /************ MEMORY REVOCATION ROUTINES *******************************/
>  
> -static bool_t tmem_try_to_evict_pgp(struct tmem_page_descriptor *pgp, bool_t *hold_pool_rwlock)
> +static bool tmem_try_to_evict_pgp(struct tmem_page_descriptor *pgp,
> +                                  bool *hold_pool_rwlock)
>  {
>      struct tmem_object_root *obj = pgp->us.obj;
>      struct tmem_pool *pool = obj->pool;
>  
>      if ( pool->is_dying )
> -        return 0;
> +        return false;
>      if ( spin_trylock(&obj->obj_spinlock) )
>      {
>          if ( obj->pgp_count > 1 )
> -            return 1;
> +            return true;
>          if ( write_trylock(&pool->pool_rwlock) )
>          {
>              *hold_pool_rwlock = 1;
> -            return 1;
> +            return true;
>          }
>          spin_unlock(&obj->obj_spinlock);
>      }
> -    return 0;
> +    return false;
>  }
>  
>  int tmem_evict(void)
> @@ -926,7 +928,7 @@ int tmem_evict(void)
>      struct tmem_object_root *obj;
>      struct tmem_pool *pool;
>      int ret = 0;
> -    bool_t hold_pool_rwlock = 0;
> +    bool hold_pool_rwlock = false;
>  
>      tmem_stats.evict_attempts++;
>      spin_lock(&eph_lists_spinlock);
> @@ -995,7 +997,7 @@ int tmem_evict(void)
>   * is a minimum amount of memory (1MB) available BEFORE any data structure
>   * locks are held.
>   */
> -static inline bool_t tmem_ensure_avail_pages(void)
> +static inline bool tmem_ensure_avail_pages(void)
>  {
>      int failed_evict = 10;
>      unsigned long free_mem;
> @@ -1004,12 +1006,12 @@ static inline bool_t tmem_ensure_avail_pages(void)
>          free_mem = (tmem_page_list_pages + total_free_pages())
>                          >> (20 - PAGE_SHIFT);
>          if ( free_mem )
> -            return 1;
> +            return true;
>          if ( !tmem_evict() )
>              failed_evict--;
>      } while ( failed_evict > 0 );
>  
> -    return 0;
> +    return false;
>  }
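Side note on the shift above: ">> (20 - PAGE_SHIFT)" converts a page count
into MiB, since 2^20 bytes per MiB divided by 2^PAGE_SHIFT bytes per page
gives 2^(20 - PAGE_SHIFT) pages per MiB. Illustration assuming 4 KiB pages
(PAGE_SHIFT == 12):

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
    unsigned long pages = 1024;                     /* 1024 * 4 KiB = 4 MiB */
    unsigned long mib = pages >> (20 - PAGE_SHIFT); /* 1024 >> 8 == 4 */

    printf("%lu pages = %lu MiB\n", pages, mib);
    return 0;
}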
>  
>  /************ TMEM CORE OPERATIONS ************************************/
> @@ -1879,9 +1881,6 @@ long do_tmem_op(tmem_cli_op_t uops)
>      struct tmem_pool *pool = NULL;
>      struct xen_tmem_oid *oidp;
>      int rc = 0;
> -    bool_t succ_get = 0, succ_put = 0;
> -    bool_t non_succ_get = 0, non_succ_put = 0;
> -    bool_t flush = 0, flush_obj = 0;
>  
>      if ( !tmem_initialized )
>          return -ENODEV;
> @@ -1965,22 +1964,16 @@ long do_tmem_op(tmem_cli_op_t uops)
>                                  tmem_cli_buf_null);
>                  else
>                      rc = -ENOMEM;
> -                if (rc == 1) succ_put = 1;
> -                else non_succ_put = 1;
>                  break;
>              case TMEM_GET_PAGE:
>                  rc = do_tmem_get(pool, oidp, op.u.gen.index, op.u.gen.cmfn,
>                                  tmem_cli_buf_null);
> -                if (rc == 1) succ_get = 1;
> -                else non_succ_get = 1;
>                  break;
>              case TMEM_FLUSH_PAGE:
> -                flush = 1;
>                  rc = do_tmem_flush_page(pool, oidp, op.u.gen.index);
>                  break;
>              case TMEM_FLUSH_OBJECT:
>                  rc = do_tmem_flush_object(pool, oidp);
> -                flush_obj = 1;
>                  break;
>              default:
>                  tmem_client_warn("tmem: op %d not implemented\n", op.cmd);
> diff --git a/xen/common/tmem_control.c b/xen/common/tmem_control.c
> index 2d980e3..30bf6fb 100644
> --- a/xen/common/tmem_control.c
> +++ b/xen/common/tmem_control.c
> @@ -19,8 +19,8 @@
>  static int tmemc_freeze_pools(domid_t cli_id, int arg)
>  {
>      struct client *client;
> -    bool_t freeze = (arg == XEN_SYSCTL_TMEM_OP_FREEZE) ? 1 : 0;
> -    bool_t destroy = (arg == XEN_SYSCTL_TMEM_OP_DESTROY) ? 1 : 0;
> +    bool freeze = arg == XEN_SYSCTL_TMEM_OP_FREEZE;
> +    bool destroy = arg == XEN_SYSCTL_TMEM_OP_DESTROY;
>      char *s;
>  
>      s = destroy ? "destroyed" : ( freeze ? "frozen" : "thawed" );
> @@ -96,12 +96,12 @@ static int tmemc_flush_mem(domid_t cli_id, uint32_t kb)
>  #define BSIZE 1024
>  
>  static int tmemc_list_client(struct client *c, tmem_cli_va_param_t buf,
> -                             int off, uint32_t len, bool_t use_long)
> +                             int off, uint32_t len, bool use_long)
>  {
>      char info[BSIZE];
>      int i, n = 0, sum = 0;
>      struct tmem_pool *p;
> -    bool_t s;
> +    bool s;
>  
>      n = scnprintf(info,BSIZE,"C=CI:%d,ww:%d,co:%d,fr:%d,"
>          "Tc:%"PRIu64",Ge:%ld,Pp:%ld,Gp:%ld%c",
> @@ -149,7 +149,7 @@ static int tmemc_list_client(struct client *c, tmem_cli_va_param_t buf,
>  }
>  
>  static int tmemc_list_shared(tmem_cli_va_param_t buf, int off, uint32_t len,
> -                              bool_t use_long)
> +                             bool use_long)
>  {
>      char info[BSIZE];
>      int i, n = 0, sum = 0;
> @@ -188,7 +188,7 @@ static int tmemc_list_shared(tmem_cli_va_param_t buf, int off, uint32_t len,
>  }
>  
>  static int tmemc_list_global_perf(tmem_cli_va_param_t buf, int off,
> -                                  uint32_t len, bool_t use_long)
> +                                  uint32_t len, bool use_long)
>  {
>      char info[BSIZE];
>      int n = 0, sum = 0;
> @@ -204,7 +204,7 @@ static int tmemc_list_global_perf(tmem_cli_va_param_t buf, int off,
>  }
>  
>  static int tmemc_list_global(tmem_cli_va_param_t buf, int off, uint32_t len,
> -                              bool_t use_long)
> +                             bool use_long)
>  {
>      char info[BSIZE];
>      int n = 0, sum = off;
> @@ -238,7 +238,7 @@ static int tmemc_list_global(tmem_cli_va_param_t buf, int off, uint32_t len,
>  }
>  
>  static int tmemc_list(domid_t cli_id, tmem_cli_va_param_t buf, uint32_t len,
> -                               bool_t use_long)
> +                      bool use_long)
>  {
>      struct client *client;
>      int off = 0;
> diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
> index 725ae93..20f74b2 100644
> --- a/xen/common/tmem_xen.c
> +++ b/xen/common/tmem_xen.c
> @@ -14,10 +14,10 @@
>  #include <xen/cpu.h>
>  #include <xen/init.h>
>  
> -bool_t __read_mostly opt_tmem = 0;
> +bool __read_mostly opt_tmem;
>  boolean_param("tmem", opt_tmem);
>  
> -bool_t __read_mostly opt_tmem_compress = 0;
> +bool __read_mostly opt_tmem_compress;
>  boolean_param("tmem_compress", opt_tmem_compress);
>  
>  atomic_t freeable_page_count = ATOMIC_INIT(0);
> @@ -32,14 +32,14 @@ static DEFINE_PER_CPU_READ_MOSTLY(void *, scratch_page);
>  
>  #if defined(CONFIG_ARM)
>  static inline void *cli_get_page(xen_pfn_t cmfn, unsigned long *pcli_mfn,
> -                                 struct page_info **pcli_pfp, bool_t cli_write)
> +                                 struct page_info **pcli_pfp, bool cli_write)
>  {
>      ASSERT_UNREACHABLE();
>      return NULL;
>  }
>  
>  static inline void cli_put_page(void *cli_va, struct page_info *cli_pfp,
> -                                unsigned long cli_mfn, bool_t mark_dirty)
> +                                unsigned long cli_mfn, bool mark_dirty)
>  {
>      ASSERT_UNREACHABLE();
>  }
> @@ -47,7 +47,7 @@ static inline void cli_put_page(void *cli_va, struct page_info *cli_pfp,
>  #include <asm/p2m.h>
>  
>  static inline void *cli_get_page(xen_pfn_t cmfn, unsigned long *pcli_mfn,
> -                                 struct page_info **pcli_pfp, bool_t cli_write)
> +                                 struct page_info **pcli_pfp, bool cli_write)
>  {
>      p2m_type_t t;
>      struct page_info *page;
> @@ -72,7 +72,7 @@ static inline void *cli_get_page(xen_pfn_t cmfn, unsigned long *pcli_mfn,
>  }
>  
>  static inline void cli_put_page(void *cli_va, struct page_info *cli_pfp,
> -                                unsigned long cli_mfn, bool_t mark_dirty)
> +                                unsigned long cli_mfn, bool mark_dirty)
>  {
>      if ( mark_dirty )
>      {
> diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
> index dc5888c..542c0b3 100644
> --- a/xen/include/xen/tmem_xen.h
> +++ b/xen/include/xen/tmem_xen.h
> @@ -35,27 +35,27 @@ extern atomic_t freeable_page_count;
>  extern int tmem_init(void);
>  #define tmem_hash hash_long
>  
> -extern bool_t opt_tmem_compress;
> -static inline bool_t tmem_compression_enabled(void)
> +extern bool opt_tmem_compress;
> +static inline bool tmem_compression_enabled(void)
>  {
>      return opt_tmem_compress;
>  }
>  
>  #ifdef CONFIG_TMEM
> -extern bool_t opt_tmem;
> -static inline bool_t tmem_enabled(void)
> +extern bool opt_tmem;
> +static inline bool tmem_enabled(void)
>  {
>      return opt_tmem;
>  }
>  
>  static inline void tmem_disable(void)
>  {
> -    opt_tmem = 0;
> +    opt_tmem = false;
>  }
>  #else
> -static inline bool_t tmem_enabled(void)
> +static inline bool tmem_enabled(void)
>  {
> -    return 0;
> +    return false;
>  }
>  
>  static inline void tmem_disable(void)
> @@ -266,7 +266,7 @@ struct tmem_global {
>      struct list_head ephemeral_page_list;  /* All pages in ephemeral pools. */
>      struct list_head client_list;
>      struct tmem_pool *shared_pools[MAX_GLOBAL_SHARED_POOLS];
> -    bool_t shared_auth;
> +    bool shared_auth;
>      long eph_count;  /* Atomicity depends on eph_lists_spinlock. */
>      atomic_t client_weight_total;
>  };
> @@ -286,7 +286,7 @@ struct client {
>      domid_t cli_id;
>      xen_tmem_client_t info;
>      /* For save/restore/migration. */
> -    bool_t was_frozen;
> +    bool was_frozen;
>      struct list_head persistent_invalidated_list;
>      struct tmem_page_descriptor *cur_pgp;
>      /* Statistics collection. */
> @@ -307,9 +307,9 @@ struct client {
>  #define is_shared(_p)      (_p->shared)
>  
>  struct tmem_pool {
> -    bool_t shared;
> -    bool_t persistent;
> -    bool_t is_dying;
> +    bool shared;
> +    bool persistent;
> +    bool is_dying;
>      struct client *client;
>      uint64_t uuid[2]; /* 0 for private, non-zero for shared. */
>      uint32_t pool_id;
> -- 
> 2.1.4
> 
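One more observation, for the archives: on the ABIs Xen targets, sizeof(bool)
is 1, the same as the old char-based bool_t, so none of the struct layouts
touched here should change. A quick compile-time check one could use to
confirm that assumption (illustrative only, not part of the patch):

#include <stdbool.h>

typedef char bool_t;    /* the old Xen-style typedef, for comparison */

_Static_assert(sizeof(bool) == sizeof(bool_t),
               "bool/bool_t size mismatch would change struct layout");

int main(void)
{
    return 0;
}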


 

