
[Xen-devel] [PATCH v3 08/15] tmem: cleanup: drop runtime statistics



Tmem collects a lot of statistics and lists them through tmemc_list().
But those statistics are a mess: they are not well defined and their
output is hard to read.

This patch removes all of those statistics and leaves tmemc_list()
unimplemented (it now just returns -ENOSYS), which makes the subsequent
cleanup work easier. Once the tmem code is in good shape, the statistics
that are really needed will be added back.
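
Many of the dropped counters also track a high-water mark, maintained
through a helper macro that this patch removes as well. A simplified
sketch of that pattern (using the names from the code removed below, and
relying on Xen's atomic_t helpers):

    /* Each atomic counter _c has a companion _c_max recording its peak value. */
    #define atomic_inc_and_max(_c) do {         \
        atomic_inc(&_c);                        \
        if ( _atomic_read(_c) > _c##_max )      \
            _c##_max = _atomic_read(_c);        \
    } while (0)

    static atomic_t global_pgp_count = ATOMIC_INIT(0);
    static int global_pgp_count_max = 0;

    /* Call sites such as atomic_inc_and_max(global_pgp_count) in pgp_alloc()
     * are all deleted by this patch. */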

Signed-off-by: Bob Liu <bob.liu@xxxxxxxxxx>
---
 xen/common/tmem.c          |  368 ++------------------------------------------
 xen/include/xen/tmem_xen.h |    8 -
 2 files changed, 15 insertions(+), 361 deletions(-)

diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index c31141c..205ee95 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -26,29 +26,7 @@
 
 #define TMEM_SPEC_VERSION 1
 
-/* global statistics (none need to be locked) */
-static unsigned long total_tmem_ops = 0;
-static unsigned long errored_tmem_ops = 0;
-static unsigned long total_flush_pool = 0;
-static unsigned long alloc_failed = 0, alloc_page_failed = 0;
-static unsigned long evicted_pgs = 0, evict_attempts = 0;
-static unsigned long relinq_pgs = 0, relinq_attempts = 0;
-static unsigned long max_evicts_per_relinq = 0;
-static unsigned long low_on_memory = 0;
-static unsigned long deduped_puts = 0;
-static unsigned long tot_good_eph_puts = 0;
-static int global_obj_count_max = 0;
-static int global_pgp_count_max = 0;
-static int global_pcd_count_max = 0;
-static int global_page_count_max = 0;
-static int global_rtree_node_count_max = 0;
-static long global_eph_count_max = 0;
-static unsigned long failed_copies;
-static unsigned long pcd_tot_tze_size = 0;
-static unsigned long pcd_tot_csize = 0;
-
 /************ CORE DATA STRUCTURES ************************************/
-
 #define MAX_POOLS_PER_DOMAIN 16
 #define MAX_GLOBAL_SHARED_POOLS  16
 
@@ -61,7 +39,7 @@ struct client {
     struct domain *domain;
     struct xmem_pool *persistent_pool;
     struct list_head ephemeral_page_list;
-    long eph_count, eph_count_max;
+    long eph_count;
     domid_t cli_id;
     uint32_t weight;
     uint32_t cap;
@@ -73,12 +51,6 @@ struct client {
     bool_t was_frozen;
     struct list_head persistent_invalidated_list;
     struct tmem_page_descriptor *cur_pgp;
-    /* statistics collection */
-    unsigned long compress_poor, compress_nomem;
-    unsigned long compressed_pages;
-    uint64_t compressed_sum_size;
-    uint64_t total_cycles;
-    unsigned long succ_pers_puts, succ_eph_gets, succ_pers_gets;
     /* shared pool authentication */
     uint64_t shared_auth_uuid[MAX_GLOBAL_SHARED_POOLS][2];
 };
@@ -109,17 +81,6 @@ struct tmem_pool {
     struct tmem_page_descriptor *cur_pgp;
     /* statistics collection */
     atomic_t pgp_count;
-    int pgp_count_max;
-    long obj_count;  /* atomicity depends on pool_rwlock held for write */
-    long obj_count_max;  
-    unsigned long objnode_count, objnode_count_max;
-    uint64_t sum_life_cycles;
-    uint64_t sum_evicted_cycles;
-    unsigned long puts, good_puts, no_mem_puts;
-    unsigned long dup_puts_flushed, dup_puts_replaced;
-    unsigned long gets, found_gets;
-    unsigned long flushs, flushs_found;
-    unsigned long flush_objs, flush_objs_found;
 };
 
 #define is_persistent(_p)  (_p->persistent)
@@ -132,7 +93,6 @@ struct oid {
 struct tmem_object_root {
     struct oid oid;
     struct rb_node rb_tree_node; /* protected by pool->pool_rwlock */
-    unsigned long objnode_count; /* atomicity depends on obj_spinlock */
     long pgp_count; /* atomicity depends on obj_spinlock */
     struct radix_tree_root tree_root; /* tree of pages within object */
     struct tmem_pool *pool;
@@ -198,7 +158,6 @@ struct rb_root pcd_tree_roots[256]; /* choose based on first byte of page */
 rwlock_t pcd_tree_rwlocks[256]; /* poor man's concurrency for now */
 
 static LIST_HEAD(global_ephemeral_page_list); /* all pages in ephemeral pools */
-
 static LIST_HEAD(global_client_list);
 static LIST_HEAD(global_pool_list);
 
@@ -208,7 +167,6 @@ static atomic_t client_weight_total = ATOMIC_INIT(0);
 static int tmem_initialized = 0;
 
 /************ CONCURRENCY  ***********************************************/
-
 DEFINE_SPINLOCK(tmem_spinlock);  /* used iff tmem_lock_all */
 DEFINE_RWLOCK(tmem_rwlock);      /* used iff !tmem_lock_all */
 static DEFINE_SPINLOCK(eph_lists_spinlock); /* protects global AND clients */
@@ -228,23 +186,6 @@ static DEFINE_SPINLOCK(pers_lists_spinlock);
 
 /* global counters (should use long_atomic_t access) */
 static long global_eph_count = 0; /* atomicity depends on eph_lists_spinlock */
-static atomic_t global_obj_count = ATOMIC_INIT(0);
-static atomic_t global_pgp_count = ATOMIC_INIT(0);
-static atomic_t global_pcd_count = ATOMIC_INIT(0);
-static atomic_t global_page_count = ATOMIC_INIT(0);
-static atomic_t global_rtree_node_count = ATOMIC_INIT(0);
-
-#define atomic_inc_and_max(_c) do { \
-    atomic_inc(&_c); \
-    if ( _atomic_read(_c) > _c##_max ) \
-        _c##_max = _atomic_read(_c); \
-} while (0)
-
-#define atomic_dec_and_assert(_c) do { \
-    atomic_dec(&_c); \
-    ASSERT(_atomic_read(_c) >= 0); \
-} while (0)
-
 
 /************ MEMORY ALLOCATION INTERFACE *****************************/
 static void *tmem_malloc(size_t size, struct tmem_pool *pool)
@@ -261,8 +202,6 @@ static void *tmem_malloc(size_t size, struct tmem_pool *pool)
         ASSERT( tmem_mempool != NULL );
         v = xmem_pool_alloc(size, tmem_mempool);
     }
-    if ( v == NULL )
-        alloc_failed++;
     return v;
 }
 
@@ -288,10 +227,6 @@ static struct page_info *tmem_page_alloc(struct tmem_pool *pool)
         pfp = tmem_alloc_page_thispool(pool->client->domain);
     else
         pfp = tmem_alloc_page(pool,0);
-    if ( pfp == NULL )
-        alloc_page_failed++;
-    else
-        atomic_inc_and_max(global_page_count);
     return pfp;
 }
 
@@ -302,7 +237,6 @@ static void tmem_page_free(struct tmem_pool *pool, struct page_info *pfp)
         tmem_free_page(pfp);
     else
         tmem_free_page_thispool(pfp);
-    atomic_dec_and_assert(global_page_count);
 }
 
 /************ PAGE CONTENT DESCRIPTOR MANIPULATION ROUTINES ***********/
@@ -341,7 +275,6 @@ static void pcd_disassociate(struct tmem_page_descriptor *pgp, struct tmem_pool
     pagesize_t pcd_size = pcd->size;
     pagesize_t pgp_size = pgp->size;
     char *pcd_cdata = pgp->pcd->cdata;
-    pagesize_t pcd_csize = pgp->pcd->size;
 
     ASSERT(tmem_dedup_enabled());
     ASSERT(firstbyte != NOT_SHAREABLE);
@@ -370,25 +303,18 @@ static void pcd_disassociate(struct tmem_page_descriptor *pgp, struct tmem_pool
     RB_CLEAR_NODE(&pcd->pcd_rb_tree_node);
     /* now free up the pcd memory */
     tmem_free(pcd, NULL);
-    atomic_dec_and_assert(global_pcd_count);
     if ( pgp_size != 0 && pcd_size < PAGE_SIZE )
     {
         /* compressed data */
         tmem_free(pcd_cdata, pool);
-        pcd_tot_csize -= pcd_csize;
     }
     else if ( pcd_size != PAGE_SIZE )
     {
         /* trailing zero data */
-        pcd_tot_tze_size -= pcd_size;
         if ( pcd_size )
             tmem_free(pcd_tze, pool);
     } else {
         /* real physical page */
-        if ( tmem_tze_enabled() )
-            pcd_tot_tze_size -= PAGE_SIZE;
-        if ( tmem_compression_enabled() )
-            pcd_tot_csize -= PAGE_SIZE;
         tmem_page_free(pool,pfp);
     }
     tmem_write_unlock(&pcd_tree_rwlocks[firstbyte]);
@@ -469,7 +395,6 @@ static int pcd_associate(struct tmem_page_descriptor *pgp, char *cdata, pagesize
             /* but if compressed, data is assumed static so don't free! */
             if ( cdata == NULL )
                 tmem_page_free(pgp->us.obj->pool,pgp->pfp);
-            deduped_puts++;
             goto match;
         }
     }
@@ -487,7 +412,6 @@ static int pcd_associate(struct tmem_page_descriptor *pgp, char *cdata, pagesize
             goto unlock;
         }
     }
-    atomic_inc_and_max(global_pcd_count);
     RB_CLEAR_NODE(&pcd->pcd_rb_tree_node);  /* is this necessary */
     INIT_LIST_HEAD(&pcd->pgp_list);  /* is this necessary */
     pcd->pgp_ref_count = 0;
@@ -495,7 +419,6 @@ static int pcd_associate(struct tmem_page_descriptor *pgp, char *cdata, pagesize
     {
         memcpy(pcd->cdata,cdata,csize);
         pcd->size = csize;
-        pcd_tot_csize += csize;
     } else if ( pfp_size == 0 ) {
         ASSERT(tmem_tze_enabled());
         pcd->size = 0;
@@ -504,15 +427,10 @@ static int pcd_associate(struct tmem_page_descriptor *pgp, char *cdata, pagesize
          ((pcd->tze = tmem_malloc(pfp_size,pgp->us.obj->pool)) != NULL) ) {
         tmem_tze_copy_from_pfp(pcd->tze,pgp->pfp,pfp_size);
         pcd->size = pfp_size;
-        pcd_tot_tze_size += pfp_size;
         tmem_page_free(pgp->us.obj->pool,pgp->pfp);
     } else {
         pcd->pfp = pgp->pfp;
         pcd->size = PAGE_SIZE;
-        if ( tmem_tze_enabled() )
-            pcd_tot_tze_size += PAGE_SIZE;
-        if ( tmem_compression_enabled() )
-            pcd_tot_csize += PAGE_SIZE;
     }
     rb_link_node(&pcd->pcd_rb_tree_node, parent, new);
     rb_insert_color(&pcd->pcd_rb_tree_node, root);
@@ -555,8 +473,7 @@ static struct tmem_page_descriptor *pgp_alloc(struct tmem_object_root *obj)
     pgp->size = -1;
     pgp->index = -1;
     pgp->timestamp = get_cycles();
-    atomic_inc_and_max(global_pgp_count);
-    atomic_inc_and_max(pool->pgp_count);
+    atomic_inc(&pool->pgp_count);
     return pgp;
 }
 
@@ -580,11 +497,6 @@ static void pgp_free_data(struct tmem_page_descriptor *pgp, struct tmem_pool *po
         tmem_free(pgp->cdata, pool);
     else
         tmem_page_free(pgp->us.obj->pool,pgp->pfp);
-    if ( pool != NULL && pgp_size )
-    {
-        pool->client->compressed_pages--;
-        pool->client->compressed_sum_size -= pgp_size;
-    }
     pgp->pfp = NULL;
     pgp->size = -1;
 }
@@ -605,8 +517,7 @@ static void pgp_free(struct tmem_page_descriptor *pgp, int from_delete)
         ASSERT(list_empty(&pgp->us.client_eph_pages));
     }
     pgp_free_data(pgp, pool);
-    atomic_dec_and_assert(global_pgp_count);
-    atomic_dec_and_assert(pool->pgp_count);
+    atomic_dec(&pool->pgp_count);
     pgp->size = -1;
     if ( is_persistent(pool) && pool->client->live_migrating )
     {
@@ -678,7 +589,6 @@ static void pgp_delete(struct tmem_page_descriptor *pgp, bool_t no_eph_lock)
     ASSERT(pgp->us.obj != NULL);
     ASSERT(pgp->us.obj->pool != NULL);
     life = get_cycles() - pgp->timestamp;
-    pgp->us.obj->pool->sum_life_cycles += life;
     pgp_delist(pgp, no_eph_lock);
     pgp_free(pgp,1);
 }
@@ -736,10 +646,6 @@ static struct radix_tree_node *rtn_alloc(void *arg)
         return NULL;
     objnode->obj = obj;
     memset(&objnode->rtn, 0, sizeof(struct radix_tree_node));
-    if (++obj->pool->objnode_count > obj->pool->objnode_count_max)
-        obj->pool->objnode_count_max = obj->pool->objnode_count;
-    atomic_inc_and_max(global_rtree_node_count);
-    obj->objnode_count++;
     return &objnode->rtn;
 }
 
@@ -755,11 +661,8 @@ static void rtn_free(struct radix_tree_node *rtn, void *arg)
     ASSERT_SPINLOCK(&objnode->obj->obj_spinlock);
     pool = objnode->obj->pool;
     ASSERT(pool != NULL);
-    pool->objnode_count--;
-    objnode->obj->objnode_count--;
     objnode->obj = NULL;
     tmem_free(objnode, pool);
-    atomic_dec_and_assert(global_rtree_node_count);
 }
 
 /************ POOL OBJECT COLLECTION MANIPULATION ROUTINES *******************/
@@ -852,15 +755,11 @@ static void obj_free(struct tmem_object_root *obj, int no_rebalance)
     ASSERT_WRITELOCK(&pool->pool_rwlock);
     if ( obj->tree_root.rnode != NULL ) /* may be a "stump" with no leaves */
         radix_tree_destroy(&obj->tree_root, pgp_destroy);
-    ASSERT((long)obj->objnode_count == 0);
     ASSERT(obj->tree_root.rnode == NULL);
-    pool->obj_count--;
-    ASSERT(pool->obj_count >= 0);
     obj->pool = NULL;
     old_oid = obj->oid;
     oid_set_invalid(&obj->oid);
     obj->last_client = TMEM_CLI_ID_NULL;
-    atomic_dec_and_assert(global_obj_count);
     /* use no_rebalance only if all objects are being destroyed anyway */
     if ( !no_rebalance )
         rb_erase(&obj->rb_tree_node,&pool->obj_rb_root[oid_hash(&old_oid)]);
@@ -907,16 +806,11 @@ static struct tmem_object_root * obj_new(struct tmem_pool *pool, struct oid *oid
     ASSERT_WRITELOCK(&pool->pool_rwlock);
     if ( (obj = tmem_malloc(sizeof(struct tmem_object_root), pool)) == NULL )
         return NULL;
-    pool->obj_count++;
-    if (pool->obj_count > pool->obj_count_max)
-        pool->obj_count_max = pool->obj_count;
-    atomic_inc_and_max(global_obj_count);
     radix_tree_init(&obj->tree_root);
     radix_tree_set_alloc_callbacks(&obj->tree_root, rtn_alloc, rtn_free, obj);
     spin_lock_init(&obj->obj_spinlock);
     obj->pool = pool;
     obj->oid = *oidp;
-    obj->objnode_count = 0;
     obj->pgp_count = 0;
     obj->last_client = TMEM_CLI_ID_NULL;
     tmem_spin_lock(&obj->obj_spinlock);
@@ -980,16 +874,9 @@ static struct tmem_pool * pool_alloc(void)
     INIT_LIST_HEAD(&pool->persistent_page_list);
     pool->cur_pgp = NULL;
     rwlock_init(&pool->pool_rwlock);
-    pool->pgp_count_max = pool->obj_count_max = 0;
-    pool->objnode_count = pool->objnode_count_max = 0;
     atomic_set(&pool->pgp_count,0);
-    pool->obj_count = 0; pool->shared_count = 0;
+    pool->shared_count = 0;
     pool->pageshift = PAGE_SHIFT - 12;
-    pool->good_puts = pool->puts = pool->dup_puts_flushed = 0;
-    pool->dup_puts_replaced = pool->no_mem_puts = 0;
-    pool->found_gets = pool->gets = 0;
-    pool->flushs_found = pool->flushs = 0;
-    pool->flush_objs_found = pool->flush_objs = 0;
     pool->is_dying = 0;
     return pool;
 }
@@ -1165,9 +1052,7 @@ static struct client *client_create(domid_t cli_id)
     INIT_LIST_HEAD(&client->ephemeral_page_list);
     INIT_LIST_HEAD(&client->persistent_invalidated_list);
     client->cur_pgp = NULL;
-    client->eph_count = client->eph_count_max = 0;
-    client->total_cycles = 0; client->succ_pers_puts = 0;
-    client->succ_eph_gets = 0; client->succ_pers_gets = 0;
+    client->eph_count = 0;
     tmem_client_info("ok\n");
     return client;
 
@@ -1275,7 +1160,6 @@ static int tmem_evict(void)
     int ret = 0;
     bool_t hold_pool_rwlock = 0;
 
-    evict_attempts++;
     tmem_spin_lock(&eph_lists_spinlock);
     if ( (client != NULL) && client_over_quota(client) &&
          !list_empty(&client->ephemeral_page_list) )
@@ -1320,7 +1204,6 @@ found:
         tmem_spin_unlock(&obj->obj_spinlock);
     if ( hold_pool_rwlock )
         tmem_write_unlock(&pool->pool_rwlock);
-    evicted_pgs++;
     ret = 1;
 
 out:
@@ -1406,8 +1289,6 @@ static int do_tmem_put_compress(struct tmem_page_descriptor *pgp, xen_pfn_t cmfn
         pgp->cdata = p;
     }
     pgp->size = size;
-    pgp->us.obj->pool->client->compressed_pages++;
-    pgp->us.obj->pool->client->compressed_sum_size += size;
     ret = 1;
 
 out:
@@ -1445,7 +1326,7 @@ static int do_tmem_dup_put(struct tmem_page_descriptor *pgp, xen_pfn_t cmfn,
         else if ( ret == -ENOMEM )
             goto failed_dup;
         else if ( ret == -EFAULT )
-            goto bad_copy;
+            goto cleanup;
     }
 
 copy_uncompressed:
@@ -1456,7 +1337,7 @@ copy_uncompressed:
     pgp->size = 0;
     ret = tmem_copy_from_client(pgp->pfp, cmfn, tmem_cli_buf_null);
     if ( ret < 0 )
-        goto bad_copy;
+        goto cleanup;
     if ( tmem_dedup_enabled() && !is_persistent(pool) )
     {
         if ( pcd_associate(pgp,NULL,0) == -ENOMEM )
@@ -1469,16 +1350,8 @@ done:
         obj->last_client = client->cli_id;
     obj->no_evict = 0;
     tmem_spin_unlock(&obj->obj_spinlock);
-    pool->dup_puts_replaced++;
-    pool->good_puts++;
-    if ( is_persistent(pool) )
-        client->succ_pers_puts++;
     return 1;
 
-bad_copy:
-    failed_copies++;
-    goto cleanup;
-
 failed_dup:
    /* couldn't change out the data, flush the old data and return
     * -ENOSPC instead of -ENOMEM to differentiate failed _dup_ put */
@@ -1496,7 +1369,6 @@ cleanup:
         obj->no_evict = 0;
         tmem_spin_unlock(&obj->obj_spinlock);
     }
-    pool->dup_puts_flushed++;
     return ret;
 }
 
@@ -1512,7 +1384,6 @@ static int do_tmem_put(struct tmem_pool *pool,
     ASSERT(pool != NULL);
     client = pool->client;
     ret = client->frozen ? -EFROZEN : -ENOMEM;
-    pool->puts++;
     /* does page already exist (dup)?  if so, handle specially */
     if ( (obj = obj_find(pool,oidp)) != NULL )
     {
@@ -1563,16 +1434,14 @@ static int do_tmem_put(struct tmem_pool *pool,
             goto insert_page;
         if ( ret == -ENOMEM )
         {
-            client->compress_nomem++;
             goto del_pgp_from_obj;
         }
         if ( ret == 0 )
         {
-            client->compress_poor++;
             goto copy_uncompressed;
         }
         if ( ret == -EFAULT )
-            goto bad_copy;
+            goto del_pgp_from_obj;
     }
 
 copy_uncompressed:
@@ -1583,7 +1452,7 @@ copy_uncompressed:
     }
     ret = tmem_copy_from_client(pgp->pfp, cmfn, clibuf);
     if ( ret < 0 )
-        goto bad_copy;
+        goto del_pgp_from_obj;
 
     if ( tmem_dedup_enabled() && !is_persistent(pool) )
     {
@@ -1597,12 +1466,10 @@ insert_page:
         tmem_spin_lock(&eph_lists_spinlock);
         list_add_tail(&pgp->global_eph_pages,
             &global_ephemeral_page_list);
-        if (++global_eph_count > global_eph_count_max)
-            global_eph_count_max = global_eph_count;
+        ++global_eph_count;
         list_add_tail(&pgp->us.client_eph_pages,
             &client->ephemeral_page_list);
-        if (++client->eph_count > client->eph_count_max)
-            client->eph_count_max = client->eph_count;
+        ++client->eph_count;
         tmem_spin_unlock(&eph_lists_spinlock);
     }
     else
@@ -1619,17 +1486,8 @@ insert_page:
 
     /* free the obj spinlock */
     tmem_spin_unlock(&obj->obj_spinlock);
-    pool->good_puts++;
-
-    if ( is_persistent(pool) )
-        client->succ_pers_puts++;
-    else
-        tot_good_eph_puts++;
     return 1;
 
-bad_copy:
-    failed_copies++;
-
 del_pgp_from_obj:
     ASSERT((obj != NULL) && (pgp != NULL) && (pgp->index != -1));
     pgp_delete_from_obj(obj, pgp->index);
@@ -1648,7 +1506,6 @@ unlock_obj:
         obj->no_evict = 0;
         tmem_spin_unlock(&obj->obj_spinlock);
     }
-    pool->no_mem_puts++;
     return ret;
 }
 
@@ -1663,7 +1520,6 @@ static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index,
     if ( !_atomic_read(pool->pgp_count) )
         return -EEMPTY;
 
-    pool->gets++;
     obj = obj_find(pool,oidp);
     if ( obj == NULL )
         return 0;
@@ -1690,7 +1546,7 @@ static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index,
     else
         rc = tmem_copy_to_client(cmfn, pgp->pfp, clibuf);
     if ( rc <= 0 )
-        goto bad_copy;
+        goto out;
 
     if ( !is_persistent(pool) )
     {
@@ -1719,17 +1575,11 @@ static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index,
         obj->no_evict = 0;
         tmem_spin_unlock(&obj->obj_spinlock);
     }
-    pool->found_gets++;
-    if ( is_persistent(pool) )
-        client->succ_pers_gets++;
-    else
-        client->succ_eph_gets++;
     return 1;
 
-bad_copy:
+out:
     obj->no_evict = 0;
     tmem_spin_unlock(&obj->obj_spinlock);
-    failed_copies++;
     return rc;
 }
 
@@ -1738,7 +1588,6 @@ static int do_tmem_flush_page(struct tmem_pool *pool, struct oid *oidp, uint32_t
     struct tmem_object_root *obj;
     struct tmem_page_descriptor *pgp;
 
-    pool->flushs++;
     obj = obj_find(pool,oidp);
     if ( obj == NULL )
         goto out;
@@ -1759,7 +1608,6 @@ static int do_tmem_flush_page(struct tmem_pool *pool, struct oid *oidp, uint32_t
         obj->no_evict = 0;
         tmem_spin_unlock(&obj->obj_spinlock);
     }
-    pool->flushs_found++;
 
 out:
     if ( pool->client->frozen )
@@ -1772,13 +1620,11 @@ static int do_tmem_flush_object(struct tmem_pool *pool, struct oid *oidp)
 {
     struct tmem_object_root *obj;
 
-    pool->flush_objs++;
     obj = obj_find(pool,oidp);
     if ( obj == NULL )
         goto out;
     tmem_write_lock(&pool->pool_rwlock);
     obj_destroy(obj,0);
-    pool->flush_objs_found++;
     tmem_write_unlock(&pool->pool_rwlock);
 
 out:
@@ -1981,176 +1827,11 @@ static int tmemc_flush_mem(domid_t cli_id, uint32_t kb)
     return flushed_kb;
 }
 
-/*
- * These tmemc_list* routines output lots of stats in a format that is
- *  intended to be program-parseable, not human-readable. Further, by
- *  tying each group of stats to a line format indicator (e.g. G= for
- *  global stats) and each individual stat to a two-letter specifier
- *  (e.g. Ec:nnnnn in the G= line says there are nnnnn pages in the
- *  global ephemeral pool), it should allow the stats reported to be
- *  forward and backwards compatible as tmem evolves.
- */
-#define BSIZE 1024
-
-static int tmemc_list_client(struct client *c, tmem_cli_va_param_t buf,
-                             int off, uint32_t len, bool_t use_long)
-{
-    char info[BSIZE];
-    int i, n = 0, sum = 0;
-    struct tmem_pool *p;
-    bool_t s;
-
-    n = scnprintf(info,BSIZE,"C=CI:%d,ww:%d,ca:%d,co:%d,fr:%d,"
-        "Tc:%"PRIu64",Ge:%ld,Pp:%ld,Gp:%ld%c",
-        c->cli_id, c->weight, c->cap, c->compress, c->frozen,
-        c->total_cycles, c->succ_eph_gets, c->succ_pers_puts, c->succ_pers_gets,
-        use_long ? ',' : '\n');
-    if (use_long)
-        n += scnprintf(info+n,BSIZE-n,
-             "Ec:%ld,Em:%ld,cp:%ld,cb:%"PRId64",cn:%ld,cm:%ld\n",
-             c->eph_count, c->eph_count_max,
-             c->compressed_pages, c->compressed_sum_size,
-             c->compress_poor, c->compress_nomem);
-    tmem_copy_to_client_buf_offset(buf,off+sum,info,n+1);
-    sum += n;
-    for ( i = 0; i < MAX_POOLS_PER_DOMAIN; i++ )
-    {
-        if ( (p = c->pools[i]) == NULL )
-            continue;
-        s = is_shared(p);
-        n = scnprintf(info,BSIZE,"P=CI:%d,PI:%d,"
-                      "PT:%c%c,U0:%"PRIx64",U1:%"PRIx64"%c",
-                      c->cli_id, p->pool_id,
-                      is_persistent(p) ? 'P' : 'E', s ? 'S' : 'P',
-                      (uint64_t)(s ? p->uuid[0] : 0),
-                      (uint64_t)(s ? p->uuid[1] : 0LL),
-                      use_long ? ',' : '\n');
-        if (use_long)
-            n += scnprintf(info+n,BSIZE-n,
-             "Pc:%d,Pm:%d,Oc:%ld,Om:%ld,Nc:%lu,Nm:%lu,"
-             "ps:%lu,pt:%lu,pd:%lu,pr:%lu,px:%lu,gs:%lu,gt:%lu,"
-             "fs:%lu,ft:%lu,os:%lu,ot:%lu\n",
-             _atomic_read(p->pgp_count), p->pgp_count_max,
-             p->obj_count, p->obj_count_max,
-             p->objnode_count, p->objnode_count_max,
-             p->good_puts, p->puts,p->dup_puts_flushed, p->dup_puts_replaced,
-             p->no_mem_puts, 
-             p->found_gets, p->gets,
-             p->flushs_found, p->flushs, p->flush_objs_found, p->flush_objs);
-        if ( sum + n >= len )
-            return sum;
-        tmem_copy_to_client_buf_offset(buf,off+sum,info,n+1);
-        sum += n;
-    }
-    return sum;
-}
-
-static int tmemc_list_shared(tmem_cli_va_param_t buf, int off, uint32_t len,
-                              bool_t use_long)
-{
-    char info[BSIZE];
-    int i, n = 0, sum = 0;
-    struct tmem_pool *p;
-    struct share_list *sl;
-
-    for ( i = 0; i < MAX_GLOBAL_SHARED_POOLS; i++ )
-    {
-        if ( (p = global_shared_pools[i]) == NULL )
-            continue;
-        n = scnprintf(info+n,BSIZE-n,"S=SI:%d,PT:%c%c,U0:%"PRIx64",U1:%"PRIx64,
-                      i, is_persistent(p) ? 'P' : 'E',
-                      is_shared(p) ? 'S' : 'P',
-                      p->uuid[0], p->uuid[1]);
-        list_for_each_entry(sl,&p->share_list, share_list)
-            n += scnprintf(info+n,BSIZE-n,",SC:%d",sl->client->cli_id);
-        n += scnprintf(info+n,BSIZE-n,"%c", use_long ? ',' : '\n');
-        if (use_long)
-            n += scnprintf(info+n,BSIZE-n,
-             "Pc:%d,Pm:%d,Oc:%ld,Om:%ld,Nc:%lu,Nm:%lu,"
-             "ps:%lu,pt:%lu,pd:%lu,pr:%lu,px:%lu,gs:%lu,gt:%lu,"
-             "fs:%lu,ft:%lu,os:%lu,ot:%lu\n",
-             _atomic_read(p->pgp_count), p->pgp_count_max,
-             p->obj_count, p->obj_count_max,
-             p->objnode_count, p->objnode_count_max,
-             p->good_puts, p->puts,p->dup_puts_flushed, p->dup_puts_replaced,
-             p->no_mem_puts, 
-             p->found_gets, p->gets,
-             p->flushs_found, p->flushs, p->flush_objs_found, p->flush_objs);
-        if ( sum + n >= len )
-            return sum;
-        tmem_copy_to_client_buf_offset(buf,off+sum,info,n+1);
-        sum += n;
-    }
-    return sum;
-}
-
-static int tmemc_list_global_perf(tmem_cli_va_param_t buf, int off,
-                                  uint32_t len, bool_t use_long)
-{
-    char info[BSIZE];
-    int n = 0, sum = 0;
-
-    n = scnprintf(info+n,BSIZE-n,"T=");
-    n--; /* overwrite trailing comma */
-    n += scnprintf(info+n,BSIZE-n,"\n");
-    if ( sum + n >= len )
-        return sum;
-    tmem_copy_to_client_buf_offset(buf,off+sum,info,n+1);
-    sum += n;
-    return sum;
-}
-
-static int tmemc_list_global(tmem_cli_va_param_t buf, int off, uint32_t len,
-                              bool_t use_long)
-{
-    char info[BSIZE];
-    int n = 0, sum = off;
-
-    n += scnprintf(info,BSIZE,"G="
-      "Tt:%lu,Te:%lu,Cf:%lu,Af:%lu,Pf:%lu,Ta:%lu,"
-      "Lm:%lu,Et:%lu,Ea:%lu,Rt:%lu,Ra:%lu,Rx:%lu,Fp:%lu%c",
-      total_tmem_ops, errored_tmem_ops, failed_copies,
-      alloc_failed, alloc_page_failed, tmem_page_list_pages,
-      low_on_memory, evicted_pgs,
-      evict_attempts, relinq_pgs, relinq_attempts, max_evicts_per_relinq,
-      total_flush_pool, use_long ? ',' : '\n');
-    if (use_long)
-        n += scnprintf(info+n,BSIZE-n,
-          "Ec:%ld,Em:%ld,Oc:%d,Om:%d,Nc:%d,Nm:%d,Pc:%d,Pm:%d,"
-          "Fc:%d,Fm:%d,Sc:%d,Sm:%d,Ep:%lu,Gd:%lu,Zt:%lu,Gz:%lu\n",
-          global_eph_count, global_eph_count_max,
-          _atomic_read(global_obj_count), global_obj_count_max,
-          _atomic_read(global_rtree_node_count), global_rtree_node_count_max,
-          _atomic_read(global_pgp_count), global_pgp_count_max,
-          _atomic_read(global_page_count), global_page_count_max,
-          _atomic_read(global_pcd_count), global_pcd_count_max,
-         tot_good_eph_puts,deduped_puts,pcd_tot_tze_size,pcd_tot_csize);
-    if ( sum + n >= len )
-        return sum;
-    tmem_copy_to_client_buf_offset(buf,off+sum,info,n+1);
-    sum += n;
-    return sum;
-}
-
 static int tmemc_list(domid_t cli_id, tmem_cli_va_param_t buf, uint32_t len,
                                bool_t use_long)
 {
-    struct client *client;
-    int off = 0;
-
-    if ( cli_id == TMEM_CLI_ID_NULL ) {
-        off = tmemc_list_global(buf,0,len,use_long);
-        off += tmemc_list_shared(buf,off,len-off,use_long);
-        list_for_each_entry(client,&global_client_list,client_list)
-            off += tmemc_list_client(client, buf, off, len-off, use_long);
-        off += tmemc_list_global_perf(buf,off,len-off,use_long);
-    }
-    else if ( (client = tmem_client_from_cli_id(cli_id)) == NULL)
-        return -1;
-    else
-        off = tmemc_list_client(client, buf, 0, len, use_long);
-
-    return 0;
+    tmem_client_info("tmemc_list is not implemented yet!\n");
+    return -ENOSYS;
 }
 
 static int tmemc_set_var_one(struct client *client, uint32_t subop, uint32_t arg1)
@@ -2541,9 +2222,6 @@ long do_tmem_op(tmem_cli_op_t uops)
     struct tmem_pool *pool = NULL;
     struct oid *oidp;
     int rc = 0;
-    bool_t succ_get = 0, succ_put = 0;
-    bool_t non_succ_get = 0, non_succ_put = 0;
-    bool_t flush = 0, flush_obj = 0;
     bool_t tmem_write_lock_set = 0, tmem_read_lock_set = 0;
 
     if ( !tmem_initialized )
@@ -2552,8 +2230,6 @@ long do_tmem_op(tmem_cli_op_t uops)
     if ( !tmem_current_permitted() )
         return -EPERM;
 
-    total_tmem_ops++;
-
     if ( tmem_lock_all )
     {
         if ( tmem_lock_all > 1 )
@@ -2568,7 +2244,6 @@ long do_tmem_op(tmem_cli_op_t uops)
         if ( tmem_lock_all )
             goto out;
  simple_error:
-        errored_tmem_ops++;
         return rc;
     }
 
@@ -2650,25 +2325,18 @@ long do_tmem_op(tmem_cli_op_t uops)
         tmem_ensure_avail_pages();
         rc = do_tmem_put(pool, oidp, op.u.gen.index, op.u.gen.cmfn,
                         tmem_cli_buf_null);
-        if (rc == 1) succ_put = 1;
-        else non_succ_put = 1;
         break;
     case TMEM_GET_PAGE:
         rc = do_tmem_get(pool, oidp, op.u.gen.index, op.u.gen.cmfn,
                         tmem_cli_buf_null);
-        if (rc == 1) succ_get = 1;
-        else non_succ_get = 1;
         break;
     case TMEM_FLUSH_PAGE:
-        flush = 1;
         rc = do_tmem_flush_page(pool, oidp, op.u.gen.index);
         break;
     case TMEM_FLUSH_OBJECT:
         rc = do_tmem_flush_object(pool, oidp);
-        flush_obj = 1;
         break;
     case TMEM_DESTROY_POOL:
-        flush = 1;
         rc = do_tmem_destroy_pool(op.pool_id);
         break;
     default:
@@ -2678,8 +2346,6 @@ long do_tmem_op(tmem_cli_op_t uops)
     }
 
 out:
-    if ( rc < 0 )
-        errored_tmem_ops++;
     if ( tmem_lock_all )
     {
         if ( tmem_lock_all > 1 )
@@ -2756,7 +2422,6 @@ void *tmem_relinquish_pages(unsigned int order, unsigned int memflags)
     if (!tmem_enabled() || !tmem_freeable_pages())
         return NULL;
 
-    relinq_attempts++;
     if ( order > 0 )
     {
 #ifndef NDEBUG
@@ -2779,13 +2444,10 @@ void *tmem_relinquish_pages(unsigned int order, unsigned int memflags)
             break;
         evicts_per_relinq++;
     }
-    if ( evicts_per_relinq > max_evicts_per_relinq )
-        max_evicts_per_relinq = evicts_per_relinq;
     if ( pfp != NULL )
     {
         if ( !(memflags & MEMF_tmem) )
             scrub_one_page(pfp);
-        relinq_pgs++;
     }
 
     if ( tmem_called_from_tmem(memflags) )
diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
index d842374..3777543 100644
--- a/xen/include/xen/tmem_xen.h
+++ b/xen/include/xen/tmem_xen.h
@@ -363,14 +363,6 @@ static inline int tmem_get_tmemop_from_client(tmem_op_t *op, tmem_cli_op_t uops)
 }
 
 #define tmem_cli_buf_null guest_handle_from_ptr(NULL, char)
-
-static inline void tmem_copy_to_client_buf_offset(tmem_cli_va_param_t clibuf,
-                                                int off,
-                                                char *tmembuf, int len)
-{
-    copy_to_guest_offset(clibuf,off,tmembuf,len);
-}
-
 #define tmem_copy_to_client_buf(clibuf, tmembuf, cnt) \
     copy_to_guest(guest_handle_cast(clibuf, void), tmembuf, cnt)
 
-- 
1.7.10.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel