[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen master] tmem: cleanup: rename 'tmh_' with 'tmem_'



commit 1f4271763027700168f2aac9b561633c2fccca59
Author:     Bob Liu <lliubbo@xxxxxxxxx>
AuthorDate: Fri Nov 8 09:03:50 2013 +0800
Commit:     Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
CommitDate: Tue Nov 12 10:15:24 2013 -0500

    tmem: cleanup: rename 'tmh_' with 'tmem_'
    
    tmem was designed so that it could easily be ported to platforms other
    than xen, but I don't think anybody will port tmem to another platform.
    This flexibility also made tmem harder to understand: there are too many
    'tmh_' and 'tmem_' functions and variables.
    
    This patch replaces all 'tmh_' functions/variables with 'tmem_' to make
    the code more readable.
    
    Signed-off-by: Bob Liu <bob.liu@xxxxxxxxxx>
    Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
---
 xen/common/tmem.c          |  416 ++++++++++++++++++++++----------------------
 xen/common/tmem_xen.c      |  102 ++++++------
 xen/include/xen/tmem_xen.h |  210 +++++++++++------------
 3 files changed, 356 insertions(+), 372 deletions(-)

diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index f3a0d91..7d22e0c 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -29,12 +29,6 @@
 
 #define TMEM_SPEC_VERSION 1
 
-/************  INTERFACE TO TMEM HOST-DEPENDENT (tmh) CODE ************/
-
-#define CLI_ID_NULL TMH_CLI_ID_NULL
-#define cli_id_str  tmh_cli_id_str
-#define client_str  tmh_client_str
-
 /************ DEBUG and STATISTICS (+ some compression testing) *******/
 
 #ifndef NDEBUG
@@ -110,7 +104,7 @@ struct tmem_page_content_descriptor;
 struct client {
     struct list_head client_list;
     struct tm_pool *pools[MAX_POOLS_PER_DOMAIN];
-    tmh_client_t *tmh;
+    tmem_client_t *tmem;
     struct list_head ephemeral_page_list;
     long eph_count, eph_count_max;
     cli_id_t cli_id;
@@ -275,22 +269,22 @@ static int tmem_initialized = 0;
 
 /************ CONCURRENCY  ***********************************************/
 
-EXPORT DEFINE_SPINLOCK(tmem_spinlock);  /* used iff tmh_lock_all */
-EXPORT DEFINE_RWLOCK(tmem_rwlock);      /* used iff !tmh_lock_all */
+EXPORT DEFINE_SPINLOCK(tmem_spinlock);  /* used iff tmem_lock_all */
+EXPORT DEFINE_RWLOCK(tmem_rwlock);      /* used iff !tmem_lock_all */
 static DEFINE_SPINLOCK(eph_lists_spinlock); /* protects global AND clients */
 static DEFINE_SPINLOCK(pers_lists_spinlock);
 
-#define tmem_spin_lock(_l)  do {if (!tmh_lock_all) spin_lock(_l);}while(0)
-#define tmem_spin_unlock(_l)  do {if (!tmh_lock_all) spin_unlock(_l);}while(0)
-#define tmem_read_lock(_l)  do {if (!tmh_lock_all) read_lock(_l);}while(0)
-#define tmem_read_unlock(_l)  do {if (!tmh_lock_all) read_unlock(_l);}while(0)
-#define tmem_write_lock(_l)  do {if (!tmh_lock_all) write_lock(_l);}while(0)
-#define tmem_write_unlock(_l)  do {if (!tmh_lock_all) 
write_unlock(_l);}while(0)
-#define tmem_write_trylock(_l)  ((tmh_lock_all)?1:write_trylock(_l))
-#define tmem_spin_trylock(_l)  (tmh_lock_all?1:spin_trylock(_l))
+#define tmem_spin_lock(_l)  do {if (!tmem_lock_all) spin_lock(_l);}while(0)
+#define tmem_spin_unlock(_l)  do {if (!tmem_lock_all) spin_unlock(_l);}while(0)
+#define tmem_read_lock(_l)  do {if (!tmem_lock_all) read_lock(_l);}while(0)
+#define tmem_read_unlock(_l)  do {if (!tmem_lock_all) read_unlock(_l);}while(0)
+#define tmem_write_lock(_l)  do {if (!tmem_lock_all) write_lock(_l);}while(0)
+#define tmem_write_unlock(_l)  do {if (!tmem_lock_all) 
write_unlock(_l);}while(0)
+#define tmem_write_trylock(_l)  ((tmem_lock_all)?1:write_trylock(_l))
+#define tmem_spin_trylock(_l)  (tmem_lock_all?1:spin_trylock(_l))
 
-#define ASSERT_SPINLOCK(_l) ASSERT(tmh_lock_all || spin_is_locked(_l))
-#define ASSERT_WRITELOCK(_l) ASSERT(tmh_lock_all || rw_is_write_locked(_l))
+#define ASSERT_SPINLOCK(_l) ASSERT(tmem_lock_all || spin_is_locked(_l))
+#define ASSERT_WRITELOCK(_l) ASSERT(tmem_lock_all || rw_is_write_locked(_l))
 
 /* global counters (should use long_atomic_t access) */
 static long global_eph_count = 0; /* atomicity depends on eph_lists_spinlock */
@@ -325,9 +319,9 @@ static NOINLINE void *_tmem_malloc(size_t size, size_t 
align, pool_t *pool)
     void *v;
 
     if ( (pool != NULL) && is_persistent(pool) )
-        v = tmh_alloc_subpage_thispool(pool,size,align);
+        v = tmem_alloc_subpage_thispool(pool,size,align);
     else
-        v = tmh_alloc_subpage(pool, size, align);
+        v = tmem_alloc_subpage(pool, size, align);
     if ( v == NULL )
         alloc_failed++;
     return v;
@@ -336,9 +330,9 @@ static NOINLINE void *_tmem_malloc(size_t size, size_t 
align, pool_t *pool)
 static NOINLINE void tmem_free(void *p, size_t size, pool_t *pool)
 {
     if ( pool == NULL || !is_persistent(pool) )
-        tmh_free_subpage(p,size);
+        tmem_free_subpage(p,size);
     else
-        tmh_free_subpage_thispool(pool,p,size);
+        tmem_free_subpage_thispool(pool,p,size);
 }
 
 static NOINLINE struct page_info *tmem_page_alloc(pool_t *pool)
@@ -346,9 +340,9 @@ static NOINLINE struct page_info *tmem_page_alloc(pool_t 
*pool)
     struct page_info *pfp = NULL;
 
     if ( pool != NULL && is_persistent(pool) )
-        pfp = tmh_alloc_page_thispool(pool);
+        pfp = tmem_alloc_page_thispool(pool);
     else
-        pfp = tmh_alloc_page(pool,0);
+        pfp = tmem_alloc_page(pool,0);
     if ( pfp == NULL )
         alloc_page_failed++;
     else
@@ -360,9 +354,9 @@ static NOINLINE void tmem_page_free(pool_t *pool, struct 
page_info *pfp)
 {
     ASSERT(pfp);
     if ( pool == NULL || !is_persistent(pool) )
-        tmh_free_page(pfp);
+        tmem_free_page(pfp);
     else
-        tmh_free_page_thispool(pool,pfp);
+        tmem_free_page_thispool(pool,pfp);
     atomic_dec_and_assert(global_page_count);
 }
 
@@ -376,18 +370,18 @@ static NOINLINE int pcd_copy_to_client(xen_pfn_t cmfn, 
pgp_t *pgp)
     pcd_t *pcd;
     int ret;
 
-    ASSERT(tmh_dedup_enabled());
+    ASSERT(tmem_dedup_enabled());
     tmem_read_lock(&pcd_tree_rwlocks[firstbyte]);
     pcd = pgp->pcd;
     if ( pgp->size < PAGE_SIZE && pgp->size != 0 &&
          pcd->size < PAGE_SIZE && pcd->size != 0 )
-        ret = tmh_decompress_to_client(cmfn, pcd->cdata, pcd->size,
-                                       tmh_cli_buf_null);
-    else if ( tmh_tze_enabled() && pcd->size < PAGE_SIZE )
-        ret = tmh_copy_tze_to_client(cmfn, pcd->tze, pcd->size);
+        ret = tmem_decompress_to_client(cmfn, pcd->cdata, pcd->size,
+                                       tmem_cli_buf_null);
+    else if ( tmem_tze_enabled() && pcd->size < PAGE_SIZE )
+        ret = tmem_copy_tze_to_client(cmfn, pcd->tze, pcd->size);
     else
-        ret = tmh_copy_to_client(cmfn, pcd->pfp, 0, 0, PAGE_SIZE,
-                                 tmh_cli_buf_null);
+        ret = tmem_copy_to_client(cmfn, pcd->pfp, 0, 0, PAGE_SIZE,
+                                 tmem_cli_buf_null);
     tmem_read_unlock(&pcd_tree_rwlocks[firstbyte]);
     return ret;
 }
@@ -405,7 +399,7 @@ static NOINLINE void pcd_disassociate(pgp_t *pgp, pool_t 
*pool, bool_t have_pcd_
     char *pcd_cdata = pgp->pcd->cdata;
     pagesize_t pcd_csize = pgp->pcd->size;
 
-    ASSERT(tmh_dedup_enabled());
+    ASSERT(tmem_dedup_enabled());
     ASSERT(firstbyte != NOT_SHAREABLE);
     ASSERT(firstbyte < 256);
 
@@ -447,9 +441,9 @@ static NOINLINE void pcd_disassociate(pgp_t *pgp, pool_t 
*pool, bool_t have_pcd_
             tmem_free(pcd_tze,pcd_size,pool);
     } else {
         /* real physical page */
-        if ( tmh_tze_enabled() )
+        if ( tmem_tze_enabled() )
             pcd_tot_tze_size -= PAGE_SIZE;
-        if ( tmh_compression_enabled() )
+        if ( tmem_compression_enabled() )
             pcd_tot_csize -= PAGE_SIZE;
         tmem_page_free(pool,pfp);
     }
@@ -464,10 +458,10 @@ static NOINLINE int pcd_associate(pgp_t *pgp, char 
*cdata, pagesize_t csize)
     pcd_t *pcd;
     int cmp;
     pagesize_t pfp_size = 0;
-    uint8_t firstbyte = (cdata == NULL) ? tmh_get_first_byte(pgp->pfp) : 
*cdata;
+    uint8_t firstbyte = (cdata == NULL) ? tmem_get_first_byte(pgp->pfp) : 
*cdata;
     int ret = 0;
 
-    if ( !tmh_dedup_enabled() )
+    if ( !tmem_dedup_enabled() )
         return 0;
     ASSERT(pgp->us.obj != NULL);
     ASSERT(pgp->us.obj->pool != NULL);
@@ -476,9 +470,9 @@ static NOINLINE int pcd_associate(pgp_t *pgp, char *cdata, 
pagesize_t csize)
     {
         ASSERT(pgp->pfp != NULL);
         pfp_size = PAGE_SIZE;
-        if ( tmh_tze_enabled() )
+        if ( tmem_tze_enabled() )
         {
-            pfp_size = tmh_tze_pfp_scan(pgp->pfp);
+            pfp_size = tmem_tze_pfp_scan(pgp->pfp);
             if ( pfp_size > PCD_TZE_MAX_SIZE )
                 pfp_size = PAGE_SIZE;
         }
@@ -499,25 +493,25 @@ static NOINLINE int pcd_associate(pgp_t *pgp, char 
*cdata, pagesize_t csize)
         {
             if ( pcd->size < PAGE_SIZE )
                 /* both new entry and rbtree entry are compressed */
-                cmp = tmh_pcd_cmp(cdata,csize,pcd->cdata,pcd->size);
+                cmp = tmem_pcd_cmp(cdata,csize,pcd->cdata,pcd->size);
             else
                 /* new entry is compressed, rbtree entry is not */
                 cmp = -1;
         } else if ( pcd->size < PAGE_SIZE )
             /* rbtree entry is compressed, rbtree entry is not */
             cmp = 1;
-        else if ( tmh_tze_enabled() ) {
+        else if ( tmem_tze_enabled() ) {
             if ( pcd->size < PAGE_SIZE )
                 /* both new entry and rbtree entry are trailing zero */
-                cmp = tmh_tze_pfp_cmp(pgp->pfp,pfp_size,pcd->tze,pcd->size);
+                cmp = tmem_tze_pfp_cmp(pgp->pfp,pfp_size,pcd->tze,pcd->size);
             else
                 /* new entry is trailing zero, rbtree entry is not */
-                cmp = tmh_tze_pfp_cmp(pgp->pfp,pfp_size,pcd->pfp,PAGE_SIZE);
+                cmp = tmem_tze_pfp_cmp(pgp->pfp,pfp_size,pcd->pfp,PAGE_SIZE);
         } else  {
             /* both new entry and rbtree entry are full physical pages */
             ASSERT(pgp->pfp != NULL);
             ASSERT(pcd->pfp != NULL);
-            cmp = tmh_page_cmp(pgp->pfp,pcd->pfp);
+            cmp = tmem_page_cmp(pgp->pfp,pcd->pfp);
         }
 
         /* walk tree or match depending on cmp */
@@ -559,21 +553,21 @@ static NOINLINE int pcd_associate(pgp_t *pgp, char 
*cdata, pagesize_t csize)
         pcd->size = csize;
         pcd_tot_csize += csize;
     } else if ( pfp_size == 0 ) {
-        ASSERT(tmh_tze_enabled());
+        ASSERT(tmem_tze_enabled());
         pcd->size = 0;
         pcd->tze = NULL;
     } else if ( pfp_size < PAGE_SIZE &&
          ((pcd->tze = tmem_malloc_bytes(pfp_size,pgp->us.obj->pool)) != NULL) 
) {
-        tmh_tze_copy_from_pfp(pcd->tze,pgp->pfp,pfp_size);
+        tmem_tze_copy_from_pfp(pcd->tze,pgp->pfp,pfp_size);
         pcd->size = pfp_size;
         pcd_tot_tze_size += pfp_size;
         tmem_page_free(pgp->us.obj->pool,pgp->pfp);
     } else {
         pcd->pfp = pgp->pfp;
         pcd->size = PAGE_SIZE;
-        if ( tmh_tze_enabled() )
+        if ( tmem_tze_enabled() )
             pcd_tot_tze_size += PAGE_SIZE;
-        if ( tmh_compression_enabled() )
+        if ( tmem_compression_enabled() )
             pcd_tot_csize += PAGE_SIZE;
     }
     rb_link_node(&pcd->pcd_rb_tree_node, parent, new);
@@ -608,7 +602,7 @@ static NOINLINE pgp_t *pgp_alloc(obj_t *obj)
     INIT_LIST_HEAD(&pgp->global_eph_pages);
     INIT_LIST_HEAD(&pgp->us.client_eph_pages);
     pgp->pfp = NULL;
-    if ( tmh_dedup_enabled() )
+    if ( tmem_dedup_enabled() )
     {
         pgp->firstbyte = NOT_SHAREABLE;
         pgp->eviction_attempted = 0;
@@ -639,7 +633,7 @@ static NOINLINE void pgp_free_data(pgp_t *pgp, pool_t *pool)
 
     if ( pgp->pfp == NULL )
         return;
-    if ( tmh_dedup_enabled() && pgp->firstbyte != NOT_SHAREABLE )
+    if ( tmem_dedup_enabled() && pgp->firstbyte != NOT_SHAREABLE )
         pcd_disassociate(pgp,pool,0); /* pgp->size lost */
     else if ( pgp_size )
         tmem_free(pgp->cdata,pgp_size,pool);
@@ -876,7 +870,7 @@ void oid_set_invalid(OID *oidp)
 
 unsigned oid_hash(OID *oidp)
 {
-    return (tmh_hash(oidp->oid[0] ^ oidp->oid[1] ^ oidp->oid[2],
+    return (tmem_hash(oidp->oid[0] ^ oidp->oid[1] ^ oidp->oid[2],
                      BITS_PER_LONG) & OBJ_HASH_BUCKETS_MASK);
 }
 
@@ -895,7 +889,7 @@ restart_find:
         switch ( oid_compare(&obj->oid, oidp) )
         {
             case 0: /* equal */
-                if ( tmh_lock_all )
+                if ( tmem_lock_all )
                     obj->no_evict = 1;
                 else
                 {
@@ -942,7 +936,7 @@ static NOINLINE void obj_free(obj_t *obj, int no_rebalance)
     obj->pool = NULL;
     old_oid = obj->oid;
     oid_set_invalid(&obj->oid);
-    obj->last_client = CLI_ID_NULL;
+    obj->last_client = TMEM_CLI_ID_NULL;
     atomic_dec_and_assert(global_obj_count);
     /* use no_rebalance only if all objects are being destroyed anyway */
     if ( !no_rebalance )
@@ -1001,7 +995,7 @@ static NOINLINE obj_t * obj_new(pool_t *pool, OID *oidp)
     obj->oid = *oidp;
     obj->objnode_count = 0;
     obj->pgp_count = 0;
-    obj->last_client = CLI_ID_NULL;
+    obj->last_client = TMEM_CLI_ID_NULL;
     SET_SENTINEL(obj,OBJ);
     tmem_spin_lock(&obj->obj_spinlock);
     obj_rb_insert(&pool->obj_rb_root[oid_hash(oidp)], obj);
@@ -1056,7 +1050,7 @@ static pool_t * pool_alloc(void)
     pool_t *pool;
     int i;
 
-    if ( (pool = tmh_alloc_infra(sizeof(pool_t),__alignof__(pool_t))) == NULL )
+    if ( (pool = tmem_alloc_infra(sizeof(pool_t),__alignof__(pool_t))) == NULL 
)
         return NULL;
     for (i = 0; i < OBJ_HASH_BUCKETS; i++)
         pool->obj_rb_root[i] = RB_ROOT;
@@ -1085,7 +1079,7 @@ static NOINLINE void pool_free(pool_t *pool)
     INVERT_SENTINEL(pool,POOL);
     pool->client = NULL;
     list_del(&pool->pool_list);
-    tmh_free_infra(pool);
+    tmem_free_infra(pool);
 }
 
 /* register new_client as a user of this shared pool and return new
@@ -1100,8 +1094,8 @@ static int shared_pool_join(pool_t *pool, client_t 
*new_client)
     sl->client = new_client;
     list_add_tail(&sl->share_list, &pool->share_list);
     if ( new_client->cli_id != pool->client->cli_id )
-        tmh_client_info("adding new %s %d to shared pool owned by %s %d\n",
-            client_str, new_client->cli_id, client_str, pool->client->cli_id);
+        tmem_client_info("adding new %s %d to shared pool owned by %s %d\n",
+            tmem_client_str, new_client->cli_id, tmem_client_str, 
pool->client->cli_id);
     return ++pool->shared_count;
 }
 
@@ -1130,8 +1124,8 @@ static NOINLINE void shared_pool_reassign(pool_t *pool)
     old_client->eph_count -= _atomic_read(pool->pgp_count);
     list_splice_init(&old_client->ephemeral_page_list,
                      &new_client->ephemeral_page_list);
-    tmh_client_info("reassigned shared pool from %s=%d to %s=%d pool_id=%d\n",
-        cli_id_str, old_client->cli_id, cli_id_str, new_client->cli_id, 
poolid);
+    tmem_client_info("reassigned shared pool from %s=%d to %s=%d pool_id=%d\n",
+        tmem_cli_id_str, old_client->cli_id, tmem_cli_id_str, 
new_client->cli_id, poolid);
     pool->pool_id = poolid;
 }
 
@@ -1166,8 +1160,8 @@ static NOINLINE int shared_pool_quit(pool_t *pool, 
cli_id_t cli_id)
             }
         return 0;
     }
-    tmh_client_warn("tmem: no match unsharing pool, %s=%d\n",
-        cli_id_str,pool->client->cli_id);
+    tmem_client_warn("tmem: no match unsharing pool, %s=%d\n",
+        tmem_cli_id_str,pool->client->cli_id);
     return -1;
 }
 
@@ -1177,22 +1171,22 @@ static void pool_flush(pool_t *pool, cli_id_t cli_id, 
bool_t destroy)
     ASSERT(pool != NULL);
     if ( (is_shared(pool)) && (shared_pool_quit(pool,cli_id) > 0) )
     {
-        tmh_client_warn("tmem: %s=%d no longer using shared pool %d owned by 
%s=%d\n",
-           cli_id_str, cli_id, pool->pool_id, cli_id_str,pool->client->cli_id);
+        tmem_client_warn("tmem: %s=%d no longer using shared pool %d owned by 
%s=%d\n",
+           tmem_cli_id_str, cli_id, pool->pool_id, 
tmem_cli_id_str,pool->client->cli_id);
         return;
     }
-    tmh_client_info("%s %s-%s tmem pool %s=%d pool_id=%d\n",
+    tmem_client_info("%s %s-%s tmem pool %s=%d pool_id=%d\n",
                     destroy ? "destroying" : "flushing",
                     is_persistent(pool) ? "persistent" : "ephemeral" ,
                     is_shared(pool) ? "shared" : "private",
-                    cli_id_str, pool->client->cli_id, pool->pool_id);
+                    tmem_cli_id_str, pool->client->cli_id, pool->pool_id);
     if ( pool->client->live_migrating )
     {
-        tmh_client_warn("can't %s pool while %s is live-migrating\n",
-               destroy?"destroy":"flush", client_str);
+        tmem_client_warn("can't %s pool while %s is live-migrating\n",
+               destroy?"destroy":"flush", tmem_client_str);
         return;
     }
-    pool_destroy_objs(pool,0,CLI_ID_NULL);
+    pool_destroy_objs(pool,0,TMEM_CLI_ID_NULL);
     if ( destroy )
     {
         pool->client->pools[pool->pool_id] = NULL;
@@ -1204,30 +1198,30 @@ static void pool_flush(pool_t *pool, cli_id_t cli_id, 
bool_t destroy)
 
 static client_t *client_create(cli_id_t cli_id)
 {
-    client_t *client = tmh_alloc_infra(sizeof(client_t),__alignof__(client_t));
+    client_t *client = 
tmem_alloc_infra(sizeof(client_t),__alignof__(client_t));
     int i;
 
-    tmh_client_info("tmem: initializing tmem capability for %s=%d...",
-                    cli_id_str, cli_id);
+    tmem_client_info("tmem: initializing tmem capability for %s=%d...",
+                    tmem_cli_id_str, cli_id);
     if ( client == NULL )
     {
-        tmh_client_err("failed... out of memory\n");
+        tmem_client_err("failed... out of memory\n");
         goto fail;
     }
     memset(client,0,sizeof(client_t));
-    if ( (client->tmh = tmh_client_init(cli_id)) == NULL )
+    if ( (client->tmem = tmem_client_init(cli_id)) == NULL )
     {
-        tmh_client_err("failed... can't allocate host-dependent part of 
client\n");
+        tmem_client_err("failed... can't allocate host-dependent part of 
client\n");
         goto fail;
     }
-    if ( !tmh_set_client_from_id(client, client->tmh, cli_id) )
+    if ( !tmem_set_client_from_id(client, client->tmem, cli_id) )
     {
-        tmh_client_err("failed... can't set client\n");
+        tmem_client_err("failed... can't set client\n");
         goto fail;
     }
     client->cli_id = cli_id;
-    client->compress = tmh_compression_enabled();
-    client->shared_auth_required = tmh_shared_auth();
+    client->compress = tmem_compression_enabled();
+    client->shared_auth_required = tmem_shared_auth();
     for ( i = 0; i < MAX_GLOBAL_SHARED_POOLS; i++)
         client->shared_auth_uuid[i][0] =
             client->shared_auth_uuid[i][1] = -1L;
@@ -1240,19 +1234,19 @@ static client_t *client_create(cli_id_t cli_id)
     client->eph_count = client->eph_count_max = 0;
     client->total_cycles = 0; client->succ_pers_puts = 0;
     client->succ_eph_gets = 0; client->succ_pers_gets = 0;
-    tmh_client_info("ok\n");
+    tmem_client_info("ok\n");
     return client;
 
  fail:
-    tmh_free_infra(client);
+    tmem_free_infra(client);
     return NULL;
 }
 
 static void client_free(client_t *client)
 {
     list_del(&client->client_list);
-    tmh_client_destroy(client->tmh);
-    tmh_free_infra(client);
+    tmem_client_destroy(client->tmem);
+    tmem_free_infra(client);
 }
 
 /* flush all data from a client and, optionally, free it */
@@ -1301,11 +1295,11 @@ static bool_t tmem_try_to_evict_pgp(pgp_t *pgp, bool_t 
*hold_pool_rwlock)
 
     if ( pool->is_dying )
         return 0;
-    if ( tmh_lock_all && !obj->no_evict )
+    if ( tmem_lock_all && !obj->no_evict )
        return 1;
     if ( tmem_spin_trylock(&obj->obj_spinlock) )
     {
-        if ( tmh_dedup_enabled() )
+        if ( tmem_dedup_enabled() )
         {
             firstbyte = pgp->firstbyte;
             if ( firstbyte ==  NOT_SHAREABLE )
@@ -1340,7 +1334,7 @@ obj_unlock:
 
 static int tmem_evict(void)
 {
-    client_t *client = tmh_client_from_current();
+    client_t *client = tmem_client_from_current();
     pgp_t *pgp = NULL, *pgp2, *pgp_del;
     obj_t *obj;
     pool_t *pool;
@@ -1379,7 +1373,7 @@ found:
     ASSERT_SPINLOCK(&obj->obj_spinlock);
     pgp_del = pgp_delete_from_obj(obj, pgp->index);
     ASSERT(pgp_del == pgp);
-    if ( tmh_dedup_enabled() && pgp->firstbyte != NOT_SHAREABLE )
+    if ( tmem_dedup_enabled() && pgp->firstbyte != NOT_SHAREABLE )
     {
         ASSERT(pgp->pcd->pgp_ref_count == 1 || pgp->eviction_attempted);
         pcd_disassociate(pgp,pool,1);
@@ -1406,13 +1400,13 @@ static unsigned long tmem_relinquish_npages(unsigned 
long n)
 {
     unsigned long avail_pages = 0;
 
-    while ( (avail_pages = tmh_avail_pages()) < n )
+    while ( (avail_pages = tmem_page_list_pages) < n )
     {
         if (  !tmem_evict() )
             break;
     }
     if ( avail_pages )
-        tmh_release_avail_pages_to_host();
+        tmem_release_avail_pages_to_host();
     return avail_pages;
 }
 
@@ -1425,7 +1419,7 @@ static inline void tmem_ensure_avail_pages(void)
 {
     int failed_evict = 10;
 
-    while ( !tmh_free_mb() )
+    while ( !tmem_free_mb() )
     {
         if ( tmem_evict() )
             continue;
@@ -1453,13 +1447,13 @@ static NOINLINE int do_tmem_put_compress(pgp_t *pgp, 
xen_pfn_t cmfn,
     if ( pgp->pfp != NULL )
         pgp_free_data(pgp, pgp->us.obj->pool);
     START_CYC_COUNTER(compress);
-    ret = tmh_compress_from_client(cmfn, &dst, &size, clibuf);
+    ret = tmem_compress_from_client(cmfn, &dst, &size, clibuf);
     if ( ret <= 0 )
         goto out;
     else if ( (size == 0) || (size >= tmem_subpage_maxsize()) ) {
         ret = 0;
         goto out;
-    } else if ( tmh_dedup_enabled() && !is_persistent(pgp->us.obj->pool) ) {
+    } else if ( tmem_dedup_enabled() && !is_persistent(pgp->us.obj->pool) ) {
         if ( (ret = pcd_associate(pgp,dst,size)) == -ENOMEM )
             goto out;
     } else if ( (p = tmem_malloc_bytes(size,pgp->us.obj->pool)) == NULL ) {
@@ -1520,12 +1514,12 @@ copy_uncompressed:
     if ( ( pgp->pfp = tmem_page_alloc(pool) ) == NULL )
         goto failed_dup;
     pgp->size = 0;
-    /* tmh_copy_from_client properly handles len==0 and offsets != 0 */
-    ret = tmh_copy_from_client(pgp->pfp, cmfn, tmem_offset, pfn_offset, len,
-                               tmh_cli_buf_null);
+    /* tmem_copy_from_client properly handles len==0 and offsets != 0 */
+    ret = tmem_copy_from_client(pgp->pfp, cmfn, tmem_offset, pfn_offset, len,
+                               tmem_cli_buf_null);
     if ( ret < 0 )
         goto bad_copy;
-    if ( tmh_dedup_enabled() && !is_persistent(pool) )
+    if ( tmem_dedup_enabled() && !is_persistent(pool) )
     {
         if ( pcd_associate(pgp,NULL,0) == -ENOMEM )
             goto failed_dup;
@@ -1645,12 +1639,12 @@ copy_uncompressed:
         ret = -ENOMEM;
         goto delete_and_free;
     }
-    /* tmh_copy_from_client properly handles len==0 (TMEM_NEW_PAGE) */
-    ret = tmh_copy_from_client(pgp->pfp, cmfn, tmem_offset, pfn_offset, len,
+    /* tmem_copy_from_client properly handles len==0 (TMEM_NEW_PAGE) */
+    ret = tmem_copy_from_client(pgp->pfp, cmfn, tmem_offset, pfn_offset, len,
                                clibuf);
     if ( ret < 0 )
         goto bad_copy;
-    if ( tmh_dedup_enabled() && !is_persistent(pool) )
+    if ( tmem_dedup_enabled() && !is_persistent(pool) )
     {
         if ( pcd_associate(pgp,NULL,0) == -ENOMEM )
             goto delete_and_free;
@@ -1743,18 +1737,18 @@ static NOINLINE int do_tmem_get(pool_t *pool, OID 
*oidp, uint32_t index,
         return 0;
     }
     ASSERT(pgp->size != -1);
-    if ( tmh_dedup_enabled() && !is_persistent(pool) &&
+    if ( tmem_dedup_enabled() && !is_persistent(pool) &&
               pgp->firstbyte != NOT_SHAREABLE )
         rc = pcd_copy_to_client(cmfn, pgp);
     else if ( pgp->size != 0 )
     {
         START_CYC_COUNTER(decompress);
-        rc = tmh_decompress_to_client(cmfn, pgp->cdata,
+        rc = tmem_decompress_to_client(cmfn, pgp->cdata,
                                       pgp->size, clibuf);
         END_CYC_COUNTER(decompress);
     }
     else
-        rc = tmh_copy_to_client(cmfn, pgp->pfp, tmem_offset,
+        rc = tmem_copy_to_client(cmfn, pgp->pfp, tmem_offset,
                                 pfn_offset, len, clibuf);
     if ( rc <= 0 )
         goto bad_copy;
@@ -1778,7 +1772,7 @@ static NOINLINE int do_tmem_get(pool_t *pool, OID *oidp, 
uint32_t index,
             list_del(&pgp->us.client_eph_pages);
             
list_add_tail(&pgp->us.client_eph_pages,&client->ephemeral_page_list);
             tmem_spin_unlock(&eph_lists_spinlock);
-            obj->last_client = tmh_get_cli_id_from_current();
+            obj->last_client = tmem_get_cli_id_from_current();
         }
     }
     if ( obj != NULL )
@@ -1857,7 +1851,7 @@ out:
 
 static NOINLINE int do_tmem_destroy_pool(uint32_t pool_id)
 {
-    client_t *client = tmh_client_from_current();
+    client_t *client = tmem_client_from_current();
     pool_t *pool;
 
     if ( client->pools == NULL )
@@ -1887,57 +1881,57 @@ static NOINLINE int do_tmem_new_pool(cli_id_t 
this_cli_id,
     int s_poolid, first_unused_s_poolid;
     int i;
 
-    if ( this_cli_id == CLI_ID_NULL )
-        cli_id = tmh_get_cli_id_from_current();
+    if ( this_cli_id == TMEM_CLI_ID_NULL )
+        cli_id = tmem_get_cli_id_from_current();
     else
         cli_id = this_cli_id;
-    tmh_client_info("tmem: allocating %s-%s tmem pool for %s=%d...",
+    tmem_client_info("tmem: allocating %s-%s tmem pool for %s=%d...",
         persistent ? "persistent" : "ephemeral" ,
-        shared ? "shared" : "private", cli_id_str, cli_id);
+        shared ? "shared" : "private", tmem_cli_id_str, cli_id);
     if ( specversion != TMEM_SPEC_VERSION )
     {
-        tmh_client_err("failed... unsupported spec version\n");
+        tmem_client_err("failed... unsupported spec version\n");
         return -EPERM;
     }
     if ( pagebits != (PAGE_SHIFT - 12) )
     {
-        tmh_client_err("failed... unsupported pagesize %d\n",
+        tmem_client_err("failed... unsupported pagesize %d\n",
                        1 << (pagebits + 12));
         return -EPERM;
     }
     if ( flags & TMEM_POOL_PRECOMPRESSED )
     {
-        tmh_client_err("failed... precompression flag set but unsupported\n");
+        tmem_client_err("failed... precompression flag set but unsupported\n");
         return -EPERM;
     }
     if ( flags & TMEM_POOL_RESERVED_BITS )
     {
-        tmh_client_err("failed... reserved bits must be zero\n");
+        tmem_client_err("failed... reserved bits must be zero\n");
         return -EPERM;
     }
     if ( (pool = pool_alloc()) == NULL )
     {
-        tmh_client_err("failed... out of memory\n");
+        tmem_client_err("failed... out of memory\n");
         return -ENOMEM;
     }
-    if ( this_cli_id != CLI_ID_NULL )
+    if ( this_cli_id != TMEM_CLI_ID_NULL )
     {
-        if ( (client = tmh_client_from_cli_id(this_cli_id)) == NULL
+        if ( (client = tmem_client_from_cli_id(this_cli_id)) == NULL
              || d_poolid >= MAX_POOLS_PER_DOMAIN
              || client->pools[d_poolid] != NULL )
             goto fail;
     }
     else
     {
-        client = tmh_client_from_current();
+        client = tmem_client_from_current();
         ASSERT(client != NULL);
         for ( d_poolid = 0; d_poolid < MAX_POOLS_PER_DOMAIN; d_poolid++ )
             if ( client->pools[d_poolid] == NULL )
                 break;
         if ( d_poolid >= MAX_POOLS_PER_DOMAIN )
         {
-            tmh_client_err("failed... no more pool slots available for this 
%s\n",
-                   client_str);
+            tmem_client_err("failed... no more pool slots available for this 
%s\n",
+                   tmem_client_str);
             goto fail;
         }
     }
@@ -1966,7 +1960,7 @@ static NOINLINE int do_tmem_new_pool(cli_id_t this_cli_id,
             {
                 if ( shpool->uuid[0] == uuid_lo && shpool->uuid[1] == uuid_hi )
                 {
-                    tmh_client_info("(matches shared pool 
uuid=%"PRIx64".%"PRIx64") pool_id=%d\n",
+                    tmem_client_info("(matches shared pool 
uuid=%"PRIx64".%"PRIx64") pool_id=%d\n",
                         uuid_hi, uuid_lo, d_poolid);
                     client->pools[d_poolid] = global_shared_pools[s_poolid];
                     shared_pool_join(global_shared_pools[s_poolid], client);
@@ -1979,7 +1973,7 @@ static NOINLINE int do_tmem_new_pool(cli_id_t this_cli_id,
         }
         if ( first_unused_s_poolid == MAX_GLOBAL_SHARED_POOLS )
         {
-            tmh_client_warn("tmem: failed... no global shared pool slots 
available\n");
+            tmem_client_warn("tmem: failed... no global shared pool slots 
available\n");
             goto fail;
         }
         else
@@ -1995,7 +1989,7 @@ static NOINLINE int do_tmem_new_pool(cli_id_t this_cli_id,
     pool->pool_id = d_poolid;
     pool->persistent = persistent;
     pool->uuid[0] = uuid_lo; pool->uuid[1] = uuid_hi;
-    tmh_client_info("pool_id=%d\n", d_poolid);
+    tmem_client_info("pool_id=%d\n", d_poolid);
     return d_poolid;
 
 fail:
@@ -2014,19 +2008,19 @@ static int tmemc_freeze_pools(cli_id_t cli_id, int arg)
     char *s;
 
     s = destroy ? "destroyed" : ( freeze ? "frozen" : "thawed" );
-    if ( cli_id == CLI_ID_NULL )
+    if ( cli_id == TMEM_CLI_ID_NULL )
     {
         list_for_each_entry(client,&global_client_list,client_list)
             client_freeze(client,freeze);
-        tmh_client_info("tmem: all pools %s for all %ss\n", s, client_str);
+        tmem_client_info("tmem: all pools %s for all %ss\n", s, 
tmem_client_str);
     }
     else
     {
-        if ( (client = tmh_client_from_cli_id(cli_id)) == NULL)
+        if ( (client = tmem_client_from_cli_id(cli_id)) == NULL)
             return -1;
         client_freeze(client,freeze);
-        tmh_client_info("tmem: all pools %s for %s=%d\n",
-                         s, cli_id_str, cli_id);
+        tmem_client_info("tmem: all pools %s for %s=%d\n",
+                         s, tmem_cli_id_str, cli_id);
     }
     return 0;
 }
@@ -2035,10 +2029,10 @@ static int tmemc_flush_mem(cli_id_t cli_id, uint32_t kb)
 {
     uint32_t npages, flushed_pages, flushed_kb;
 
-    if ( cli_id != CLI_ID_NULL )
+    if ( cli_id != TMEM_CLI_ID_NULL )
     {
-        tmh_client_warn("tmem: %s-specific flush not supported yet, use 
--all\n",
-           client_str);
+        tmem_client_warn("tmem: %s-specific flush not supported yet, use 
--all\n",
+           tmem_client_str);
         return -1;
     }
     /* convert kb to pages, rounding up if necessary */
@@ -2078,7 +2072,7 @@ static int tmemc_list_client(client_t *c, 
tmem_cli_va_param_t buf,
              c->eph_count, c->eph_count_max,
              c->compressed_pages, c->compressed_sum_size,
              c->compress_poor, c->compress_nomem);
-    tmh_copy_to_client_buf_offset(buf,off+sum,info,n+1);
+    tmem_copy_to_client_buf_offset(buf,off+sum,info,n+1);
     sum += n;
     for ( i = 0; i < MAX_POOLS_PER_DOMAIN; i++ )
     {
@@ -2106,7 +2100,7 @@ static int tmemc_list_client(client_t *c, 
tmem_cli_va_param_t buf,
              p->flushs_found, p->flushs, p->flush_objs_found, p->flush_objs);
         if ( sum + n >= len )
             return sum;
-        tmh_copy_to_client_buf_offset(buf,off+sum,info,n+1);
+        tmem_copy_to_client_buf_offset(buf,off+sum,info,n+1);
         sum += n;
     }
     return sum;
@@ -2145,7 +2139,7 @@ static int tmemc_list_shared(tmem_cli_va_param_t buf, int 
off, uint32_t len,
              p->flushs_found, p->flushs, p->flush_objs_found, p->flush_objs);
         if ( sum + n >= len )
             return sum;
-        tmh_copy_to_client_buf_offset(buf,off+sum,info,n+1);
+        tmem_copy_to_client_buf_offset(buf,off+sum,info,n+1);
         sum += n;
     }
     return sum;
@@ -2172,7 +2166,7 @@ static int tmemc_list_global_perf(tmem_cli_va_param_t 
buf, int off,
     n += scnprintf(info+n,BSIZE-n,"\n");
     if ( sum + n >= len )
         return sum;
-    tmh_copy_to_client_buf_offset(buf,off+sum,info,n+1);
+    tmem_copy_to_client_buf_offset(buf,off+sum,info,n+1);
     sum += n;
     return sum;
 }
@@ -2190,7 +2184,7 @@ static int tmemc_list_global(tmem_cli_va_param_t buf, int 
off, uint32_t len,
       "Tt:%lu,Te:%lu,Cf:%lu,Af:%lu,Pf:%lu,Ta:%lu,"
       "Lm:%lu,Et:%lu,Ea:%lu,Rt:%lu,Ra:%lu,Rx:%lu,Fp:%lu%c",
       total_tmem_ops, errored_tmem_ops, failed_copies,
-      alloc_failed, alloc_page_failed, tmh_avail_pages(),
+      alloc_failed, alloc_page_failed, tmem_page_list_pages,
       low_on_memory, evicted_pgs,
       evict_attempts, relinq_pgs, relinq_attempts, max_evicts_per_relinq,
       total_flush_pool, use_long ? ',' : '\n');
@@ -2207,7 +2201,7 @@ static int tmemc_list_global(tmem_cli_va_param_t buf, int 
off, uint32_t len,
          tot_good_eph_puts,deduped_puts,pcd_tot_tze_size,pcd_tot_csize);
     if ( sum + n >= len )
         return sum;
-    tmh_copy_to_client_buf_offset(buf,off+sum,info,n+1);
+    tmem_copy_to_client_buf_offset(buf,off+sum,info,n+1);
     sum += n;
     return sum;
 }
@@ -2218,14 +2212,14 @@ static int tmemc_list(cli_id_t cli_id, 
tmem_cli_va_param_t buf, uint32_t len,
     client_t *client;
     int off = 0;
 
-    if ( cli_id == CLI_ID_NULL ) {
+    if ( cli_id == TMEM_CLI_ID_NULL ) {
         off = tmemc_list_global(buf,0,len,use_long);
         off += tmemc_list_shared(buf,off,len-off,use_long);
         list_for_each_entry(client,&global_client_list,client_list)
             off += tmemc_list_client(client, buf, off, len-off, use_long);
         off += tmemc_list_global_perf(buf,off,len-off,use_long);
     }
-    else if ( (client = tmh_client_from_cli_id(cli_id)) == NULL)
+    else if ( (client = tmem_client_from_cli_id(cli_id)) == NULL)
         return -1;
     else
         off = tmemc_list_client(client, buf, 0, len, use_long);
@@ -2243,30 +2237,30 @@ static int tmemc_set_var_one(client_t *client, uint32_t 
subop, uint32_t arg1)
     case TMEMC_SET_WEIGHT:
         old_weight = client->weight;
         client->weight = arg1;
-        tmh_client_info("tmem: weight set to %d for %s=%d\n",
-                        arg1, cli_id_str, cli_id);
+        tmem_client_info("tmem: weight set to %d for %s=%d\n",
+                        arg1, tmem_cli_id_str, cli_id);
         atomic_sub(old_weight,&client_weight_total);
         atomic_add(client->weight,&client_weight_total);
         break;
     case TMEMC_SET_CAP:
         client->cap = arg1;
-        tmh_client_info("tmem: cap set to %d for %s=%d\n",
-                        arg1, cli_id_str, cli_id);
+        tmem_client_info("tmem: cap set to %d for %s=%d\n",
+                        arg1, tmem_cli_id_str, cli_id);
         break;
     case TMEMC_SET_COMPRESS:
-        if ( tmh_dedup_enabled() )
+        if ( tmem_dedup_enabled() )
         {
-            tmh_client_warn("tmem: compression %s for all %ss, cannot be 
changed when tmem_dedup is enabled\n",
-                            tmh_compression_enabled() ? "enabled" : "disabled",
-                            client_str);
+            tmem_client_warn("tmem: compression %s for all %ss, cannot be 
changed when tmem_dedup is enabled\n",
+                            tmem_compression_enabled() ? "enabled" : 
"disabled",
+                            tmem_client_str);
             return -1;
         }
         client->compress = arg1 ? 1 : 0;
-        tmh_client_info("tmem: compression %s for %s=%d\n",
-            arg1 ? "enabled" : "disabled",cli_id_str,cli_id);
+        tmem_client_info("tmem: compression %s for %s=%d\n",
+            arg1 ? "enabled" : "disabled",tmem_cli_id_str,cli_id);
         break;
     default:
-        tmh_client_warn("tmem: unknown subop %d for tmemc_set_var\n", subop);
+        tmem_client_warn("tmem: unknown subop %d for tmemc_set_var\n", subop);
         return -1;
     }
     return 0;
@@ -2276,10 +2270,10 @@ static int tmemc_set_var(cli_id_t cli_id, uint32_t 
subop, uint32_t arg1)
 {
     client_t *client;
 
-    if ( cli_id == CLI_ID_NULL )
+    if ( cli_id == TMEM_CLI_ID_NULL )
         list_for_each_entry(client,&global_client_list,client_list)
             tmemc_set_var_one(client, subop, arg1);
-    else if ( (client = tmh_client_from_cli_id(cli_id)) == NULL)
+    else if ( (client = tmem_client_from_cli_id(cli_id)) == NULL)
         return -1;
     else
         tmemc_set_var_one(client, subop, arg1);
@@ -2292,12 +2286,12 @@ static NOINLINE int tmemc_shared_pool_auth(cli_id_t 
cli_id, uint64_t uuid_lo,
     client_t *client;
     int i, free = -1;
 
-    if ( cli_id == CLI_ID_NULL )
+    if ( cli_id == TMEM_CLI_ID_NULL )
     {
         global_shared_auth = auth;
         return 1;
     }
-    client = tmh_client_from_cli_id(cli_id);
+    client = tmem_client_from_cli_id(cli_id);
     if ( client == NULL )
         return -EINVAL;
     for ( i = 0; i < MAX_GLOBAL_SHARED_POOLS; i++)
@@ -2326,7 +2320,7 @@ static NOINLINE int tmemc_shared_pool_auth(cli_id_t 
cli_id, uint64_t uuid_lo,
 static NOINLINE int tmemc_save_subop(int cli_id, uint32_t pool_id,
                         uint32_t subop, tmem_cli_va_param_t buf, uint32_t arg1)
 {
-    client_t *client = tmh_client_from_cli_id(cli_id);
+    client_t *client = tmem_client_from_cli_id(cli_id);
     pool_t *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
                    ? NULL : client->pools[pool_id];
     uint32_t p;
@@ -2394,7 +2388,7 @@ static NOINLINE int tmemc_save_subop(int cli_id, uint32_t 
pool_id,
     case TMEMC_SAVE_GET_POOL_UUID:
          if ( pool == NULL )
              break;
-        tmh_copy_to_client_buf(buf, pool->uuid, 2);
+        tmem_copy_to_client_buf(buf, pool->uuid, 2);
         rc = 0;
         break;
     case TMEMC_SAVE_END:
@@ -2415,7 +2409,7 @@ static NOINLINE int tmemc_save_subop(int cli_id, uint32_t 
pool_id,
 static NOINLINE int tmemc_save_get_next_page(int cli_id, uint32_t pool_id,
                         tmem_cli_va_param_t buf, uint32_t bufsize)
 {
-    client_t *client = tmh_client_from_cli_id(cli_id);
+    client_t *client = tmem_client_from_cli_id(cli_id);
     pool_t *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
                    ? NULL : client->pools[pool_id];
     pgp_t *pgp;
@@ -2458,8 +2452,8 @@ static NOINLINE int tmemc_save_get_next_page(int cli_id, 
uint32_t pool_id,
     BUILD_BUG_ON(sizeof(h.oid) != sizeof(oid));
     memcpy(h.oid, oid.oid, sizeof(h.oid));
     h.index = pgp->index;
-    tmh_copy_to_client_buf(buf, &h, 1);
-    tmh_client_buf_add(buf, sizeof(h));
+    tmem_copy_to_client_buf(buf, &h, 1);
+    tmem_client_buf_add(buf, sizeof(h));
     ret = do_tmem_get(pool, &oid, pgp->index, 0, 0, 0, pagesize, buf);
 
 out:
@@ -2470,7 +2464,7 @@ out:
 static NOINLINE int tmemc_save_get_next_inv(int cli_id, tmem_cli_va_param_t 
buf,
                         uint32_t bufsize)
 {
-    client_t *client = tmh_client_from_cli_id(cli_id);
+    client_t *client = tmem_client_from_cli_id(cli_id);
     pgp_t *pgp;
     struct tmem_handle h;
     int ret = 0;
@@ -2502,7 +2496,7 @@ static NOINLINE int tmemc_save_get_next_inv(int cli_id, 
tmem_cli_va_param_t buf,
     BUILD_BUG_ON(sizeof(h.oid) != sizeof(pgp->inv_oid));
     memcpy(h.oid, pgp->inv_oid.oid, sizeof(h.oid));
     h.index = pgp->index;
-    tmh_copy_to_client_buf(buf, &h, 1);
+    tmem_copy_to_client_buf(buf, &h, 1);
     ret = 1;
 out:
     tmem_spin_unlock(&pers_lists_spinlock);
@@ -2512,7 +2506,7 @@ out:
 static int tmemc_restore_put_page(int cli_id, uint32_t pool_id, OID *oidp,
                       uint32_t index, tmem_cli_va_param_t buf, uint32_t 
bufsize)
 {
-    client_t *client = tmh_client_from_cli_id(cli_id);
+    client_t *client = tmem_client_from_cli_id(cli_id);
     pool_t *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
                    ? NULL : client->pools[pool_id];
 
@@ -2524,7 +2518,7 @@ static int tmemc_restore_put_page(int cli_id, uint32_t 
pool_id, OID *oidp,
 static int tmemc_restore_flush_page(int cli_id, uint32_t pool_id, OID *oidp,
                         uint32_t index)
 {
-    client_t *client = tmh_client_from_cli_id(cli_id);
+    client_t *client = tmem_client_from_cli_id(cli_id);
     pool_t *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
                    ? NULL : client->pools[pool_id];
 
@@ -2540,7 +2534,7 @@ static NOINLINE int do_tmem_control(struct tmem_op *op)
     uint32_t subop = op->u.ctrl.subop;
     OID *oidp = (OID *)(&op->u.ctrl.oid[0]);
 
-    if (!tmh_current_is_privileged())
+    if (!tmem_current_is_privileged())
         return -EPERM;
 
     switch(subop)
@@ -2564,7 +2558,7 @@ static NOINLINE int do_tmem_control(struct tmem_op *op)
         ret = tmemc_set_var(op->u.ctrl.cli_id,subop,op->u.ctrl.arg1);
         break;
     case TMEMC_QUERY_FREEABLE_MB:
-        ret = tmh_freeable_pages() >> (20 - PAGE_SHIFT);
+        ret = tmem_freeable_pages() >> (20 - PAGE_SHIFT);
         break;
     case TMEMC_SAVE_BEGIN:
     case TMEMC_RESTORE_BEGIN:
@@ -2612,7 +2606,7 @@ static NOINLINE int do_tmem_control(struct tmem_op *op)
 EXPORT long do_tmem_op(tmem_cli_op_t uops)
 {
     struct tmem_op op;
-    client_t *client = tmh_client_from_current();
+    client_t *client = tmem_client_from_current();
     pool_t *pool = NULL;
     OID *oidp;
     int rc = 0;
@@ -2630,14 +2624,14 @@ EXPORT long do_tmem_op(tmem_cli_op_t uops)
     if ( !tmem_initialized )
         return -ENODEV;
 
-    if ( !tmh_current_permitted() )
+    if ( !tmem_current_permitted() )
         return -EPERM;
 
     total_tmem_ops++;
 
-    if ( tmh_lock_all )
+    if ( tmem_lock_all )
     {
-        if ( tmh_lock_all > 1 )
+        if ( tmem_lock_all > 1 )
             spin_lock_irq(&tmem_spinlock);
         else
             spin_lock(&tmem_spinlock);
@@ -2650,21 +2644,21 @@ EXPORT long do_tmem_op(tmem_cli_op_t uops)
     DUP_START_CYC_COUNTER(flush,succ_get);
     DUP_START_CYC_COUNTER(flush_obj,succ_get);
 
-    if ( client != NULL && tmh_client_is_dying(client) )
+    if ( client != NULL && tmem_client_is_dying(client) )
     {
         rc = -ENODEV;
-        if ( tmh_lock_all )
+        if ( tmem_lock_all )
             goto out;
  simple_error:
         errored_tmem_ops++;
         return rc;
     }
 
-    if ( unlikely(tmh_get_tmemop_from_client(&op, uops) != 0) )
+    if ( unlikely(tmem_get_tmemop_from_client(&op, uops) != 0) )
     {
-        tmh_client_err("tmem: can't get tmem struct from %s\n", client_str);
+        tmem_client_err("tmem: can't get tmem struct from %s\n", 
tmem_client_str);
         rc = -EFAULT;
-        if ( !tmh_lock_all )
+        if ( !tmem_lock_all )
             goto simple_error;
         goto out;
     }
@@ -2694,10 +2688,10 @@ EXPORT long do_tmem_op(tmem_cli_op_t uops)
     {
         tmem_write_lock(&tmem_rwlock);
         tmem_write_lock_set = 1;
-        if ( (client = client_create(tmh_get_cli_id_from_current())) == NULL )
+        if ( (client = client_create(tmem_get_cli_id_from_current())) == NULL )
         {
-            tmh_client_err("tmem: can't create tmem structure for %s\n",
-                           client_str);
+            tmem_client_err("tmem: can't create tmem structure for %s\n",
+                           tmem_client_str);
             rc = -ENOMEM;
             goto out;
         }
@@ -2721,7 +2715,7 @@ EXPORT long do_tmem_op(tmem_cli_op_t uops)
         if ( ((uint32_t)op.pool_id >= MAX_POOLS_PER_DOMAIN) ||
              ((pool = client->pools[op.pool_id]) == NULL) )
         {
-            tmh_client_err("tmem: operation requested on uncreated pool\n");
+            tmem_client_err("tmem: operation requested on uncreated pool\n");
             rc = -ENODEV;
             goto out;
         }
@@ -2732,24 +2726,24 @@ EXPORT long do_tmem_op(tmem_cli_op_t uops)
     switch ( op.cmd )
     {
     case TMEM_NEW_POOL:
-        rc = do_tmem_new_pool(CLI_ID_NULL, 0, op.u.creat.flags,
+        rc = do_tmem_new_pool(TMEM_CLI_ID_NULL, 0, op.u.creat.flags,
                               op.u.creat.uuid[0], op.u.creat.uuid[1]);
         break;
     case TMEM_NEW_PAGE:
         tmem_ensure_avail_pages();
         rc = do_tmem_put(pool, oidp, op.u.gen.index, op.u.gen.cmfn, 0, 0, 0,
-                         tmh_cli_buf_null);
+                         tmem_cli_buf_null);
         break;
     case TMEM_PUT_PAGE:
         tmem_ensure_avail_pages();
         rc = do_tmem_put(pool, oidp, op.u.gen.index, op.u.gen.cmfn, 0, 0,
-                         PAGE_SIZE, tmh_cli_buf_null);
+                         PAGE_SIZE, tmem_cli_buf_null);
         if (rc == 1) succ_put = 1;
         else non_succ_put = 1;
         break;
     case TMEM_GET_PAGE:
         rc = do_tmem_get(pool, oidp, op.u.gen.index, op.u.gen.cmfn,
-                         0, 0, PAGE_SIZE, tmh_cli_buf_null);
+                         0, 0, PAGE_SIZE, tmem_cli_buf_null);
         if (rc == 1) succ_get = 1;
         else non_succ_get = 1;
         break;
@@ -2768,21 +2762,21 @@ EXPORT long do_tmem_op(tmem_cli_op_t uops)
     case TMEM_READ:
         rc = do_tmem_get(pool, oidp, op.u.gen.index, op.u.gen.cmfn,
                          op.u.gen.tmem_offset, op.u.gen.pfn_offset,
-                         op.u.gen.len, tmh_cli_buf_null);
+                         op.u.gen.len, tmem_cli_buf_null);
         break;
     case TMEM_WRITE:
         rc = do_tmem_put(pool, oidp,
                          op.u.gen.index, op.u.gen.cmfn,
                          op.u.gen.tmem_offset, op.u.gen.pfn_offset,
-                         op.u.gen.len, tmh_cli_buf_null);
+                         op.u.gen.len, tmem_cli_buf_null);
         break;
     case TMEM_XCHG:
         /* need to hold global lock to ensure xchg is atomic */
-        tmh_client_warn("tmem_xchg op not implemented yet\n");
+        tmem_client_warn("tmem_xchg op not implemented yet\n");
         rc = 0;
         break;
     default:
-        tmh_client_warn("tmem: op %d not implemented\n", op.cmd);
+        tmem_client_warn("tmem: op %d not implemented\n", op.cmd);
         rc = 0;
         break;
     }
@@ -2803,9 +2797,9 @@ out:
     else if ( flush_obj )
         END_CYC_COUNTER_CLI(flush_obj,client);
 
-    if ( tmh_lock_all )
+    if ( tmem_lock_all )
     {
-        if ( tmh_lock_all > 1 )
+        if ( tmem_lock_all > 1 )
             spin_unlock_irq(&tmem_spinlock);
         else
             spin_unlock(&tmem_spinlock);
@@ -2829,22 +2823,22 @@ EXPORT void tmem_destroy(void *v)
     if ( client == NULL )
         return;
 
-    if ( !tmh_client_is_dying(client) )
+    if ( !tmem_client_is_dying(client) )
     {
         printk("tmem: tmem_destroy can only destroy dying client\n");
         return;
     }
 
-    if ( tmh_lock_all )
+    if ( tmem_lock_all )
         spin_lock(&tmem_spinlock);
     else
         write_lock(&tmem_rwlock);
 
     printk("tmem: flushing tmem pools for %s=%d\n",
-           cli_id_str, client->cli_id);
+           tmem_cli_id_str, client->cli_id);
     client_flush(client, 1);
 
-    if ( tmh_lock_all )
+    if ( tmem_lock_all )
         spin_unlock(&tmem_spinlock);
     else
         write_unlock(&tmem_rwlock);
@@ -2855,15 +2849,15 @@ EXPORT void tmem_freeze_all(unsigned char key)
 {
     static int freeze = 0;
  
-    if ( tmh_lock_all )
+    if ( tmem_lock_all )
         spin_lock(&tmem_spinlock);
     else
         write_lock(&tmem_rwlock);
 
     freeze = !freeze;
-    tmemc_freeze_pools(CLI_ID_NULL,freeze);
+    tmemc_freeze_pools(TMEM_CLI_ID_NULL,freeze);
 
-    if ( tmh_lock_all )
+    if ( tmem_lock_all )
         spin_unlock(&tmem_spinlock);
     else
         write_unlock(&tmem_rwlock);
@@ -2877,7 +2871,7 @@ EXPORT void *tmem_relinquish_pages(unsigned int order, 
unsigned int memflags)
     unsigned long evicts_per_relinq = 0;
     int max_evictions = 10;
 
-    if (!tmh_enabled() || !tmh_freeable_pages())
+    if (!tmem_enabled() || !tmem_freeable_pages())
         return NULL;
 
     relinq_attempts++;
@@ -2889,15 +2883,15 @@ EXPORT void *tmem_relinquish_pages(unsigned int order, 
unsigned int memflags)
         return NULL;
     }
 
-    if ( tmh_called_from_tmem(memflags) )
+    if ( tmem_called_from_tmem(memflags) )
     {
-        if ( tmh_lock_all )
+        if ( tmem_lock_all )
             spin_lock(&tmem_spinlock);
         else
             read_lock(&tmem_rwlock);
     }
 
-    while ( (pfp = tmh_alloc_page(NULL,1)) == NULL )
+    while ( (pfp = tmem_alloc_page(NULL,1)) == NULL )
     {
         if ( (max_evictions-- <= 0) || !tmem_evict())
             break;
@@ -2905,13 +2899,13 @@ EXPORT void *tmem_relinquish_pages(unsigned int order, 
unsigned int memflags)
     }
     if ( evicts_per_relinq > max_evicts_per_relinq )
         max_evicts_per_relinq = evicts_per_relinq;
-    tmh_scrub_page(pfp, memflags);
+    tmem_scrub_page(pfp, memflags);
     if ( pfp != NULL )
         relinq_pgs++;
 
-    if ( tmh_called_from_tmem(memflags) )
+    if ( tmem_called_from_tmem(memflags) )
     {
-        if ( tmh_lock_all )
+        if ( tmem_lock_all )
             spin_unlock(&tmem_spinlock);
         else
             read_unlock(&tmem_rwlock);
@@ -2920,33 +2914,33 @@ EXPORT void *tmem_relinquish_pages(unsigned int order, 
unsigned int memflags)
     return pfp;
 }
 
-EXPORT unsigned long tmem_freeable_pages(void)
+unsigned long tmem_freeable_pages(void)
 {
-    return tmh_freeable_pages();
+    return tmem_page_list_pages + _atomic_read(freeable_page_count);
 }
 
 /* called at hypervisor startup */
 static int __init init_tmem(void)
 {
     int i;
-    if ( !tmh_enabled() )
+    if ( !tmem_enabled() )
         return 0;
 
-    if ( tmh_dedup_enabled() )
+    if ( tmem_dedup_enabled() )
         for (i = 0; i < 256; i++ )
         {
             pcd_tree_roots[i] = RB_ROOT;
             rwlock_init(&pcd_tree_rwlocks[i]);
         }
 
-    if ( tmh_init() )
+    if ( tmem_init() )
     {
         printk("tmem: initialized comp=%d dedup=%d tze=%d global-lock=%d\n",
-            tmh_compression_enabled(), tmh_dedup_enabled(), tmh_tze_enabled(),
-            tmh_lock_all);
-        if ( tmh_dedup_enabled()&&tmh_compression_enabled()&&tmh_tze_enabled() 
)
+            tmem_compression_enabled(), tmem_dedup_enabled(), 
tmem_tze_enabled(),
+            tmem_lock_all);
+        if ( 
tmem_dedup_enabled()&&tmem_compression_enabled()&&tmem_tze_enabled() )
         {
-            tmh_tze_disable();
+            tmem_tze_disable();
             printk("tmem: tze and compression not compatible, disabling 
tze\n");
         }
         tmem_initialized = 1;
diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
index e1e83d2..bb2b601 100644
--- a/xen/common/tmem_xen.c
+++ b/xen/common/tmem_xen.c
@@ -46,7 +46,7 @@ static DEFINE_PER_CPU_READ_MOSTLY(unsigned char *, workmem);
 static DEFINE_PER_CPU_READ_MOSTLY(unsigned char *, dstmem);
 static DEFINE_PER_CPU_READ_MOSTLY(void *, scratch_page);
 
-void tmh_copy_page(char *to, char*from)
+void tmem_copy_page(char *to, char*from)
 {
     DECL_LOCAL_CYC_COUNTER(pg_copy);
     START_CYC_COUNTER(pg_copy);
@@ -109,7 +109,7 @@ static inline void cli_put_page(void *cli_va, struct 
page_info *cli_pfp,
 }
 #endif
 
-EXPORT int tmh_copy_from_client(struct page_info *pfp,
+EXPORT int tmem_copy_from_client(struct page_info *pfp,
     xen_pfn_t cmfn, pagesize_t tmem_offset,
     pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t clibuf)
 {
@@ -140,7 +140,7 @@ EXPORT int tmh_copy_from_client(struct page_info *pfp,
     }
     smp_mb();
     if ( len == PAGE_SIZE && !tmem_offset && !pfn_offset && cli_va )
-        tmh_copy_page(tmem_va, cli_va);
+        tmem_copy_page(tmem_va, cli_va);
     else if ( (tmem_offset+len <= PAGE_SIZE) &&
               (pfn_offset+len <= PAGE_SIZE) )
     {
@@ -158,7 +158,7 @@ EXPORT int tmh_copy_from_client(struct page_info *pfp,
     return rc;
 }
 
-EXPORT int tmh_compress_from_client(xen_pfn_t cmfn,
+EXPORT int tmem_compress_from_client(xen_pfn_t cmfn,
     void **out_va, size_t *out_len, tmem_cli_va_param_t clibuf)
 {
     int ret = 0;
@@ -190,7 +190,7 @@ EXPORT int tmh_compress_from_client(xen_pfn_t cmfn,
     return 1;
 }
 
-EXPORT int tmh_copy_to_client(xen_pfn_t cmfn, struct page_info *pfp,
+EXPORT int tmem_copy_to_client(xen_pfn_t cmfn, struct page_info *pfp,
     pagesize_t tmem_offset, pagesize_t pfn_offset, pagesize_t len,
     tmem_cli_va_param_t clibuf)
 {
@@ -211,7 +211,7 @@ EXPORT int tmh_copy_to_client(xen_pfn_t cmfn, struct 
page_info *pfp,
     tmem_mfn = page_to_mfn(pfp);
     tmem_va = map_domain_page(tmem_mfn);
     if ( len == PAGE_SIZE && !tmem_offset && !pfn_offset && cli_va )
-        tmh_copy_page(cli_va, tmem_va);
+        tmem_copy_page(cli_va, tmem_va);
     else if ( (tmem_offset+len <= PAGE_SIZE) && (pfn_offset+len <= PAGE_SIZE) )
     {
         if ( cli_va )
@@ -229,7 +229,7 @@ EXPORT int tmh_copy_to_client(xen_pfn_t cmfn, struct 
page_info *pfp,
     return rc;
 }
 
-EXPORT int tmh_decompress_to_client(xen_pfn_t cmfn, void *tmem_va,
+EXPORT int tmem_decompress_to_client(xen_pfn_t cmfn, void *tmem_va,
                                     size_t size, tmem_cli_va_param_t clibuf)
 {
     unsigned long cli_mfn = 0;
@@ -258,7 +258,7 @@ EXPORT int tmh_decompress_to_client(xen_pfn_t cmfn, void 
*tmem_va,
     return 1;
 }
 
-EXPORT int tmh_copy_tze_to_client(xen_pfn_t cmfn, void *tmem_va,
+EXPORT int tmem_copy_tze_to_client(xen_pfn_t cmfn, void *tmem_va,
                                     pagesize_t len)
 {
     void *cli_va;
@@ -282,30 +282,30 @@ EXPORT int tmh_copy_tze_to_client(xen_pfn_t cmfn, void 
*tmem_va,
 
 /******************  XEN-SPECIFIC MEMORY ALLOCATION ********************/
 
-EXPORT struct xmem_pool *tmh_mempool = 0;
-EXPORT unsigned int tmh_mempool_maxalloc = 0;
+EXPORT struct xmem_pool *tmem_mempool = 0;
+EXPORT unsigned int tmem_mempool_maxalloc = 0;
 
-EXPORT DEFINE_SPINLOCK(tmh_page_list_lock);
-EXPORT PAGE_LIST_HEAD(tmh_page_list);
-EXPORT unsigned long tmh_page_list_pages = 0;
+EXPORT DEFINE_SPINLOCK(tmem_page_list_lock);
+EXPORT PAGE_LIST_HEAD(tmem_page_list);
+EXPORT unsigned long tmem_page_list_pages = 0;
 
-/* free anything on tmh_page_list to Xen's scrub list */
-EXPORT void tmh_release_avail_pages_to_host(void)
+/* free anything on tmem_page_list to Xen's scrub list */
+EXPORT void tmem_release_avail_pages_to_host(void)
 {
-    spin_lock(&tmh_page_list_lock);
-    while ( !page_list_empty(&tmh_page_list) )
+    spin_lock(&tmem_page_list_lock);
+    while ( !page_list_empty(&tmem_page_list) )
     {
-        struct page_info *pg = page_list_remove_head(&tmh_page_list);
+        struct page_info *pg = page_list_remove_head(&tmem_page_list);
         scrub_one_page(pg);
-        tmh_page_list_pages--;
+        tmem_page_list_pages--;
         free_domheap_page(pg);
     }
-    ASSERT(tmh_page_list_pages == 0);
-    INIT_PAGE_LIST_HEAD(&tmh_page_list);
-    spin_unlock(&tmh_page_list_lock);
+    ASSERT(tmem_page_list_pages == 0);
+    INIT_PAGE_LIST_HEAD(&tmem_page_list);
+    spin_unlock(&tmem_page_list_lock);
 }
 
-EXPORT void tmh_scrub_page(struct page_info *pi, unsigned int memflags)
+EXPORT void tmem_scrub_page(struct page_info *pi, unsigned int memflags)
 {
     if ( pi == NULL )
         return;
@@ -313,84 +313,84 @@ EXPORT void tmh_scrub_page(struct page_info *pi, unsigned 
int memflags)
         scrub_one_page(pi);
 }
 
-static noinline void *tmh_mempool_page_get(unsigned long size)
+static noinline void *tmem_mempool_page_get(unsigned long size)
 {
     struct page_info *pi;
 
     ASSERT(size == PAGE_SIZE);
-    if ( (pi = tmh_alloc_page(NULL,0)) == NULL )
+    if ( (pi = tmem_alloc_page(NULL,0)) == NULL )
         return NULL;
     ASSERT(IS_VALID_PAGE(pi));
     return page_to_virt(pi);
 }
 
-static void tmh_mempool_page_put(void *page_va)
+static void tmem_mempool_page_put(void *page_va)
 {
     ASSERT(IS_PAGE_ALIGNED(page_va));
-    tmh_free_page(virt_to_page(page_va));
+    tmem_free_page(virt_to_page(page_va));
 }
 
-static int __init tmh_mempool_init(void)
+static int __init tmem_mempool_init(void)
 {
-    tmh_mempool = xmem_pool_create("tmem", tmh_mempool_page_get,
-        tmh_mempool_page_put, PAGE_SIZE, 0, PAGE_SIZE);
-    if ( tmh_mempool )
-        tmh_mempool_maxalloc = xmem_pool_maxalloc(tmh_mempool);
-    return tmh_mempool != NULL;
+    tmem_mempool = xmem_pool_create("tmem", tmem_mempool_page_get,
+        tmem_mempool_page_put, PAGE_SIZE, 0, PAGE_SIZE);
+    if ( tmem_mempool )
+        tmem_mempool_maxalloc = xmem_pool_maxalloc(tmem_mempool);
+    return tmem_mempool != NULL;
 }
 
 /* persistent pools are per-domain */
 
-static void *tmh_persistent_pool_page_get(unsigned long size)
+static void *tmem_persistent_pool_page_get(unsigned long size)
 {
     struct page_info *pi;
     struct domain *d = current->domain;
 
     ASSERT(size == PAGE_SIZE);
-    if ( (pi = _tmh_alloc_page_thispool(d)) == NULL )
+    if ( (pi = _tmem_alloc_page_thispool(d)) == NULL )
         return NULL;
     ASSERT(IS_VALID_PAGE(pi));
     return page_to_virt(pi);
 }
 
-static void tmh_persistent_pool_page_put(void *page_va)
+static void tmem_persistent_pool_page_put(void *page_va)
 {
     struct page_info *pi;
 
     ASSERT(IS_PAGE_ALIGNED(page_va));
     pi = mfn_to_page(virt_to_mfn(page_va));
     ASSERT(IS_VALID_PAGE(pi));
-    _tmh_free_page_thispool(pi);
+    _tmem_free_page_thispool(pi);
 }
 
 /******************  XEN-SPECIFIC CLIENT HANDLING ********************/
 
-EXPORT tmh_client_t *tmh_client_init(cli_id_t cli_id)
+EXPORT tmem_client_t *tmem_client_init(cli_id_t cli_id)
 {
-    tmh_client_t *tmh;
+    tmem_client_t *tmem;
     char name[5];
     int i, shift;
 
-    if ( (tmh = xmalloc(tmh_client_t)) == NULL )
+    if ( (tmem = xmalloc(tmem_client_t)) == NULL )
         return NULL;
     for (i = 0, shift = 12; i < 4; shift -=4, i++)
         name[i] = (((unsigned short)cli_id >> shift) & 0xf) + '0';
     name[4] = '\0';
-    tmh->persistent_pool = xmem_pool_create(name, tmh_persistent_pool_page_get,
-        tmh_persistent_pool_page_put, PAGE_SIZE, 0, PAGE_SIZE);
-    if ( tmh->persistent_pool == NULL )
+    tmem->persistent_pool = xmem_pool_create(name, 
tmem_persistent_pool_page_get,
+        tmem_persistent_pool_page_put, PAGE_SIZE, 0, PAGE_SIZE);
+    if ( tmem->persistent_pool == NULL )
     {
-        xfree(tmh);
+        xfree(tmem);
         return NULL;
     }
-    return tmh;
+    return tmem;
 }
 
-EXPORT void tmh_client_destroy(tmh_client_t *tmh)
+EXPORT void tmem_client_destroy(tmem_client_t *tmem)
 {
-    ASSERT(tmh->domain->is_dying);
-    xmem_pool_destroy(tmh->persistent_pool);
-    tmh->domain = NULL;
+    ASSERT(tmem->domain->is_dying);
+    xmem_pool_destroy(tmem->persistent_pool);
+    tmem->domain = NULL;
 }
 
 /******************  XEN-SPECIFIC HOST INITIALIZATION ********************/
@@ -443,11 +443,11 @@ static struct notifier_block cpu_nfb = {
     .notifier_call = cpu_callback
 };
 
-EXPORT int __init tmh_init(void)
+EXPORT int __init tmem_init(void)
 {
     unsigned int cpu;
 
-    if ( !tmh_mempool_init() )
+    if ( !tmem_mempool_init() )
         return 0;
 
     dstmem_order = get_order_from_pages(LZO_DSTMEM_PAGES);
diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
index b24246c..dc37861 100644
--- a/xen/include/xen/tmem_xen.h
+++ b/xen/include/xen/tmem_xen.h
@@ -26,7 +26,7 @@ struct tmem_host_dependent_client {
     struct domain *domain;
     struct xmem_pool *persistent_pool;
 };
-typedef struct tmem_host_dependent_client tmh_client_t;
+typedef struct tmem_host_dependent_client tmem_client_t;
 
 typedef uint32_t pagesize_t;  /* like size_t, must handle largest PAGE_SIZE */
 
@@ -34,55 +34,55 @@ typedef uint32_t pagesize_t;  /* like size_t, must handle 
largest PAGE_SIZE */
   ((void *)((((unsigned long)addr + (PAGE_SIZE - 1)) & PAGE_MASK)) == addr)
 #define IS_VALID_PAGE(_pi)  ( mfn_valid(page_to_mfn(_pi)) )
 
-extern struct xmem_pool *tmh_mempool;
-extern unsigned int tmh_mempool_maxalloc;
-extern struct page_list_head tmh_page_list;
-extern spinlock_t tmh_page_list_lock;
-extern unsigned long tmh_page_list_pages;
+extern struct xmem_pool *tmem_mempool;
+extern unsigned int tmem_mempool_maxalloc;
+extern struct page_list_head tmem_page_list;
+extern spinlock_t tmem_page_list_lock;
+extern unsigned long tmem_page_list_pages;
 extern atomic_t freeable_page_count;
 
 extern spinlock_t tmem_lock;
 extern spinlock_t tmem_spinlock;
 extern rwlock_t tmem_rwlock;
 
-extern void tmh_copy_page(char *to, char*from);
-extern int tmh_init(void);
-#define tmh_hash hash_long
+extern void tmem_copy_page(char *to, char*from);
+extern int tmem_init(void);
+#define tmem_hash hash_long
 
-extern void tmh_release_avail_pages_to_host(void);
-extern void tmh_scrub_page(struct page_info *pi, unsigned int memflags);
+extern void tmem_release_avail_pages_to_host(void);
+extern void tmem_scrub_page(struct page_info *pi, unsigned int memflags);
 
 extern bool_t opt_tmem_compress;
-static inline bool_t tmh_compression_enabled(void)
+static inline bool_t tmem_compression_enabled(void)
 {
     return opt_tmem_compress;
 }
 
 extern bool_t opt_tmem_dedup;
-static inline bool_t tmh_dedup_enabled(void)
+static inline bool_t tmem_dedup_enabled(void)
 {
     return opt_tmem_dedup;
 }
 
 extern bool_t opt_tmem_tze;
-static inline bool_t tmh_tze_enabled(void)
+static inline bool_t tmem_tze_enabled(void)
 {
     return opt_tmem_tze;
 }
 
-static inline void tmh_tze_disable(void)
+static inline void tmem_tze_disable(void)
 {
     opt_tmem_tze = 0;
 }
 
 extern bool_t opt_tmem_shared_auth;
-static inline bool_t tmh_shared_auth(void)
+static inline bool_t tmem_shared_auth(void)
 {
     return opt_tmem_shared_auth;
 }
 
 extern bool_t opt_tmem;
-static inline bool_t tmh_enabled(void)
+static inline bool_t tmem_enabled(void)
 {
     return opt_tmem;
 }
@@ -93,30 +93,25 @@ extern int opt_tmem_lock;
  * Memory free page list management
  */
 
-static inline struct page_info *tmh_page_list_get(void)
+static inline struct page_info *tmem_page_list_get(void)
 {
     struct page_info *pi;
 
-    spin_lock(&tmh_page_list_lock);
-    if ( (pi = page_list_remove_head(&tmh_page_list)) != NULL )
-        tmh_page_list_pages--;
-    spin_unlock(&tmh_page_list_lock);
+    spin_lock(&tmem_page_list_lock);
+    if ( (pi = page_list_remove_head(&tmem_page_list)) != NULL )
+        tmem_page_list_pages--;
+    spin_unlock(&tmem_page_list_lock);
     ASSERT((pi == NULL) || IS_VALID_PAGE(pi));
     return pi;
 }
 
-static inline void tmh_page_list_put(struct page_info *pi)
+static inline void tmem_page_list_put(struct page_info *pi)
 {
     ASSERT(IS_VALID_PAGE(pi));
-    spin_lock(&tmh_page_list_lock);
-    page_list_add(pi, &tmh_page_list);
-    tmh_page_list_pages++;
-    spin_unlock(&tmh_page_list_lock);
-}
-
-static inline unsigned long tmh_avail_pages(void)
-{
-    return tmh_page_list_pages;
+    spin_lock(&tmem_page_list_lock);
+    page_list_add(pi, &tmem_page_list);
+    tmem_page_list_pages++;
+    spin_unlock(&tmem_page_list_lock);
 }
 
 /*
@@ -127,36 +122,36 @@ static inline bool_t domain_fully_allocated(struct domain *d)
 {
     return ( d->tot_pages >= d->max_pages );
 }
-#define tmh_client_memory_fully_allocated(_pool) \
- domain_fully_allocated(_pool->client->tmh->domain)
+#define tmem_client_memory_fully_allocated(_pool) \
+ domain_fully_allocated(_pool->client->tmem->domain)
 
-static inline void *_tmh_alloc_subpage_thispool(struct xmem_pool *cmem_mempool,
+static inline void *_tmem_alloc_subpage_thispool(struct xmem_pool *cmem_mempool,
                                                  size_t size, size_t align)
 {
 #if 0
     if ( d->tot_pages >= d->max_pages )
         return NULL;
 #endif
-    ASSERT( size < tmh_mempool_maxalloc );
+    ASSERT( size < tmem_mempool_maxalloc );
     if ( cmem_mempool == NULL )
         return NULL;
     return xmem_pool_alloc(size, cmem_mempool);
 }
-#define tmh_alloc_subpage_thispool(_pool, _s, _a) \
-            _tmh_alloc_subpage_thispool(pool->client->tmh->persistent_pool, \
+#define tmem_alloc_subpage_thispool(_pool, _s, _a) \
+            _tmem_alloc_subpage_thispool(pool->client->tmem->persistent_pool, \
                                          _s, _a)
 
-static inline void _tmh_free_subpage_thispool(struct xmem_pool *cmem_mempool,
+static inline void _tmem_free_subpage_thispool(struct xmem_pool *cmem_mempool,
                                                void *ptr, size_t size)
 {
-    ASSERT( size < tmh_mempool_maxalloc );
+    ASSERT( size < tmem_mempool_maxalloc );
     ASSERT( cmem_mempool != NULL );
     xmem_pool_free(ptr,cmem_mempool);
 }
-#define tmh_free_subpage_thispool(_pool, _p, _s) \
- _tmh_free_subpage_thispool(_pool->client->tmh->persistent_pool, _p, _s)
+#define tmem_free_subpage_thispool(_pool, _p, _s) \
+ _tmem_free_subpage_thispool(_pool->client->tmem->persistent_pool, _p, _s)
 
-static inline struct page_info *_tmh_alloc_page_thispool(struct domain *d)
+static inline struct page_info *_tmem_alloc_page_thispool(struct domain *d)
 {
     struct page_info *pi;
 
@@ -166,14 +161,14 @@ static inline struct page_info *_tmh_alloc_page_thispool(struct domain *d)
     if ( d->tot_pages >= d->max_pages )
         return NULL;
 
-    if ( tmh_page_list_pages )
+    if ( tmem_page_list_pages )
     {
-        if ( (pi = tmh_page_list_get()) != NULL )
+        if ( (pi = tmem_page_list_get()) != NULL )
         {
             if ( donate_page(d,pi,0) == 0 )
                 goto out;
             else
-                tmh_page_list_put(pi);
+                tmem_page_list_put(pi);
         }
     }
 
@@ -183,16 +178,16 @@ out:
     ASSERT((pi == NULL) || IS_VALID_PAGE(pi));
     return pi;
 }
-#define tmh_alloc_page_thispool(_pool) \
-    _tmh_alloc_page_thispool(_pool->client->tmh->domain)
+#define tmem_alloc_page_thispool(_pool) \
+    _tmem_alloc_page_thispool(_pool->client->tmem->domain)
 
-static inline void _tmh_free_page_thispool(struct page_info *pi)
+static inline void _tmem_free_page_thispool(struct page_info *pi)
 {
     struct domain *d = page_get_owner(pi);
 
     ASSERT(IS_VALID_PAGE(pi));
     if ( (d == NULL) || steal_page(d,pi,0) == 0 )
-        tmh_page_list_put(pi);
+        tmem_page_list_put(pi);
     else
     {
         scrub_one_page(pi);
@@ -200,30 +195,30 @@ static inline void _tmh_free_page_thispool(struct page_info *pi)
         free_domheap_pages(pi,0);
     }
 }
-#define tmh_free_page_thispool(_pool,_pg) \
-    _tmh_free_page_thispool(_pg)
+#define tmem_free_page_thispool(_pool,_pg) \
+    _tmem_free_page_thispool(_pg)
 
 /*
  * Memory allocation for ephemeral (non-persistent) data
  */
 
-static inline void *tmh_alloc_subpage(void *pool, size_t size,
+static inline void *tmem_alloc_subpage(void *pool, size_t size,
                                                  size_t align)
 {
-    ASSERT( size < tmh_mempool_maxalloc );
-    ASSERT( tmh_mempool != NULL );
-    return xmem_pool_alloc(size, tmh_mempool);
+    ASSERT( size < tmem_mempool_maxalloc );
+    ASSERT( tmem_mempool != NULL );
+    return xmem_pool_alloc(size, tmem_mempool);
 }
 
-static inline void tmh_free_subpage(void *ptr, size_t size)
+static inline void tmem_free_subpage(void *ptr, size_t size)
 {
-    ASSERT( size < tmh_mempool_maxalloc );
-    xmem_pool_free(ptr,tmh_mempool);
+    ASSERT( size < tmem_mempool_maxalloc );
+    xmem_pool_free(ptr,tmem_mempool);
 }
 
-static inline struct page_info *tmh_alloc_page(void *pool, int no_heap)
+static inline struct page_info *tmem_alloc_page(void *pool, int no_heap)
 {
-    struct page_info *pi = tmh_page_list_get();
+    struct page_info *pi = tmem_page_list_get();
 
     if ( pi == NULL && !no_heap )
         pi = alloc_domheap_pages(0,0,MEMF_tmem);
@@ -233,55 +228,50 @@ static inline struct page_info *tmh_alloc_page(void *pool, int no_heap)
     return pi;
 }
 
-static inline void tmh_free_page(struct page_info *pi)
+static inline void tmem_free_page(struct page_info *pi)
 {
     ASSERT(IS_VALID_PAGE(pi));
-    tmh_page_list_put(pi);
+    tmem_page_list_put(pi);
     atomic_dec(&freeable_page_count);
 }
 
 static inline unsigned int tmem_subpage_maxsize(void)
 {
-    return tmh_mempool_maxalloc;
-}
-
-static inline unsigned long tmh_freeable_pages(void)
-{
-    return tmh_avail_pages() + _atomic_read(freeable_page_count);
+    return tmem_mempool_maxalloc;
 }
 
-static inline unsigned long tmh_free_mb(void)
+static inline unsigned long tmem_free_mb(void)
 {
-    return (tmh_avail_pages() + total_free_pages()) >> (20 - PAGE_SHIFT);
+    return (tmem_page_list_pages + total_free_pages()) >> (20 - PAGE_SHIFT);
 }
 
 /*
  * Memory allocation for "infrastructure" data
  */
 
-static inline void *tmh_alloc_infra(size_t size, size_t align)
+static inline void *tmem_alloc_infra(size_t size, size_t align)
 {
     return _xmalloc(size,align);
 }
 
-static inline void tmh_free_infra(void *p)
+static inline void tmem_free_infra(void *p)
 {
     return xfree(p);
 }
 
-#define tmh_lock_all  opt_tmem_lock
-#define tmh_called_from_tmem(_memflags) (_memflags & MEMF_tmem)
+#define tmem_lock_all  opt_tmem_lock
+#define tmem_called_from_tmem(_memflags) (_memflags & MEMF_tmem)
 
 /*  "Client" (==domain) abstraction */
 
 struct client;
 typedef domid_t cli_id_t;
-typedef struct domain tmh_cli_ptr_t;
+typedef struct domain tmem_cli_ptr_t;
 
-extern tmh_client_t *tmh_client_init(cli_id_t);
-extern void tmh_client_destroy(tmh_client_t *);
+extern tmem_client_t *tmem_client_init(cli_id_t);
+extern void tmem_client_destroy(tmem_client_t *);
 
-static inline struct client *tmh_client_from_cli_id(cli_id_t cli_id)
+static inline struct client *tmem_client_from_cli_id(cli_id_t cli_id)
 {
     struct client *c;
     struct domain *d = rcu_lock_domain_by_id(cli_id);
@@ -292,25 +282,25 @@ static inline struct client *tmh_client_from_cli_id(cli_id_t cli_id)
     return c;
 }
 
-static inline struct client *tmh_client_from_current(void)
+static inline struct client *tmem_client_from_current(void)
 {
     return (struct client *)(current->domain->tmem);
 }
 
-#define tmh_client_is_dying(_client) (!!_client->tmh->domain->is_dying)
+#define tmem_client_is_dying(_client) (!!_client->tmem->domain->is_dying)
 
-static inline cli_id_t tmh_get_cli_id_from_current(void)
+static inline cli_id_t tmem_get_cli_id_from_current(void)
 {
     return current->domain->domain_id;
 }
 
-static inline tmh_cli_ptr_t *tmh_get_cli_ptr_from_current(void)
+static inline tmem_cli_ptr_t *tmem_get_cli_ptr_from_current(void)
 {
     return current->domain;
 }
 
-static inline bool_t tmh_set_client_from_id(
-    struct client *client, tmh_client_t *tmh, cli_id_t cli_id)
+static inline bool_t tmem_set_client_from_id(
+    struct client *client, tmem_client_t *tmem, cli_id_t cli_id)
 {
     struct domain *d = rcu_lock_domain_by_id(cli_id);
     bool_t rc = 0;
@@ -319,31 +309,31 @@ static inline bool_t tmh_set_client_from_id(
     if ( !d->is_dying )
     {
         d->tmem = client;
-        tmh->domain = d;
+        tmem->domain = d;
         rc = 1;
     }
     rcu_unlock_domain(d);
     return rc;
 }
 
-static inline bool_t tmh_current_permitted(void)
+static inline bool_t tmem_current_permitted(void)
 {
     return !xsm_tmem_op(XSM_HOOK);
 }
 
-static inline bool_t tmh_current_is_privileged(void)
+static inline bool_t tmem_current_is_privileged(void)
 {
     return !xsm_tmem_control(XSM_PRIV);
 }
 
-static inline uint8_t tmh_get_first_byte(struct page_info *pfp)
+static inline uint8_t tmem_get_first_byte(struct page_info *pfp)
 {
     void *p = __map_domain_page(pfp);
 
     return (uint8_t)(*(char *)p);
 }
 
-static inline int tmh_page_cmp(struct page_info *pfp1, struct page_info *pfp2)
+static inline int tmem_page_cmp(struct page_info *pfp1, struct page_info *pfp2)
 {
     const uint64_t *p1 = (uint64_t *)__map_domain_page(pfp1);
     const uint64_t *p2 = (uint64_t *)__map_domain_page(pfp2);
@@ -360,7 +350,7 @@ ASSERT(p2 != NULL);
     return 1;
 }
 
-static inline int tmh_pcd_cmp(void *va1, pagesize_t len1, void *va2, pagesize_t len2)
+static inline int tmem_pcd_cmp(void *va1, pagesize_t len1, void *va2, pagesize_t len2)
 {
     const char *p1 = (char *)va1;
     const char *p2 = (char *)va2;
@@ -381,7 +371,7 @@ static inline int tmh_pcd_cmp(void *va1, pagesize_t len1, void *va2, pagesize_t
     return 1;
 }
 
-static inline int tmh_tze_pfp_cmp(struct page_info *pfp1, pagesize_t pfp_len, void *tva, pagesize_t tze_len)
+static inline int tmem_tze_pfp_cmp(struct page_info *pfp1, pagesize_t pfp_len, void *tva, pagesize_t tze_len)
 {
     const uint64_t *p1 = (uint64_t *)__map_domain_page(pfp1);
     const uint64_t *p2;
@@ -410,7 +400,7 @@ static inline int tmh_tze_pfp_cmp(struct page_info *pfp1, pagesize_t pfp_len, vo
 
 /* return the size of the data in the pfp, ignoring trailing zeroes and
  * rounded up to the nearest multiple of 8 */
-static inline pagesize_t tmh_tze_pfp_scan(struct page_info *pfp)
+static inline pagesize_t tmem_tze_pfp_scan(struct page_info *pfp)
 {
     const uint64_t *p = (uint64_t *)__map_domain_page(pfp);
     pagesize_t bytecount = PAGE_SIZE;
@@ -421,7 +411,7 @@ static inline pagesize_t tmh_tze_pfp_scan(struct page_info *pfp)
     return bytecount;
 }
 
-static inline void tmh_tze_copy_from_pfp(void *tva, struct page_info *pfp, pagesize_t len)
+static inline void tmem_tze_copy_from_pfp(void *tva, struct page_info *pfp, pagesize_t len)
 {
     uint64_t *p1 = (uint64_t *)tva;
     const uint64_t *p2 = (uint64_t *)__map_domain_page(pfp);
@@ -438,7 +428,7 @@ typedef XEN_GUEST_HANDLE(char) cli_va_t;
 typedef XEN_GUEST_HANDLE_PARAM(tmem_op_t) tmem_cli_op_t;
 typedef XEN_GUEST_HANDLE_PARAM(char) tmem_cli_va_param_t;
 
-static inline int tmh_get_tmemop_from_client(tmem_op_t *op, tmem_cli_op_t uops)
+static inline int tmem_get_tmemop_from_client(tmem_op_t *op, tmem_cli_op_t uops)
 {
 #ifdef CONFIG_COMPAT
     if ( is_hvm_vcpu(current) ?
@@ -470,42 +460,42 @@ static inline int tmh_get_tmemop_from_client(tmem_op_t *op, tmem_cli_op_t uops)
     return copy_from_guest(op, uops, 1);
 }
 
-#define tmh_cli_buf_null guest_handle_from_ptr(NULL, char)
+#define tmem_cli_buf_null guest_handle_from_ptr(NULL, char)
 
-static inline void tmh_copy_to_client_buf_offset(tmem_cli_va_param_t clibuf,
+static inline void tmem_copy_to_client_buf_offset(tmem_cli_va_param_t clibuf,
                                                 int off,
                                                 char *tmembuf, int len)
 {
     copy_to_guest_offset(clibuf,off,tmembuf,len);
 }
 
-#define tmh_copy_to_client_buf(clibuf, tmembuf, cnt) \
+#define tmem_copy_to_client_buf(clibuf, tmembuf, cnt) \
     copy_to_guest(guest_handle_cast(clibuf, void), tmembuf, cnt)
 
-#define tmh_client_buf_add guest_handle_add_offset
+#define tmem_client_buf_add guest_handle_add_offset
 
-#define TMH_CLI_ID_NULL ((cli_id_t)((domid_t)-1L))
+#define TMEM_CLI_ID_NULL ((cli_id_t)((domid_t)-1L))
 
-#define tmh_cli_id_str "domid"
-#define tmh_client_str "domain"
+#define tmem_cli_id_str "domid"
+#define tmem_client_str "domain"
 
-int tmh_decompress_to_client(xen_pfn_t, void *, size_t,
+int tmem_decompress_to_client(xen_pfn_t, void *, size_t,
                             tmem_cli_va_param_t);
 
-int tmh_compress_from_client(xen_pfn_t, void **, size_t *,
+int tmem_compress_from_client(xen_pfn_t, void **, size_t *,
                             tmem_cli_va_param_t);
 
-int tmh_copy_from_client(struct page_info *, xen_pfn_t, pagesize_t tmem_offset,
+int tmem_copy_from_client(struct page_info *, xen_pfn_t, pagesize_t tmem_offset,
     pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t);
 
-int tmh_copy_to_client(xen_pfn_t, struct page_info *, pagesize_t tmem_offset,
+int tmem_copy_to_client(xen_pfn_t, struct page_info *, pagesize_t tmem_offset,
     pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t);
 
-extern int tmh_copy_tze_to_client(xen_pfn_t cmfn, void *tmem_va, pagesize_t len);
+extern int tmem_copy_tze_to_client(xen_pfn_t cmfn, void *tmem_va, pagesize_t len);
 
-#define tmh_client_err(fmt, args...)  printk(XENLOG_G_ERR fmt, ##args)
-#define tmh_client_warn(fmt, args...) printk(XENLOG_G_WARNING fmt, ##args)
-#define tmh_client_info(fmt, args...) printk(XENLOG_G_INFO fmt, ##args)
+#define tmem_client_err(fmt, args...)  printk(XENLOG_G_ERR fmt, ##args)
+#define tmem_client_warn(fmt, args...) printk(XENLOG_G_WARNING fmt, ##args)
+#define tmem_client_info(fmt, args...) printk(XENLOG_G_INFO fmt, ##args)
 
 #define TMEM_PERF
 #ifdef TMEM_PERF
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, which monitors our
servers 24x7x365 and backs them with its Fanatical Support®.