
[Xen-devel] [PATCH v2 08/11] tmem: cleanup: drop useless wrapper macros



The wrapper macros tmem_alloc/free_subpage_thispool() and
tmem_alloc/free_page_thispool() add no value: each one merely dereferences the
pool before calling the corresponding underscore-prefixed helper. Drop the
macros, rename _tmem_alloc/free_subpage_thispool() and
_tmem_alloc/free_page_thispool() to take over the macro names, and have the
callers pass the required pool or domain directly.
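
For clarity, here is a minimal, self-contained sketch of how call sites change
(the structs are simplified stand-ins for illustration only, not the real tmem
definitions; the function and field names follow the diff below):

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-in types, not the real Xen tmem structures. */
    struct xmem_pool { const char *name; };
    struct client    { struct xmem_pool *persistent_pool; };
    struct tmem_pool { struct client *client; };

    /* The helper formerly named _tmem_alloc_subpage_thispool(): it already
     * takes the xmem_pool directly. */
    static void *tmem_alloc_subpage_thispool(struct xmem_pool *cmem_mempool,
                                             size_t size, size_t align)
    {
        (void)align;
        printf("allocating %zu bytes from the %s pool\n",
               size, cmem_mempool->name);
        return malloc(size);
    }

    /*
     * Before the cleanup a wrapper macro hid the indirection at call sites:
     *
     *   #define tmem_alloc_subpage_thispool(_pool, _s, _a) \
     *       _tmem_alloc_subpage_thispool(pool->client->persistent_pool, _s, _a)
     *
     * After the cleanup the caller spells the argument out itself:
     */
    int main(void)
    {
        struct xmem_pool xp = { "persistent" };
        struct client cl = { &xp };
        struct tmem_pool pool = { &cl };

        void *v = tmem_alloc_subpage_thispool(pool.client->persistent_pool,
                                              16, 8);
        free(v);
        return 0;
    }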

Signed-off-by: Bob Liu <bob.liu@xxxxxxxxxx>
---
 xen/common/tmem.c          |    8 ++++----
 xen/common/tmem_xen.c      |    4 ++--
 xen/include/xen/tmem_xen.h |   17 ++++-------------
 3 files changed, 10 insertions(+), 19 deletions(-)

diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index db18b65..d3318d4 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -311,7 +311,7 @@ static NOINLINE void *_tmem_malloc(size_t size, size_t align, struct tmem_pool *
     void *v;
 
     if ( (pool != NULL) && is_persistent(pool) )
-        v = tmem_alloc_subpage_thispool(pool,size,align);
+        v = tmem_alloc_subpage_thispool(pool->client->persistent_pool,size,align);
     else
         v = tmem_alloc_subpage(pool, size, align);
     if ( v == NULL )
@@ -324,7 +324,7 @@ static NOINLINE void tmem_free(void *p, size_t size, struct tmem_pool *pool)
     if ( pool == NULL || !is_persistent(pool) )
         tmem_free_subpage(p,size);
     else
-        tmem_free_subpage_thispool(pool,p,size);
+        tmem_free_subpage_thispool(pool->client->persistent_pool,p,size);
 }
 
 static NOINLINE struct page_info *tmem_page_alloc(struct tmem_pool *pool)
@@ -332,7 +332,7 @@ static NOINLINE struct page_info *tmem_page_alloc(struct tmem_pool *pool)
     struct page_info *pfp = NULL;
 
     if ( pool != NULL && is_persistent(pool) )
-        pfp = tmem_alloc_page_thispool(pool);
+        pfp = tmem_alloc_page_thispool(pool->client->domain);
     else
         pfp = tmem_alloc_page(pool,0);
     if ( pfp == NULL )
@@ -348,7 +348,7 @@ static NOINLINE void tmem_page_free(struct tmem_pool *pool, struct page_info *pf
     if ( pool == NULL || !is_persistent(pool) )
         tmem_free_page(pfp);
     else
-        tmem_free_page_thispool(pool,pfp);
+        tmem_free_page_thispool(pfp);
     atomic_dec_and_assert(global_page_count);
 }
 
diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
index 1309932..0f5955d 100644
--- a/xen/common/tmem_xen.c
+++ b/xen/common/tmem_xen.c
@@ -347,7 +347,7 @@ void *tmem_persistent_pool_page_get(unsigned long size)
     struct domain *d = current->domain;
 
     ASSERT(size == PAGE_SIZE);
-    if ( (pi = _tmem_alloc_page_thispool(d)) == NULL )
+    if ( (pi = tmem_alloc_page_thispool(d)) == NULL )
         return NULL;
     ASSERT(IS_VALID_PAGE(pi));
     return page_to_virt(pi);
@@ -360,7 +360,7 @@ void tmem_persistent_pool_page_put(void *page_va)
     ASSERT(IS_PAGE_ALIGNED(page_va));
     pi = mfn_to_page(virt_to_mfn(page_va));
     ASSERT(IS_VALID_PAGE(pi));
-    _tmem_free_page_thispool(pi);
+    tmem_free_page_thispool(pi);
 }
 
 /******************  XEN-SPECIFIC HOST INITIALIZATION ********************/
diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
index 3c99bee..0b64309 100644
--- a/xen/include/xen/tmem_xen.h
+++ b/xen/include/xen/tmem_xen.h
@@ -118,7 +118,7 @@ static inline bool_t domain_fully_allocated(struct domain *d)
 #define tmem_client_memory_fully_allocated(_pool) \
  domain_fully_allocated(_pool->client->domain)
 
-static inline void *_tmem_alloc_subpage_thispool(struct xmem_pool *cmem_mempool,
+static inline void *tmem_alloc_subpage_thispool(struct xmem_pool *cmem_mempool,
                                                  size_t size, size_t align)
 {
 #if 0
@@ -130,21 +130,16 @@ static inline void *_tmem_alloc_subpage_thispool(struct xmem_pool *cmem_mempool,
         return NULL;
     return xmem_pool_alloc(size, cmem_mempool);
 }
-#define tmem_alloc_subpage_thispool(_pool, _s, _a) \
-            _tmem_alloc_subpage_thispool(pool->client->persistent_pool, \
-                                         _s, _a)
 
-static inline void _tmem_free_subpage_thispool(struct xmem_pool *cmem_mempool,
+static inline void tmem_free_subpage_thispool(struct xmem_pool *cmem_mempool,
                                                void *ptr, size_t size)
 {
     ASSERT( size < tmem_mempool_maxalloc );
     ASSERT( cmem_mempool != NULL );
     xmem_pool_free(ptr,cmem_mempool);
 }
-#define tmem_free_subpage_thispool(_pool, _p, _s) \
- _tmem_free_subpage_thispool(_pool->client->persistent_pool, _p, _s)
 
-static inline struct page_info *_tmem_alloc_page_thispool(struct domain *d)
+static inline struct page_info *tmem_alloc_page_thispool(struct domain *d)
 {
     struct page_info *pi;
 
@@ -171,10 +166,8 @@ out:
     ASSERT((pi == NULL) || IS_VALID_PAGE(pi));
     return pi;
 }
-#define tmem_alloc_page_thispool(_pool) \
-    _tmem_alloc_page_thispool(_pool->client->domain)
 
-static inline void _tmem_free_page_thispool(struct page_info *pi)
+static inline void tmem_free_page_thispool(struct page_info *pi)
 {
     struct domain *d = page_get_owner(pi);
 
@@ -188,8 +181,6 @@ static inline void _tmem_free_page_thispool(struct page_info *pi)
         free_domheap_pages(pi,0);
     }
 }
-#define tmem_free_page_thispool(_pool,_pg) \
-    _tmem_free_page_thispool(_pg)
 
 /*
  * Memory allocation for ephemeral (non-persistent) data
-- 
1.7.10.4

