[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen master] tmem: refactor function tmem_ensure_avail_pages()



commit 6f06f406f8b5207493f70f29efe523bf53243f36
Author:     Bob Liu <lliubbo@xxxxxxxxx>
AuthorDate: Thu Dec 12 19:05:12 2013 +0800
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Wed Jan 8 08:55:38 2014 +0100

    tmem: refactor function tmem_ensure_avail_pages()
    
    tmem_ensure_avail_pages() doesn't return a value, which is incorrect because
    the caller needs to confirm whether there is enough memory.
    
    Signed-off-by: Bob Liu <bob.liu@xxxxxxxxxx>
    Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
---
 xen/common/tmem.c          |   32 ++++++++++++++++++++------------
 xen/include/xen/tmem_xen.h |    6 ------
 2 files changed, 20 insertions(+), 18 deletions(-)

diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index 2659651..685efef 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -1409,22 +1409,28 @@ static unsigned long tmem_relinquish_npages(unsigned long n)
     return avail_pages;
 }
 
-/* Under certain conditions (e.g. if each client is putting pages for exactly
+/*
+ * Under certain conditions (e.g. if each client is putting pages for exactly
  * one object), once locks are held, freeing up memory may
  * result in livelocks and very long "put" times, so we try to ensure there
  * is a minimum amount of memory (1MB) available BEFORE any data structure
- * locks are held */
-static inline void tmem_ensure_avail_pages(void)
+ * locks are held.
+ */
+static inline bool_t tmem_ensure_avail_pages(void)
 {
     int failed_evict = 10;
+    unsigned long free_mem;
 
-    while ( !tmem_free_mb() )
-    {
-        if ( tmem_evict() )
-            continue;
-        else if ( failed_evict-- <= 0 )
-            break;
-    }
+    do {
+        free_mem = (tmem_page_list_pages + total_free_pages())
+                        >> (20 - PAGE_SHIFT);
+        if ( free_mem )
+            return 1;
+        if ( !tmem_evict() )
+            failed_evict--;
+    } while ( failed_evict > 0 );
+
+    return 0;
 }
 
 /************ TMEM CORE OPERATIONS ************************************/
@@ -2681,9 +2687,11 @@ long do_tmem_op(tmem_cli_op_t uops)
                               op.u.creat.uuid[0], op.u.creat.uuid[1]);
         break;
     case TMEM_PUT_PAGE:
-        tmem_ensure_avail_pages();
-        rc = do_tmem_put(pool, oidp, op.u.gen.index, op.u.gen.cmfn,
+        if (tmem_ensure_avail_pages())
+            rc = do_tmem_put(pool, oidp, op.u.gen.index, op.u.gen.cmfn,
                         tmem_cli_buf_null);
+        else
+            rc = -ENOMEM;
         if (rc == 1) succ_put = 1;
         else non_succ_put = 1;
         break;
diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
index 11f4c2d..4e6c234 100644
--- a/xen/include/xen/tmem_xen.h
+++ b/xen/include/xen/tmem_xen.h
@@ -164,13 +164,7 @@ static inline void __tmem_free_page(struct page_info *pi)
     atomic_dec(&freeable_page_count);
 }
 
-static inline unsigned long tmem_free_mb(void)
-{
-    return (tmem_page_list_pages + total_free_pages()) >> (20 - PAGE_SHIFT);
-}
-
 /*  "Client" (==domain) abstraction */
-
 static inline struct client *tmem_client_from_cli_id(domid_t cli_id)
 {
     struct client *c;
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.