
[Xen-changelog] [xen master] tmem: Unify XEN_SYSCTL_TMEM_OP_[SAVE_[BEGIN|END]|RESTORE_BEGIN]



commit 4ca5e0103d0c713e9ec9fefe4ca9351abc342ad7
Author:     Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
AuthorDate: Mon Sep 26 11:05:09 2016 -0400
Commit:     Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
CommitDate: Fri Sep 30 15:26:57 2016 -0400

    tmem: Unify XEN_SYSCTL_TMEM_OP_[SAVE_[BEGIN|END]|RESTORE_BEGIN]
    
    return values. For success they used to be 1 ([SAVE|RESTORE]_BEGIN),
    0 if the guest did not have any tmem (but only for SAVE_BEGIN), and
    -1 for any type of failure.
    
    And SAVE_END (which you would think would mirror SAVE_BEGIN)
    had 0 for success and -1 if the guest did not have any tmem enabled
    for it.
    
    This is confusing. Now the code will return 0 if the operation was
    a success. Various XEN_EXX values are returned if tmem is not
    enabled or the operation could not be performed.
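    
    In concrete terms, the new convention boils down to the following
    (a summary sketch derived from the tmemc_save_subop() hunk below,
    not part of the patch itself):
    
        /*
         * 0        success
         * -ENOENT  no tmem client for the domain, or no pools to save
         * -EEXIST  RESTORE_BEGIN: a tmem client already exists
         * -ENOMEM  RESTORE_BEGIN: client_create() failed
         */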
    
    The xc_tmem.c code only needs one place to check - where we use
    SAVE_BEGIN. The place where RESTORE_BEGIN is used will have errno
    set to the proper error value and the return will be -1, so it
    will still fail properly.
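    
    For illustration, a restore-side caller under the new convention
    could look like this (a minimal sketch, not part of the patch; it
    mirrors the xc_tmem_control() call shape from the xc_tmem.c hunk
    below):
    
        rc = xc_tmem_control(xch, 0, XEN_SYSCTL_TMEM_OP_RESTORE_BEGIN,
                             dom, 1, 0, NULL);
        if ( rc < 0 )
        {
            /* errno holds the cause: EEXIST if a tmem client already
             * exists for the domain, ENOMEM if client_create() failed. */
            return -1;
        }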
    
    Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Acked-by: Wei Liu <wei.liu2@xxxxxxxxxx>
    Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
---
 tools/libxc/xc_tmem.c | 14 +++++++++++---
 xen/common/tmem.c     | 17 +++++++++--------
 2 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/tools/libxc/xc_tmem.c b/tools/libxc/xc_tmem.c
index 9e21d41..f4dbf58 100644
--- a/tools/libxc/xc_tmem.c
+++ b/tools/libxc/xc_tmem.c
@@ -214,15 +214,23 @@ int xc_tmem_save(xc_interface *xch,
                  int dom, int io_fd, int live, int field_marker)
 {
     int marker = field_marker;
-    int i, j;
+    int i, j, rc;
     uint32_t flags;
     uint32_t minusone = -1;
     uint32_t pool_id;
     struct tmem_handle *h;
     xen_tmem_client_t info;
 
-    if ( xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SAVE_BEGIN,dom,live,0,NULL) <= 0 )
-        return 0;
+    rc = xc_tmem_control(xch, 0, XEN_SYSCTL_TMEM_OP_SAVE_BEGIN,
+                         dom, live, 0, NULL);
+    if ( rc )
+    {
+        /* Nothing to save - no tmem enabled. */
+        if ( errno == ENOENT )
+            return 0;
+
+        return rc;
+    }
 
     if ( write_exact(io_fd, &marker, sizeof(marker)) )
         return -1;
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index ab354b6..510d11c 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -1656,30 +1656,31 @@ static int tmemc_save_subop(int cli_id, uint32_t pool_id,
     struct client *client = tmem_client_from_cli_id(cli_id);
     uint32_t p;
     struct tmem_page_descriptor *pgp, *pgp2;
-    int rc = -1;
+    int rc = -ENOENT;
 
     switch(subop)
     {
     case XEN_SYSCTL_TMEM_OP_SAVE_BEGIN:
         if ( client == NULL )
-            return 0;
+            break;
         for (p = 0; p < MAX_POOLS_PER_DOMAIN; p++)
             if ( client->pools[p] != NULL )
                 break;
+
         if ( p == MAX_POOLS_PER_DOMAIN )
-        {
-            rc = 0;
             break;
-        }
+
         client->was_frozen = client->info.flags.u.frozen;
         client->info.flags.u.frozen = 1;
         if ( arg1 != 0 )
             client->info.flags.u.migrating = 1;
-        rc = 1;
+        rc = 0;
         break;
     case XEN_SYSCTL_TMEM_OP_RESTORE_BEGIN:
-        if ( client == NULL && (client = client_create(cli_id)) != NULL )
-            return 1;
+        if ( client == NULL )
+            rc = client_create(cli_id) ? 0 : -ENOMEM;
+        else
+            rc = -EEXIST;
         break;
     case XEN_SYSCTL_TMEM_OP_SAVE_END:
         if ( client == NULL )
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog

 

