[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [qemu-xen master] block/gluster: memory usage: use one glfs instance per volume



commit 6349c15410361d3fe52c9beee309954d606f8ccd
Author:     Prasanna Kumar Kalever <prasanna.kalever@xxxxxxxxxx>
AuthorDate: Thu Oct 27 20:54:50 2016 +0530
Commit:     Jeff Cody <jcody@xxxxxxxxxx>
CommitDate: Tue Nov 1 07:55:57 2016 -0400

    block/gluster: memory usage: use one glfs instance per volume
    
    Currently, for every drive accessed via gfapi we create a new glfs
    instance (call glfs_new() followed by glfs_init()), which can consume
    a few hundred MB of memory; from the table below it looks like each
    instance consumed ~300 MB of VSZ
    
    Before:
    -------
    Disks   VSZ     RSS
    1       1098728 187756
    2       1430808 198656
    3       1764932 199704
    4       2084728 202684
    
    This patch maintains a list of pre-opened glfs objects. On adding
    a new drive belonging to the same gluster volume, we just reuse the
    existing glfs object by updating its refcount.
    
    With this approach we reduce the unwanted memory consumption and avoid
    redundant glfs_new/glfs_init calls when accessing a disk (file) that
    belongs to the same volume.
    
    From the table below, notice that the memory usage after adding a disk
    (which reuses the existing glfs object) is negligible compared to
    before.
    
    After:
    ------
    Disks   VSZ     RSS
    1       1101964 185768
    2       1109604 194920
    3       1114012 196036
    4       1114496 199868
    
    Disks: number of -drive
    VSZ: virtual memory size of the process in KiB
    RSS: resident set size, the non-swapped physical memory (in kilobytes)
    
    VSZ and RSS are analyzed using 'ps aux' utility.
    
    Signed-off-by: Prasanna Kumar Kalever <prasanna.kalever@xxxxxxxxxx>
    Reviewed-by: Jeff Cody <jcody@xxxxxxxxxx>
    Message-id: 1477581890-4811-1-git-send-email-prasanna.kalever@xxxxxxxxxx
    Signed-off-by: Jeff Cody <jcody@xxxxxxxxxx>
---
 block/gluster.c | 94 ++++++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 80 insertions(+), 14 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 1735d12..40bd29c 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -56,6 +56,19 @@ typedef struct BDRVGlusterReopenState {
 } BDRVGlusterReopenState;
 
 
+typedef struct GlfsPreopened {
+    char *volume;
+    glfs_t *fs;
+    int ref;
+} GlfsPreopened;
+
+typedef struct ListElement {
+    QLIST_ENTRY(ListElement) list;
+    GlfsPreopened saved;
+} ListElement;
+
+static QLIST_HEAD(glfs_list, ListElement) glfs_list;
+
 static QemuOptsList qemu_gluster_create_opts = {
     .name = "qemu-gluster-create-opts",
     .head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
@@ -194,6 +207,57 @@ static QemuOptsList runtime_tcp_opts = {
     },
 };
 
+static void glfs_set_preopened(const char *volume, glfs_t *fs)
+{
+    ListElement *entry = NULL;
+
+    entry = g_new(ListElement, 1);
+
+    entry->saved.volume = g_strdup(volume);
+
+    entry->saved.fs = fs;
+    entry->saved.ref = 1;
+
+    QLIST_INSERT_HEAD(&glfs_list, entry, list);
+}
+
+static glfs_t *glfs_find_preopened(const char *volume)
+{
+    ListElement *entry = NULL;
+
+     QLIST_FOREACH(entry, &glfs_list, list) {
+        if (strcmp(entry->saved.volume, volume) == 0) {
+            entry->saved.ref++;
+            return entry->saved.fs;
+        }
+     }
+
+    return NULL;
+}
+
+static void glfs_clear_preopened(glfs_t *fs)
+{
+    ListElement *entry = NULL;
+
+    if (fs == NULL) {
+        return;
+    }
+
+    QLIST_FOREACH(entry, &glfs_list, list) {
+        if (entry->saved.fs == fs) {
+            if (--entry->saved.ref) {
+                return;
+            }
+
+            QLIST_REMOVE(entry, list);
+
+            glfs_fini(entry->saved.fs);
+            g_free(entry->saved.volume);
+            g_free(entry);
+        }
+    }
+}
+
 static int parse_volume_options(BlockdevOptionsGluster *gconf, char *path)
 {
     char *p, *q;
@@ -331,11 +395,18 @@ static struct glfs 
*qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf,
     int old_errno;
     GlusterServerList *server;
 
+    glfs = glfs_find_preopened(gconf->volume);
+    if (glfs) {
+        return glfs;
+    }
+
     glfs = glfs_new(gconf->volume);
     if (!glfs) {
         goto out;
     }
 
+    glfs_set_preopened(gconf->volume, glfs);
+
     for (server = gconf->server; server; server = server->next) {
         if (server->value->type  == GLUSTER_TRANSPORT_UNIX) {
             ret = glfs_set_volfile_server(glfs,
@@ -387,7 +458,7 @@ static struct glfs 
*qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf,
 out:
     if (glfs) {
         old_errno = errno;
-        glfs_fini(glfs);
+        glfs_clear_preopened(glfs);
         errno = old_errno;
     }
     return NULL;
@@ -767,9 +838,9 @@ out:
     if (s->fd) {
         glfs_close(s->fd);
     }
-    if (s->glfs) {
-        glfs_fini(s->glfs);
-    }
+
+    glfs_clear_preopened(s->glfs);
+
     return ret;
 }
 
@@ -836,9 +907,8 @@ static void qemu_gluster_reopen_commit(BDRVReopenState 
*state)
     if (s->fd) {
         glfs_close(s->fd);
     }
-    if (s->glfs) {
-        glfs_fini(s->glfs);
-    }
+
+    glfs_clear_preopened(s->glfs);
 
     /* use the newly opened image / connection */
     s->fd         = reop_s->fd;
@@ -863,9 +933,7 @@ static void qemu_gluster_reopen_abort(BDRVReopenState 
*state)
         glfs_close(reop_s->fd);
     }
 
-    if (reop_s->glfs) {
-        glfs_fini(reop_s->glfs);
-    }
+    glfs_clear_preopened(reop_s->glfs);
 
     g_free(state->opaque);
     state->opaque = NULL;
@@ -989,9 +1057,7 @@ static int qemu_gluster_create(const char *filename,
 out:
     g_free(tmp);
     qapi_free_BlockdevOptionsGluster(gconf);
-    if (glfs) {
-        glfs_fini(glfs);
-    }
+    glfs_clear_preopened(glfs);
     return ret;
 }
 
@@ -1064,7 +1130,7 @@ static void qemu_gluster_close(BlockDriverState *bs)
         glfs_close(s->fd);
         s->fd = NULL;
     }
-    glfs_fini(s->glfs);
+    glfs_clear_preopened(s->glfs);
 }
 
 static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
--
generated by git-patchbot for /home/xen/git/qemu-xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.