
[Xen-changelog] [xen-unstable] xenpaging: watch the guest's memory/target-tot_pages xenstore value



# HG changeset patch
# User Olaf Hering <olaf@xxxxxxxxx>
# Date 1321804967 -3600
# Node ID 9e3c2ef70c8a6b9c259231581105a4587a73a83a
# Parent  286a741b4d86f85013f956f944e22f208e400904
xenpaging: watch the guest's memory/target-tot_pages xenstore value

Subsequent patches will use xenstored to store the number of pages
xenpaging is supposed to page out.
Remove num_pages and use target_pages instead.

Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>
Committed-by: Ian Jackson <ian.jackson@citrix.com>
---


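For context: the watched key holds the guest's target in KiB under its
per-domain xenstore path. As a rough sketch of the producer side, a
toolstack or test program could set the value with the public libxenstore
calls; the command-line handling and lack of error reporting below are
illustrative assumptions, not part of this changeset.

/* Sketch: write a paging target, in KiB, for a given domain.
 * Assumes libxenstore (link with -lxenstore).
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <xs.h>

int main(int argc, char *argv[])
{
    struct xs_handle *xsh;
    char *dom_path, *path;

    if ( argc != 3 )
    {
        fprintf(stderr, "usage: %s <domid> <target_kib>\n", argv[0]);
        return 1;
    }

    xsh = xs_daemon_open();
    if ( !xsh )
        return 1;

    dom_path = xs_get_domain_path(xsh, atoi(argv[1]));
    if ( !dom_path )
        return 1;
    if ( asprintf(&path, "%s/memory/target-tot_pages", dom_path) < 0 )
        return 1;

    /* The value is a plain decimal string, interpreted as KiB */
    if ( !xs_write(xsh, XBT_NULL, path, argv[2], strlen(argv[2])) )
        fprintf(stderr, "xs_write failed\n");

    free(path);
    free(dom_path);
    xs_daemon_close(xsh);
    return 0;
}

With 4 KiB pages, the '>>= 2' conversion in the consumer below turns e.g.
a written value of 2048 (KiB) into a target of 512 pages.
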
diff -r 286a741b4d86 -r 9e3c2ef70c8a tools/xenpaging/xenpaging.c
--- a/tools/xenpaging/xenpaging.c       Sun Nov 20 17:02:45 2011 +0100
+++ b/tools/xenpaging/xenpaging.c       Sun Nov 20 17:02:47 2011 +0100
@@ -19,8 +19,10 @@
  */
 
 #define _XOPEN_SOURCE  600
+#define _GNU_SOURCE
 
 #include <inttypes.h>
+#include <stdio.h>
 #include <stdlib.h>
 #include <stdarg.h>
 #include <time.h>
@@ -35,6 +37,10 @@
 #include "policy.h"
 #include "xenpaging.h"
 
+/* Per-domain xenstore value: how many KiB the guest should use at a time */
+#define WATCH_TARGETPAGES "memory/target-tot_pages"
+static char *watch_target_tot_pages;
+static char *dom_path;
 static char watch_token[16];
 static char filename[80];
 static int interrupted;
@@ -72,7 +78,7 @@
 {
     xc_interface *xch = paging->xc_handle;
     xc_evtchn *xce = paging->mem_event.xce_handle;
-    char **vec;
+    char **vec, *val;
     unsigned int num;
     struct pollfd fd[2];
     int port;
@@ -111,6 +117,25 @@
                     rc = 0;
                 }
             }
+            else if ( strcmp(vec[XS_WATCH_PATH], watch_target_tot_pages) == 0 )
+            {
+                int ret, target_tot_pages;
+                val = xs_read(paging->xs_handle, XBT_NULL, vec[XS_WATCH_PATH], NULL);
+                if ( val )
+                {
+                    ret = sscanf(val, "%d", &target_tot_pages);
+                    if ( ret > 0 )
+                    {
+                        /* KiB to pages */
+                        target_tot_pages >>= 2;
+                        if ( target_tot_pages < 0 || target_tot_pages > paging->max_pages )
+                            target_tot_pages = paging->max_pages;
+                        paging->target_tot_pages = target_tot_pages;
+                        DPRINTF("new target_tot_pages %d\n", target_tot_pages);
+                    }
+                    free(val);
+                }
+            }
             free(vec);
         }
     }
@@ -216,6 +241,25 @@
         goto err;
     }
 
+    /* Watch xenpaging's working target */
+    dom_path = xs_get_domain_path(paging->xs_handle, domain_id);
+    if ( !dom_path )
+    {
+        PERROR("Could not find domain path\n");
+        goto err;
+    }
+    if ( asprintf(&watch_target_tot_pages, "%s/%s", dom_path, WATCH_TARGETPAGES) < 0 )
+    {
+        PERROR("Could not alloc watch path\n");
+        goto err;
+    }
+    DPRINTF("watching '%s'\n", watch_target_tot_pages);
+    if ( xs_watch(paging->xs_handle, watch_target_tot_pages, "") == false )
+    {
+        PERROR("Could not bind to xenpaging watch\n");
+        goto err;
+    }
+
     p = getenv("XENPAGING_POLICY_MRU_SIZE");
     if ( p && *p )
     {
@@ -342,6 +386,8 @@
             free(paging->mem_event.ring_page);
         }
 
+        free(dom_path);
+        free(watch_target_tot_pages);
         free(paging->bitmap);
         free(paging);
     }
@@ -357,6 +403,9 @@
     if ( paging == NULL )
         return 0;
 
+    xs_unwatch(paging->xs_handle, watch_target_tot_pages, "");
+    xs_unwatch(paging->xs_handle, "@releaseDomain", watch_token);
+
     xch = paging->xc_handle;
     paging->xc_handle = NULL;
     /* Tear down domain paging in Xen */

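
The 'target_tot_pages >>= 2' above converts the KiB value to a page count,
assuming 4 KiB pages. For readers unfamiliar with the watch mechanism the
patch relies on, here is a minimal standalone consumer loop built from the
same libxenstore calls as the hunk above; the watched path is a hypothetical
example and error handling is trimmed. xenpaging itself multiplexes the
watch with its mem_event channel via poll(), as the fd[2] array in the main
loop shows.

/* Sketch: minimal xenstore watch consumer. Note that a watch fires once
 * right after xs_watch(), so a pre-existing value is read immediately.
 */
#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <xs.h>

int main(void)
{
    struct xs_handle *xsh = xs_daemon_open();
    struct pollfd fd;
    char **vec, *val;
    unsigned int num, len;

    if ( !xsh )
        return 1;

    if ( !xs_watch(xsh, "/local/domain/1/memory/target-tot_pages", "") )
        return 1;

    fd.fd = xs_fileno(xsh);
    fd.events = POLLIN;

    while ( poll(&fd, 1, -1) > 0 )
    {
        /* vec[XS_WATCH_PATH] holds the fired path, vec[XS_WATCH_TOKEN] the token */
        vec = xs_read_watch(xsh, &num);
        if ( !vec )
            continue;
        val = xs_read(xsh, XBT_NULL, vec[XS_WATCH_PATH], &len);
        if ( val )
        {
            printf("%s = %s\n", vec[XS_WATCH_PATH], val);
            free(val);
        }
        free(vec);
    }

    xs_daemon_close(xsh);
    return 0;
}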
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

