
[Xen-changelog] [xen-unstable] xen, pod: Zero-check recently populated pages (checklast)


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-unstable <patchbot@xxxxxxx>
  • Date: Wed, 04 Jul 2012 04:11:13 +0000
  • Delivery-date: Wed, 04 Jul 2012 04:11:20 +0000
  • List-id: "Change log for Mercurial \(receive only\)" <xen-changelog.lists.xen.org>

# HG changeset patch
# User George Dunlap <george.dunlap@xxxxxxxxxxxxx>
# Date 1340893083 -3600
# Node ID 437b7cc52db7fb2d0c4adacc9bf9bad2979cf110
# Parent  0ff7269bbcedc22db1f48661dd52355081c19eaf
xen,pod: Zero-check recently populated pages (checklast)

When demand-populating pages due to guest accesses, check recently populated
pages to see if we can reclaim them for the cache.  This should keep the PoD
cache filled when the start-of-day scrubber is going through.

The number 128 was chosen by experiment.  Windows does its page
scrubbing in parallel; while a small number like 4 works well for
single VMs, it breaks down as multiple vcpus are scrubbing different
pages in parallel.  Increasing to 128 works well for higher numbers of
vcpus.
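
The mechanism amounts to a small circular history buffer of recently
demand-populated superpage gfns: each new population pushes out (and
zero-checks) the entry populated POD_HISTORY_MAX populations earlier.
The standalone C sketch below only illustrates that idea and is not the
Xen code: history[], HISTORY_MAX, check_last() and zero_check() are
stand-ins for pod.last_populated[], POD_HISTORY_MAX,
p2m_pod_check_last_super() and p2m_pod_zero_check_superpage() in the
patch.

/*
 * Illustrative sketch of the "check last" ring buffer; not the Xen code.
 */
#include <stdio.h>

#define HISTORY_MAX 128          /* matches POD_HISTORY_MAX in the patch */

static unsigned long history[HISTORY_MAX];  /* 0 means "no entry yet" */
static unsigned int history_index;

/* Stand-in for the real zero check; just reports the gfn it would scan. */
static void zero_check(unsigned long gfn)
{
    printf("zero-checking gfn %#lx\n", gfn);
}

/* Called after a superpage at gfn_aligned has been demand-populated. */
static void check_last(unsigned long gfn_aligned)
{
    unsigned long check_gfn = history[history_index];

    /* Remember the superpage we just populated... */
    history[history_index] = gfn_aligned;
    history_index = (history_index + 1) % HISTORY_MAX;

    /* ...and revisit the one populated HISTORY_MAX populations ago,
     * hoping the guest has finished zeroing it by now. */
    if ( check_gfn )
        zero_check(check_gfn);
}

int main(void)
{
    unsigned long gfn;

    /* Simulate a stream of 2MB-aligned (512-page) demand populations. */
    for ( gfn = 0x100000; gfn < 0x100000 + 512 * 200; gfn += 512 )
        check_last(gfn);

    return 0;
}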

v2:
 - Wrapped some long lines
 - unsigned int for index, unsigned long for array
v3:
 - Use PAGE_ORDER_2M instead of 9
 - Removed inappropriate use of p2m_pod_zero_check_superpage() return value

Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
Committed-by: Tim Deegan <tim@xxxxxxx>
---


diff -r 0ff7269bbced -r 437b7cc52db7 xen/arch/x86/mm/p2m-pod.c
--- a/xen/arch/x86/mm/p2m-pod.c Thu Jun 28 15:18:00 2012 +0100
+++ b/xen/arch/x86/mm/p2m-pod.c Thu Jun 28 15:18:03 2012 +0100
@@ -926,6 +926,27 @@ p2m_pod_emergency_sweep_super(struct p2m
     p2m->pod.reclaim_super = i ? i - SUPERPAGE_PAGES : 0;
 }
 
+/* When populating a new superpage, look at recently populated superpages
+ * hoping that they've been zeroed.  This will snap up zeroed pages as soon as 
+ * the guest OS is done with them. */
+static void
+p2m_pod_check_last_super(struct p2m_domain *p2m, unsigned long gfn_aligned)
+{
+    unsigned long check_gfn;
+
+    ASSERT(p2m->pod.last_populated_index < POD_HISTORY_MAX);
+
+    check_gfn = p2m->pod.last_populated[p2m->pod.last_populated_index];
+
+    p2m->pod.last_populated[p2m->pod.last_populated_index] = gfn_aligned;
+
+    p2m->pod.last_populated_index =
+        ( p2m->pod.last_populated_index + 1 ) % POD_HISTORY_MAX;
+
+    p2m_pod_zero_check_superpage(p2m, check_gfn);
+}
+
+
 #define POD_SWEEP_STRIDE  16
 static void
 p2m_pod_emergency_sweep(struct p2m_domain *p2m)
@@ -1083,6 +1104,12 @@ p2m_pod_demand_populate(struct p2m_domai
         __trace_var(TRC_MEM_POD_POPULATE, 0, sizeof(t), &t);
     }
 
+    /* Check the last guest demand-populate */
+    if ( p2m->pod.entry_count > p2m->pod.count 
+         && (order == PAGE_ORDER_2M)
+         && (q & P2M_ALLOC) )
+        p2m_pod_check_last_super(p2m, gfn_aligned);
+
     pod_unlock(p2m);
     return 0;
 out_of_memory:
diff -r 0ff7269bbced -r 437b7cc52db7 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Thu Jun 28 15:18:00 2012 +0100
+++ b/xen/include/asm-x86/p2m.h Thu Jun 28 15:18:03 2012 +0100
@@ -287,6 +287,10 @@ struct p2m_domain {
         unsigned         reclaim_super; /* Last gpfn of a scan */
         unsigned         reclaim_single; /* Last gpfn of a scan */
         unsigned         max_guest;    /* gpfn of max guest demand-populate */
+#define POD_HISTORY_MAX 128
+        /* gpfn of last guest superpage demand-populated */
+        unsigned long    last_populated[POD_HISTORY_MAX]; 
+        unsigned int     last_populated_index;
         mm_lock_t        lock;         /* Locking of private pod structs,   *
                                         * not relying on the p2m lock.      */
     } pod;
