
[Xen-changelog] Keep a list of pre-zero'ed L1 shadow pages.



ChangeSet 1.1260, 2005/03/24 12:33:04+00:00, mafetter@xxxxxxxxxxxxxxxx

        Keep a list of pre-zero'ed L1 shadow pages.
        Avoid the cost of zero'ing them upon allocation.
        
        Signed-off-by: michael.fetterman@xxxxxxxxxxxx



 arch/x86/domain.c        |    3 ++-
 arch/x86/shadow.c        |   43 +++++++++++++++++++++++++++++++++++++------
 include/asm-x86/domain.h |    2 ++
 include/asm-x86/shadow.h |    2 +-
 include/xen/perfc_defn.h |   35 ++++++++++++++++-------------------
 5 files changed, 58 insertions(+), 27 deletions(-)
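
For orientation before the diff itself: the patch keeps a per-domain free list
(d->arch.free_shadow_frames) of shadow L1 pages that are already zero-filled, so
the allocator only pays for a memset on the slow path, and the free path re-zeroes
a page before parking it on the list instead of returning it to the heap. Below is
a minimal, self-contained C sketch of that pattern. All names here (free_frame,
free_list, alloc_zeroed_frame, release_frame) are hypothetical, and for brevity the
list link lives in the free page itself, where Xen instead chains the page's
struct pfn_info onto the domain's list with list_add()/list_del().

    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 4096

    /* Link for the free list; it lives in the free page's own memory. */
    struct free_frame {
        struct free_frame *next;
    };

    static struct free_frame *free_list;    /* pages already zero-filled */

    /* Fast path: pop an already-zeroed page.  Slow path: allocate one
     * and pay for the memset now. */
    static void *alloc_zeroed_frame(void)
    {
        if (free_list != NULL) {
            struct free_frame *f = free_list;
            free_list = f->next;
            f->next = NULL;      /* re-zero the one word used as a link */
            return f;
        }
        void *page = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
        if (page != NULL)
            memset(page, 0, PAGE_SIZE);
        return page;
    }

    /* Zero the page at free time, off the allocation fast path, and
     * park it for reuse instead of handing it back to the system. */
    static void release_frame(void *page)
    {
        memset(page, 0, PAGE_SIZE);
        struct free_frame *f = page;
        f->next = free_list;
        free_list = f;
    }

The patch refines the release side further: it clears only the range of L1 entries
known to have been populated, rather than the whole 4KB page, as the next sketch
explains.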


diff -Nru a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     2005-04-05 12:18:19 -04:00
+++ b/xen/arch/x86/domain.c     2005-04-05 12:18:19 -04:00
@@ -262,7 +262,8 @@
             mk_l3_pgentry(__pa(d->arch.mm_perdomain_l2) | __PAGE_HYPERVISOR);
 #endif
 
-        shadow_lock_init(d);        
+        shadow_lock_init(d);
+        INIT_LIST_HEAD(&d->arch.free_shadow_frames);
     }
 }
 
diff -Nru a/xen/arch/x86/shadow.c b/xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c     2005-04-05 12:18:19 -04:00
+++ b/xen/arch/x86/shadow.c     2005-04-05 12:18:19 -04:00
@@ -187,7 +187,29 @@
     unsigned long smfn;
     int pin = 0;
 
-    page = alloc_domheap_page(NULL);
+    // Currently, we only keep pre-zero'ed pages around for use as L1's...
+    // This will change.  Soon.
+    //
+    if ( psh_type == PGT_l1_shadow )
+    {
+        if ( !list_empty(&d->arch.free_shadow_frames) )
+        {
+            struct list_head *entry = d->arch.free_shadow_frames.next;
+            page = list_entry(entry, struct pfn_info, list);
+            list_del(entry);
+            perfc_decr(free_l1_pages);
+        }
+        else
+        {
+            page = alloc_domheap_page(NULL);
+            void *l1 = map_domain_mem(page_to_pfn(page) << PAGE_SHIFT);
+            memset(l1, 0, PAGE_SIZE);
+            unmap_domain_mem(l1);
+        }
+    }
+    else
+        page = alloc_domheap_page(NULL);
+
     if ( unlikely(page == NULL) )
     {
         printk("Couldn't alloc shadow page! dom%d count=%d\n",
@@ -271,11 +293,21 @@
 {
     l1_pgentry_t *pl1e = map_domain_mem(smfn << PAGE_SHIFT);
     int i;
+    struct pfn_info *spage = pfn_to_page(smfn);
+    u32 min_max = spage->tlbflush_timestamp;
+    int min = SHADOW_MIN(min_max);
+    int max = SHADOW_MAX(min_max);
 
-    for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
+    for ( i = min; i <= max; i++ )
+    {
         put_page_from_l1e(pl1e[i], d);
+        pl1e[i] = mk_l1_pgentry(0);
+    }
 
     unmap_domain_mem(pl1e);
+
+    list_add(&spage->list, &d->arch.free_shadow_frames);
+    perfc_incr(free_l1_pages);
 }
 
 static void inline
@@ -372,7 +404,8 @@
     page->tlbflush_timestamp = 0;
     page->u.free.cpu_mask = 0;
 
-    free_domheap_page(page);
+    if ( type != PGT_l1_shadow )
+        free_domheap_page(page);
 }
 
 static void inline
@@ -1428,8 +1461,6 @@
             &(shadow_linear_pg_table[l1_linear_offset(va) &
                                      ~(L1_PAGETABLE_ENTRIES-1)]);
 
-        memset(spl1e, 0, PAGE_SIZE);
-
         unsigned long sl1e;
         int index = l1_table_offset(va);
         int min = 1, max = 0;
@@ -2006,7 +2037,7 @@
     unsigned long *guest, *shadow, *snapshot;
     int need_flush = 0, external = shadow_mode_external(d);
     int unshadow;
-    unsigned long min_max;
+    u32 min_max;
     int min, max;
 
     ASSERT(spin_is_locked(&d->arch.shadow_lock));
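
The hunks above also narrow min_max from unsigned long to u32, matching the 32-bit
tlbflush_timestamp field it is stored in, and the teardown loop in
free_shadow_l1_table() now covers only [min, max] rather than all
L1_PAGETABLE_ENTRIES slots. Here is a compilable sketch of the packing idea; the
PACK_MIN_MAX/UNPACK_* macros are hypothetical stand-ins, since the real
SHADOW_MIN/SHADOW_MAX encoding in shadow.h may differ in detail.

    #include <stdint.h>
    #include <stdio.h>

    /* Two 16-bit indexes packed into one 32-bit word: min in the low
     * half, max in the high half.  Hypothetical encoding; the actual
     * SHADOW_MIN/SHADOW_MAX macros may pack the halves differently. */
    #define PACK_MIN_MAX(min, max) (((uint32_t)(max) << 16) | (uint16_t)(min))
    #define UNPACK_MIN(mm)         ((int)((mm) & 0xffffu))
    #define UNPACK_MAX(mm)         ((int)((mm) >> 16))

    int main(void)
    {
        uint32_t mm = PACK_MIN_MAX(7, 42);  /* e.g. entries 7..42 in use */

        /* Clear only the dirty range, as free_shadow_l1_table() does. */
        for (int i = UNPACK_MIN(mm); i <= UNPACK_MAX(mm); i++)
            printf("clearing L1 entry %d\n", i);

        return 0;
    }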
diff -Nru a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h      2005-04-05 12:18:19 -04:00
+++ b/xen/include/asm-x86/domain.h      2005-04-05 12:18:19 -04:00
@@ -50,6 +50,8 @@
     struct out_of_sync_entry *out_of_sync_extras;
     unsigned int out_of_sync_extras_count;
 
+    struct list_head free_shadow_frames;
+
     pagetable_t  phys_table;               /* guest 1:1 pagetable */
 
 } __cacheline_aligned;
diff -Nru a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h      2005-04-05 12:18:19 -04:00
+++ b/xen/include/asm-x86/shadow.h      2005-04-05 12:18:19 -04:00
@@ -1294,7 +1294,7 @@
 shadow_update_min_max(unsigned long smfn, int index)
 {
     struct pfn_info *sl1page = pfn_to_page(smfn);
-    unsigned long min_max = sl1page->tlbflush_timestamp;
+    u32 min_max = sl1page->tlbflush_timestamp;
     int min = SHADOW_MIN(min_max);
     int max = SHADOW_MAX(min_max);
     int update = 0;
diff -Nru a/xen/include/xen/perfc_defn.h b/xen/include/xen/perfc_defn.h
--- a/xen/include/xen/perfc_defn.h      2005-04-05 12:18:19 -04:00
+++ b/xen/include/xen/perfc_defn.h      2005-04-05 12:18:19 -04:00
@@ -1,3 +1,16 @@
+#define PERFC_MAX_PT_UPDATES 64
+#define PERFC_PT_UPDATES_BUCKET_SIZE 3
+PERFCOUNTER_ARRAY( wpt_updates, "writable pt updates", PERFC_MAX_PT_UPDATES )
+PERFCOUNTER_ARRAY( bpt_updates, "batched pt updates", PERFC_MAX_PT_UPDATES )
+
+PERFCOUNTER_ARRAY( hypercalls, "hypercalls", NR_hypercalls )
+PERFCOUNTER_ARRAY( exceptions, "exceptions", 32 )
+
+#define VMX_PERF_EXIT_REASON_SIZE 37
+#define VMX_PERF_VECTOR_SIZE 0x20
+PERFCOUNTER_ARRAY( vmexits, "vmexits", VMX_PERF_EXIT_REASON_SIZE )
+PERFCOUNTER_ARRAY( cause_vector, "cause vector", VMX_PERF_VECTOR_SIZE )
+
 PERFCOUNTER_CPU (seg_fixups,   "segmentation fixups" )
 
 PERFCOUNTER_CPU( irqs,         "#interrupts" )
@@ -31,24 +44,13 @@
 PERFSTATUS( shadow_l2_pages, "current # shadow L2 pages" )
 PERFSTATUS( shadow_l1_pages, "current # shadow L1 pages" )
 PERFSTATUS( hl2_table_pages, "current # hl2 pages" )
+PERFSTATUS( snapshot_pages,  "current # fshadow snapshot pages" )
+PERFSTATUS( writable_pte_predictions, "# writable pte predictions")
+PERFSTATUS( free_l1_pages,   "current # free shadow L1 pages" )
 
 PERFCOUNTER_CPU( check_pagetable, "calls to check_pagetable" )
 PERFCOUNTER_CPU( check_all_pagetables, "calls to check_all_pagetables" )
 
-#define PERFC_MAX_PT_UPDATES 64
-#define PERFC_PT_UPDATES_BUCKET_SIZE 3
-PERFCOUNTER_ARRAY( wpt_updates, "writable pt updates", PERFC_MAX_PT_UPDATES )
-PERFCOUNTER_ARRAY( bpt_updates, "batched pt updates", PERFC_MAX_PT_UPDATES )
-
-PERFCOUNTER_ARRAY( hypercalls, "hypercalls", NR_hypercalls )
-PERFCOUNTER_ARRAY( exceptions, "exceptions", 32 )
-
-#define VMX_PERF_EXIT_REASON_SIZE 37
-#define VMX_PERF_VECTOR_SIZE 0x20
-PERFCOUNTER_ARRAY( vmexits, "vmexits", VMX_PERF_EXIT_REASON_SIZE )
-PERFCOUNTER_ARRAY( cause_vector, "cause vector", VMX_PERF_VECTOR_SIZE )
-
-
 PERFCOUNTER_CPU( shadow_hl2_table_count,   "shadow_hl2_table count" )
 PERFCOUNTER_CPU( shadow_set_l1e_force_map, "shadow_set_l1e forced to map l1" )
 PERFCOUNTER_CPU( shadow_set_l1e_unlinked,  "shadow_set_l1e found unlinked l1" )
@@ -56,10 +58,6 @@
 PERFCOUNTER_CPU( shadow_invlpg_faults,     "shadow_invlpg's get_user faulted")
 PERFCOUNTER_CPU( unshadow_l2_count,        "unpinned L2 count")
 
-
-/* STATUS counters do not reset when 'P' is hit */
-PERFSTATUS( snapshot_pages,  "current # fshadow snapshot pages" )
-
 PERFCOUNTER_CPU(shadow_status_shortcut, "fastpath miss on shadow cache")
 PERFCOUNTER_CPU(shadow_status_calls,    "calls to ___shadow_status" )
 PERFCOUNTER_CPU(shadow_status_miss,     "missed shadow cache" )
@@ -87,7 +85,6 @@
 PERFCOUNTER_CPU(validate_hl2e_changes,             "validate_hl2e makes changes")
 PERFCOUNTER_CPU(exception_fixed,                   "pre-exception fixed")
 PERFCOUNTER_CPU(gpfn_to_mfn_safe,                  "calls to gpfn_to_mfn_safe")
-PERFSTATUS( writable_pte_predictions, "# writable pte predictions")
 PERFCOUNTER_CPU(remove_write_access,               "calls to remove_write_access")
 PERFCOUNTER_CPU(remove_write_access_easy,          "easy outs of remove_write_access")
 PERFCOUNTER_CPU(remove_write_no_work,              "no work in remove_write_access")
