
[Xen-changelog] Update map_domain_page() documentation (mappings may only be used within the mapping vcpu)



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID ed7888c838ad5cd213a24d21ae294b31a2500f4d
# Parent  542cb7acb21af9704044cea6720c84e73cb165f3
Update map_domain_page() documentation (mappings may only be
used within the mapping vcpu). Implement TLB flush
filtering on the per-domain mapcache.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
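
For readers unfamiliar with Xen's tlbflush clock, here is a minimal,
self-contained sketch of the filtering idea implemented below. All names
(flush_clock, cpu_flush_time, mapcache_sketch, need_flush) are simplified
stand-ins for illustration, not Xen's real definitions: the mapcache stamps
itself with the global flush clock when it wraps, and a CPU issues a local
TLB flush only if it has not flushed since that stamp.

/*
 * Minimal sketch of timestamp-based TLB flush filtering.  Illustrative,
 * simplified names only; these are not Xen's actual definitions.
 */
#include <stdbool.h>
#include <stdint.h>

#define NR_CPUS 4

static uint32_t flush_clock = 1;          /* global flush epoch counter */
static uint32_t cpu_flush_time[NR_CPUS];  /* clock value at each CPU's last flush */

struct mapcache_sketch {
    uint32_t tlbflush_timestamp;          /* clock value when the cache last wrapped */
};

/* Record that this CPU has just flushed its local TLB. */
static void note_local_flush(unsigned int cpu)
{
    cpu_flush_time[cpu] = ++flush_clock;
}

/* Record that the mapcache wrapped, so stale mappings may now exist. */
static void note_cache_wrap(struct mapcache_sketch *cache)
{
    cache->tlbflush_timestamp = flush_clock;
}

/* A CPU must flush only if it has not flushed since the cache wrapped. */
static bool need_flush(unsigned int cpu, const struct mapcache_sketch *cache)
{
    return cpu_flush_time[cpu] <= cache->tlbflush_timestamp;
}

int main(void)
{
    struct mapcache_sketch cache = { 0 };

    note_cache_wrap(&cache);          /* another CPU wrapped the cache */
    if (need_flush(0, &cache))        /* CPU0 has not flushed since: flush */
        note_local_flush(0);
    return need_flush(0, &cache);     /* now filtered: 0, no redundant flush */
}

The real code plays the same game with Xen's tlbflush clock
(tlbflush_time[], tlbflush_current_time(), NEED_FLUSH()), which additionally
handles clock wrap.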

diff -r 542cb7acb21a -r ed7888c838ad xen/arch/x86/x86_32/domain_page.c
--- a/xen/arch/x86/x86_32/domain_page.c Tue Jan 10 17:25:45 2006
+++ b/xen/arch/x86/x86_32/domain_page.c Tue Jan 10 17:53:44 2006
@@ -40,7 +40,8 @@
 {
     unsigned long va;
     unsigned int idx, i, flags, vcpu = current->vcpu_id;
-    struct mapcache *cache = &current->domain->arch.mapcache;
+    struct domain *d;
+    struct mapcache *cache;
 #ifndef NDEBUG
     unsigned int flush_count = 0;
 #endif
@@ -49,17 +50,24 @@
     perfc_incrc(map_domain_page_count);
 
     /* If we are the idle domain, ensure that we run on our own page tables. */
-    if ( unlikely(is_idle_vcpu(current)) )
+    d = current->domain;
+    if ( unlikely(is_idle_domain(d)) )
         __sync_lazy_execstate();
+
+    cache = &d->arch.mapcache;
 
     spin_lock(&cache->lock);
 
     /* Has some other CPU caused a wrap? We must flush if so. */
-    if ( cache->epoch != cache->shadow_epoch[vcpu] )
+    if ( unlikely(cache->epoch != cache->shadow_epoch[vcpu]) )
     {
-        perfc_incrc(domain_page_tlb_flush);
-        local_flush_tlb();
         cache->shadow_epoch[vcpu] = cache->epoch;
+        if ( NEED_FLUSH(tlbflush_time[smp_processor_id()],
+                        cache->tlbflush_timestamp) )
+        {
+            perfc_incrc(domain_page_tlb_flush);
+            local_flush_tlb();
+        }
     }
 
     do {
@@ -71,6 +79,7 @@
             perfc_incrc(domain_page_tlb_flush);
             local_flush_tlb();
             cache->shadow_epoch[vcpu] = ++cache->epoch;
+            cache->tlbflush_timestamp = tlbflush_current_time();
         }
 
         flags = 0;
diff -r 542cb7acb21a -r ed7888c838ad xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h      Tue Jan 10 17:25:45 2006
+++ b/xen/include/asm-x86/domain.h      Tue Jan 10 17:53:44 2006
@@ -17,6 +17,7 @@
     l1_pgentry_t *l1tab;
     unsigned int cursor;
     unsigned int epoch, shadow_epoch[MAX_VIRT_CPUS];
+    u32 tlbflush_timestamp;
     spinlock_t lock;
 };
 
diff -r 542cb7acb21a -r ed7888c838ad xen/include/xen/domain_page.h
--- a/xen/include/xen/domain_page.h     Tue Jan 10 17:25:45 2006
+++ b/xen/include/xen/domain_page.h     Tue Jan 10 17:53:44 2006
@@ -17,21 +17,21 @@
 
 /*
  * Maps a given range of page frames, returning the mapped virtual address. The
- * pages are now accessible within the current domain until a corresponding
+ * pages are now accessible within the current VCPU until a corresponding
  * call to unmap_domain_page().
  */
 extern void *map_domain_pages(unsigned long pfn, unsigned int order);
 
 /*
  * Pass a VA within the first page of a range previously mapped in the context
- * of the currently-executing domain via a call to map_domain_pages(). Those
+ * of the currently-executing VCPU via a call to map_domain_pages(). Those
  * pages will then be removed from the mapping lists.
  */
 extern void unmap_domain_pages(void *va, unsigned int order);
 
 /*
  * Similar to the above calls, except the mapping is accessible in all
- * address spaces (not just within the domain that created the mapping). Global
+ * address spaces (not just within the VCPU that created the mapping). Global
  * mappings can also be unmapped from any context.
  */
 extern void *map_domain_page_global(unsigned long pfn);
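
To make the per-VCPU rule concrete, a hedged usage sketch follows.
copy_frame() is an invented helper and not part of this patch, and it
assumes memcpy() and PAGE_SIZE are in scope as usual:

/*
 * Hypothetical illustration of the contract documented above: a mapping
 * returned by map_domain_pages() may only be dereferenced on the VCPU that
 * created it, and must be released on that same VCPU.
 */
static void copy_frame(unsigned long dst_pfn, unsigned long src_pfn)
{
    /* Order-0 mappings: one page frame each, valid only on this VCPU. */
    void *dst = map_domain_pages(dst_pfn, 0);
    void *src = map_domain_pages(src_pfn, 0);

    memcpy(dst, src, PAGE_SIZE);

    /* Unmap on the same VCPU; do not stash these pointers for later use. */
    unmap_domain_pages(src, 0);
    unmap_domain_pages(dst, 0);
}

If the pointer had to remain usable from other VCPUs or other address
spaces, map_domain_page_global() would be the appropriate call instead.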
