
[Xen-changelog] [xen-unstable] x86: Remove invlpg_works_ok and invlpg only single-page regions.



# HG changeset patch
# User Keir Fraser <keir@xxxxxxxxxxxxx>
# Date 1192615952 -3600
# Node ID 76bf1fcaf01d2d344e4c4c4a127bf20fece77d4a
# Parent  86bd91e90eec5da1dce9f25cd101a7034dec67cc
x86: Remove invlpg_works_ok and invlpg only single-page regions.

The flush_area_local() interface was unclear about whether a
multi-page region (2M/4M/1G) had to be mapped by a superpage, and
indeed some callers (map_pages_to_xen()) would already specify
FLUSH_LEVEL(2) for a region actually mapped by 4kB PTEs.

The safest fix is to relax the interface and do a full TLB flush in
these cases. My suspicion is that such cases are rare enough that the
cost difference between INVLPG and a full flush will be unimportant.
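
To illustrate the relaxed semantics (the helper below is hypothetical and
not part of this changeset), a caller may now pass FLUSH_LEVEL(2) for a
region backed by 4kB PTEs; flush_area_local() simply falls back to a full
TLB flush instead of issuing INVLPG:

    /* Hypothetical caller, for illustration only -- not in this changeset. */
    static void flush_2mb_region(const void *va)
    {
        /*
         * Level 2 names the 2MB/4MB area containing va. The region need not
         * be mapped by a superpage: for any level other than 1,
         * flush_area_local() now performs a full TLB flush rather than INVLPG.
         */
        flush_area_local(va, FLUSH_TLB | FLUSH_LEVEL(2));
    }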

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/cpu/amd.c          |    5 -----
 xen/arch/x86/cpu/common.c       |    1 -
 xen/arch/x86/cpu/intel.c        |   12 ------------
 xen/arch/x86/flushtlb.c         |    8 +++++++-
 xen/arch/x86/setup.c            |    2 +-
 xen/include/asm-x86/flushtlb.h  |    1 +
 xen/include/asm-x86/processor.h |    1 -
 7 files changed, 9 insertions(+), 21 deletions(-)

diff -r 86bd91e90eec -r 76bf1fcaf01d xen/arch/x86/cpu/amd.c
--- a/xen/arch/x86/cpu/amd.c    Wed Oct 17 10:02:49 2007 +0100
+++ b/xen/arch/x86/cpu/amd.c    Wed Oct 17 11:12:32 2007 +0100
@@ -372,11 +372,6 @@ static void __init init_amd(struct cpuin
        /* Prevent TSC drift in non single-processor, single-core platforms. */
        if ((smp_processor_id() == 1) && c1_ramping_may_cause_clock_drift(c))
                disable_c1_ramping();
-
-       /* Support INVLPG of superpages? */
-       __set_bit(2, &c->invlpg_works_ok);
-       if ( cpu_has(c, X86_FEATURE_PAGE1GB) )
-               __set_bit(3, &c->invlpg_works_ok);
 
        start_svm(c);
 }
diff -r 86bd91e90eec -r 76bf1fcaf01d xen/arch/x86/cpu/common.c
--- a/xen/arch/x86/cpu/common.c Wed Oct 17 10:02:49 2007 +0100
+++ b/xen/arch/x86/cpu/common.c Wed Oct 17 11:12:32 2007 +0100
@@ -314,7 +314,6 @@ void __devinit identify_cpu(struct cpuin
        c->x86_vendor_id[0] = '\0'; /* Unset */
        c->x86_model_id[0] = '\0';  /* Unset */
        c->x86_max_cores = 1;
-       c->invlpg_works_ok = 1; /* no superpage INVLPG by default */
        c->x86_clflush_size = 0;
        memset(&c->x86_capability, 0, sizeof c->x86_capability);
 
diff -r 86bd91e90eec -r 76bf1fcaf01d xen/arch/x86/cpu/intel.c
--- a/xen/arch/x86/cpu/intel.c  Wed Oct 17 10:02:49 2007 +0100
+++ b/xen/arch/x86/cpu/intel.c  Wed Oct 17 11:12:32 2007 +0100
@@ -123,18 +123,6 @@ static void __devinit init_intel(struct 
        if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
                clear_bit(X86_FEATURE_SEP, c->x86_capability);
 
-       /* Supports INVLPG of superpages? */
-       __set_bit(2, &c->invlpg_works_ok);
-       if (/* PentiumPro erratum 30 */
-           (c->x86 == 6 && c->x86_model == 1 && c->x86_mask < 9) ||
-           /* Dual-Core Intel Xeon 3000/5100 series erratum 89/90 */
-           /* Quad-Core Intel Xeon 3200/5300 series erratum 89/88 */
-           /* Intel Core2 erratum 89 */
-           (c->x86 == 6 && c->x86_model == 15 ) ||
-           /* Dual-Core Intel Xeon LV/ULV erratum 75 */
-           (c->x86 == 6 && c->x86_model == 14 ))
-               __clear_bit(2, &c->invlpg_works_ok);
-
        /* Names for the Pentium II/Celeron processors 
           detectable only by also checking the cache size.
           Dixon is NOT a Celeron. */
diff -r 86bd91e90eec -r 76bf1fcaf01d xen/arch/x86/flushtlb.c
--- a/xen/arch/x86/flushtlb.c   Wed Oct 17 10:02:49 2007 +0100
+++ b/xen/arch/x86/flushtlb.c   Wed Oct 17 11:12:32 2007 +0100
@@ -108,8 +108,14 @@ void flush_area_local(const void *va, un
 
     if ( flags & (FLUSH_TLB|FLUSH_TLB_GLOBAL) )
     {
-        if ( (level != 0) && test_bit(level, &c->invlpg_works_ok) )
+        if ( level == 1 )
         {
+            /*
+             * We don't INVLPG multi-page regions because the 2M/4M/1G
+             * region may not have been mapped with a superpage. Also there
+             * are various errata surrounding INVLPG usage on superpages, and
+             * a full flush is in any case not *that* expensive.
+             */
             asm volatile ( "invlpg %0"
                            : : "m" (*(const char *)(va)) : "memory" );
         }
diff -r 86bd91e90eec -r 76bf1fcaf01d xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c      Wed Oct 17 10:02:49 2007 +0100
+++ b/xen/arch/x86/setup.c      Wed Oct 17 11:12:32 2007 +0100
@@ -114,7 +114,7 @@ struct tss_struct init_tss[NR_CPUS];
 
 char __attribute__ ((__section__(".bss.stack_aligned"))) 
cpu0_stack[STACK_SIZE];
 
-struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, 1, -1 };
+struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1 };
 
 #if CONFIG_PAGING_LEVELS > 2
 unsigned long mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE;
diff -r 86bd91e90eec -r 76bf1fcaf01d xen/include/asm-x86/flushtlb.h
--- a/xen/include/asm-x86/flushtlb.h    Wed Oct 17 10:02:49 2007 +0100
+++ b/xen/include/asm-x86/flushtlb.h    Wed Oct 17 11:12:32 2007 +0100
@@ -78,6 +78,7 @@ void write_cr3(unsigned long cr3);
   *  1 -> 4kB area containing specified virtual address
   *  2 -> 4MB/2MB area containing specified virtual address
   *  3 -> 1GB area containing specified virtual address (x86/64 only)
+  * NB. Multi-page areas do not need to have been mapped with a superpage.
   */
 #define FLUSH_LEVEL_MASK 0x0f
 #define FLUSH_LEVEL(x)   (x)
diff -r 86bd91e90eec -r 76bf1fcaf01d xen/include/asm-x86/processor.h
--- a/xen/include/asm-x86/processor.h   Wed Oct 17 10:02:49 2007 +0100
+++ b/xen/include/asm-x86/processor.h   Wed Oct 17 11:12:32 2007 +0100
@@ -164,7 +164,6 @@ struct cpuinfo_x86 {
     __u8 x86_vendor;     /* CPU vendor */
     __u8 x86_model;
     __u8 x86_mask;
-    __u8 invlpg_works_ok;
     int  cpuid_level;    /* Maximum supported CPUID level, -1=no CPUID */
     unsigned int x86_capability[NCAPINTS];
     char x86_vendor_id[16];
