
[Xen-changelog] Upgrade MTRR support to that from Linux 2.6.11.



ChangeSet 1.1806, 2005/03/28 18:51:38+01:00, kaf24@xxxxxxxxxxxxxxxxxxxx

        Upgrade MTRR support to that from Linux 2.6.11.
        Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>



 mtrr/generic.c |   22 +++++++++++++++++-----
 mtrr/main.c    |   24 +++++++++---------------
 smpboot.c      |   11 -----------
 3 files changed, 26 insertions(+), 31 deletions(-)
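
Two of the generic.c changes below are easy to miss: the wbinvd() that ran
before CR0.CD was set is dropped (only the flush done after entering no-fill
cache mode is needed), and post_set() stops flushing caches, since they are
still disabled at that point. A sketch of the resulting update sequence,
using the helpers visible in the diff (illustration only; locking, MSR
writes and CR0/CR4 restoration elided):

    /* Illustrative sequence only -- the real code is in the diff below. */
    local_irq_save(flags);            /* caller: no interrupts from here on */
    cr0 = read_cr0() | 0x40000000;    /* CR0.CD = 1: no-fill cache mode     */
    write_cr0(cr0);
    wbinvd();                         /* flush caches once fills are off    */
    if ( cpu_has_pge )
        write_cr4(read_cr4() & ~X86_CR4_PGE);  /* so the CR3 reload below   */
    __flush_tlb();                    /* flushes global TLB entries too     */
    /* ... disable MTRRs, write the new ranges, re-enable MTRRs ...         */
    __flush_tlb();                    /* post_set(): TLBs only, no wbinvd() */
    local_irq_restore(flags);         /* caller: interrupts back on         */
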


diff -Nru a/xen/arch/x86/mtrr/generic.c b/xen/arch/x86/mtrr/generic.c
--- a/xen/arch/x86/mtrr/generic.c       2005-03-28 13:05:52 -05:00
+++ b/xen/arch/x86/mtrr/generic.c       2005-03-28 13:05:52 -05:00
@@ -8,7 +8,6 @@
 #include <asm/msr.h>
 #include <asm/system.h>
 #include <asm/cpufeature.h>
-//#include <asm/tlbflush.h>
 #include "mtrr.h"
 
 struct mtrr_state {
@@ -232,6 +231,13 @@
 static u32 deftype_lo, deftype_hi;
 static spinlock_t set_atomicity_lock = SPIN_LOCK_UNLOCKED;
 
+/*
+ * Since we are disabling the cache don't allow any interrupts - they
+ * would run extremely slow and would only increase the pain.  The caller must
+ * ensure that local interrupts are disabled and are reenabled after post_set()
+ * has been called.
+ */
+
 static void prepare_set(void)
 {
        unsigned long cr0;
@@ -239,18 +245,18 @@
        /*  Note that this is not ideal, since the cache is only flushed/disabled
           for this CPU while the MTRRs are changed, but changing this requires
           more invasive changes to the way the kernel boots  */
+
        spin_lock(&set_atomicity_lock);
 
        /*  Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
        cr0 = read_cr0() | 0x40000000;  /* set CD flag */
-       wbinvd();
        write_cr0(cr0);
        wbinvd();
 
        /*  Save value of CR4 and clear Page Global Enable (bit 7)  */
        if ( cpu_has_pge ) {
                cr4 = read_cr4();
-               write_cr4(cr4 & (unsigned char) ~(1 << 7));
+               write_cr4(cr4 & ~X86_CR4_PGE);
        }
 
        /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
@@ -265,8 +271,7 @@
 
 static void post_set(void)
 {
-       /*  Flush caches and TLBs  */
-       wbinvd();
+       /*  Flush TLBs (no need to flush caches - they are disabled)  */
        __flush_tlb();
 
        /* Intel (P6) standard MTRRs */
@@ -284,13 +289,16 @@
 static void generic_set_all(void)
 {
        unsigned long mask, count;
+       unsigned long flags;
 
+       local_irq_save(flags);
        prepare_set();
 
        /* Actually set the state */
        mask = set_mtrr_state(deftype_lo,deftype_hi);
 
        post_set();
+       local_irq_restore(flags);
 
        /*  Use the atomic bitops to update the global mask  */
        for (count = 0; count < sizeof mask * 8; ++count) {
@@ -313,6 +321,9 @@
     [RETURNS] Nothing.
 */
 {
+       unsigned long flags;
+
+       local_irq_save(flags);
        prepare_set();
 
        if (size == 0) {
@@ -327,6 +338,7 @@
        }
 
        post_set();
+       local_irq_restore(flags);
 }
 
 int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
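
The new comment makes the interrupt contract explicit: prepare_set() and
post_set() assume the caller has masked interrupts, and generic_set_all()
and generic_set_mtrr() now do so themselves (via the local_irq_save() and
local_irq_restore() calls added above) rather than relying on their callers.
The pattern, reduced to its skeleton (do_the_msr_writes() is a hypothetical
stand-in for the actual MTRR MSR updates):

    unsigned long flags;

    local_irq_save(flags);    /* required: see the comment added above    */
    prepare_set();            /* caches off, TLBs flushed, lock taken     */
    do_the_msr_writes();      /* hypothetical: wrmsr() the new MTRR state */
    post_set();               /* MTRRs re-enabled, CR0/CR4 restored       */
    local_irq_restore(flags); /* only now may interrupts run again        */
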
diff -Nru a/xen/arch/x86/mtrr/main.c b/xen/arch/x86/mtrr/main.c
--- a/xen/arch/x86/mtrr/main.c  2005-03-28 13:05:52 -05:00
+++ b/xen/arch/x86/mtrr/main.c  2005-03-28 13:05:52 -05:00
@@ -167,10 +167,8 @@
        local_irq_save(flags);
 
        atomic_dec(&data->count);
-       while(!atomic_read(&data->gate)) {
+       while(!atomic_read(&data->gate))
                cpu_relax();
-               barrier();
-       }
 
        /*  The master has cleared me to execute  */
        if (data->smp_reg != ~0U) 
@@ -180,10 +178,9 @@
                mtrr_if->set_all();
 
        atomic_dec(&data->count);
-       while(atomic_read(&data->gate)) {
+       while(atomic_read(&data->gate))
                cpu_relax();
-               barrier();
-       }
+
        atomic_dec(&data->count);
        local_irq_restore(flags);
 }
@@ -248,10 +245,9 @@
 
        local_irq_save(flags);
 
-       while(atomic_read(&data.count)) {
+       while(atomic_read(&data.count))
                cpu_relax();
-               barrier();
-       }
+
        /* ok, reset count and toggle gate */
        atomic_set(&data.count, num_booting_cpus() - 1);
        atomic_set(&data.gate,1);
@@ -268,10 +264,9 @@
                mtrr_if->set(reg,base,size,type);
 
        /* wait for the others */
-       while(atomic_read(&data.count)) {
+       while(atomic_read(&data.count))
                cpu_relax();
-               barrier();
-       }
+
        atomic_set(&data.count, num_booting_cpus() - 1);
        atomic_set(&data.gate,0);
 
@@ -279,10 +274,9 @@
         * Wait here for everyone to have seen the gate change
         * So we're the last ones to touch 'data'
         */
-       while(atomic_read(&data.count)) {
+       while(atomic_read(&data.count))
                cpu_relax();
-               barrier();
-       }
+
        local_irq_restore(flags);
 }
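
The barrier() calls dropped above were redundant: on x86, cpu_relax() is
rep_nop(), i.e. a "rep; nop" with a "memory" clobber, so every spin
iteration is already a compiler barrier. For reference, this is the
rendezvous those loops implement, condensed from set_mtrr() and
ipi_handler() above (N = num_booting_cpus()):

    set_mtrr() (initiator)           ipi_handler() (other CPUs)
    ----------------------           --------------------------
    count = N-1, gate = 0
    send IPIs                        atomic_dec(&count)
    spin while (count != 0)          spin while (!gate)
    count = N-1, gate = 1     --->   (released by the gate)
    update own MTRRs                 update own MTRRs
    spin while (count != 0)   <---   atomic_dec(&count)
    count = N-1, gate = 0     --->   spin while (gate)
    spin while (count != 0)   <---   atomic_dec(&count)
    restore interrupts               restore interrupts
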
 
diff -Nru a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c    2005-03-28 13:05:52 -05:00
+++ b/xen/arch/x86/smpboot.c    2005-03-28 13:05:52 -05:00
@@ -351,13 +351,6 @@
 
     __sti();
 
-#ifdef CONFIG_MTRR
-    /*
-     * Must be done before calibration delay is computed
-     */
-    mtrr_init_secondary_cpu ();
-#endif
-
     Dprintk("Stack at about %p\n",&cpuid);
 
     /*
@@ -771,10 +764,6 @@
 {
     int apicid, bit;
 
-#ifdef CONFIG_MTRR
-    /*  Must be done before other processors booted  */
-    mtrr_init_boot_cpu ();
-#endif
     /* Initialize the logical to physical CPU number mapping */
     init_cpu_to_apicid();
 

