
[Xen-changelog] Merge firebug.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk



ChangeSet 1.1428, 2005/05/10 14:41:01+01:00, cl349@xxxxxxxxxxxxxxxxxxxx

        Merge firebug.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
        into firebug.cl.cam.ac.uk:/local/scratch/cl349/xen-unstable.bk



 smpboot.c |  101 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++----
 1 files changed, 96 insertions(+), 5 deletions(-)


diff -Nru a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c    2005-05-10 11:07:43 -04:00
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c    2005-05-10 11:07:43 -04:00
@@ -44,6 +44,9 @@
 #include <linux/smp_lock.h>
 #include <linux/irq.h>
 #include <linux/bootmem.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/percpu.h>
 
 #include <linux/delay.h>
 #include <linux/mc146818rtc.h>
@@ -88,7 +91,14 @@
 extern unsigned char trampoline_end  [];
 static unsigned char *trampoline_base;
 static int trampoline_exec;
+#endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+/* State of each CPU. */
+DEFINE_PER_CPU(int, cpu_state) = { 0 };
+#endif
 
+#if 0
 /*
  * Currently trivial. Write the real->protected mode
  * bootstrap into the page concerned. The caller
@@ -495,6 +505,7 @@
                }
        }
        cpu_idle();
+       return 0;
 }
 
 /*
@@ -1281,6 +1292,9 @@
    who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
+       smp_commenced_mask = cpumask_of_cpu(0);
+       cpu_callin_map = cpumask_of_cpu(0);
+       mb();
        smp_boot_cpus(max_cpus);
 }
 
@@ -1290,19 +1304,96 @@
        cpu_set(smp_processor_id(), cpu_callout_map);
 }
 
-int __devinit __cpu_up(unsigned int cpu)
+#ifdef CONFIG_HOTPLUG_CPU
+
+/* must be called with the cpucontrol mutex held */
+static int __devinit cpu_enable(unsigned int cpu)
 {
-       /* This only works at boot for x86.  See "rewrite" above. */
-       if (cpu_isset(cpu, smp_commenced_mask)) {
-               local_irq_enable();
-               return -ENOSYS;
+       /* get the target out of its holding state */
+       per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+       wmb();
+
+       /* wait for the processor to ack it. timeout? */
+       while (!cpu_online(cpu))
+               cpu_relax();
+
+       fixup_irqs(cpu_online_map);
+       /* counter the disable in fixup_irqs() */
+       local_irq_enable();
+       return 0;
+}
+
+int __cpu_disable(void)
+{
+       cpumask_t map = cpu_online_map;
+       int cpu = smp_processor_id();
+
+       /*
+        * Perhaps use cpufreq to drop frequency, but that could go
+        * into generic code.
+        *
+        * We won't take down the boot processor on i386 due to some
+        * interrupts only being able to be serviced by the BSP.
+        * Especially so if we're not using an IOAPIC   -zwane
+        */
+       if (cpu == 0)
+               return -EBUSY;
+
+       /* Allow any queued timer interrupts to get serviced */
+       local_irq_enable();
+       mdelay(1);
+       local_irq_disable();
+
+       cpu_clear(cpu, map);
+       fixup_irqs(map);
+       /* It's now safe to remove this processor from the online map */
+       cpu_clear(cpu, cpu_online_map);
+       return 0;
+}
+
+void __cpu_die(unsigned int cpu)
+{
+       /* We don't do anything here: idle task is faking death itself. */
+       unsigned int i;
+
+       for (i = 0; i < 10; i++) {
+               /* They ack this in play_dead by setting CPU_DEAD */
+               if (per_cpu(cpu_state, cpu) == CPU_DEAD)
+                       return;
+               current->state = TASK_UNINTERRUPTIBLE;
+               schedule_timeout(HZ/10);
        }
+       printk(KERN_ERR "CPU %u didn't die...\n", cpu);
+}
+#else /* ... !CONFIG_HOTPLUG_CPU */
+int __cpu_disable(void)
+{
+       return -ENOSYS;
+}
 
+void __cpu_die(unsigned int cpu)
+{
+       /* We said "no" in __cpu_disable */
+       BUG();
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+int __devinit __cpu_up(unsigned int cpu)
+{
        /* In case one didn't come up */
        if (!cpu_isset(cpu, cpu_callin_map)) {
+               printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu);
                local_irq_enable();
                return -EIO;
        }
+
+#ifdef CONFIG_HOTPLUG_CPU
+       /* Already up, and in cpu_quiescent now? */
+       if (cpu_isset(cpu, smp_commenced_mask)) {
+               cpu_enable(cpu);
+               return 0;
+       }
+#endif
 
        local_irq_enable();
        /* Unleash the CPU! */

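For readers following the hotplug handshake in the hunks above, here is a minimal userspace sketch (not kernel code, and not part of the patch) of the pattern __cpu_die() relies on: the departing CPU's idle path advertises CPU_DEAD through a per-CPU state word, and the controlling CPU polls that word a bounded number of times before giving up. The thread, the sleep intervals, the state values and the fake_* names below are illustrative stand-ins for the kernel's idle task, schedule_timeout(HZ/10) and the real hotplug notifier constants.

/* Build with: cc -pthread sketch.c (filename is illustrative). */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

enum { CPU_UP_PREPARE = 1, CPU_DEAD = 2 };      /* stand-in state values */

static atomic_int cpu_state;                    /* models per_cpu(cpu_state, cpu) */

/* Models play_dead(): the departing CPU acknowledges its own death. */
static void *fake_play_dead(void *arg)
{
        usleep(250 * 1000);                     /* pretend teardown takes a while */
        atomic_store(&cpu_state, CPU_DEAD);
        return NULL;
}

/* Models __cpu_die(): poll for the ack at most 10 times, 100 ms apart. */
static void fake_cpu_die(unsigned int cpu)
{
        for (int i = 0; i < 10; i++) {
                if (atomic_load(&cpu_state) == CPU_DEAD) {
                        printf("CPU %u is down\n", cpu);
                        return;
                }
                usleep(100 * 1000);             /* schedule_timeout(HZ/10) analogue */
        }
        fprintf(stderr, "CPU %u didn't die...\n", cpu);
}

int main(void)
{
        pthread_t t;

        atomic_store(&cpu_state, CPU_UP_PREPARE);
        pthread_create(&t, NULL, fake_play_dead, NULL);
        fake_cpu_die(1);
        pthread_join(&t, NULL);
        return 0;
}

The bounded poll-and-sleep loop is the same design choice the patch makes: the waiting side never blocks forever on a CPU that fails to tear itself down, it simply reports the failure, as the printk in __cpu_die() does.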
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

