
[Xen-changelog] Simplify and fix SMP irq and ipi teardown/setup on suspend/resume.



# HG changeset patch
# User cl349@xxxxxxxxxxxxxxxxxxxx
# Node ID b3785cbb723b8b355c1282232de0bd1cfbfb3556
# Parent  70be155e9e9cb5534c3e6d55aee3a4fbb28bf105
Simplify and fix SMP irq and ipi teardown/setup on suspend/resume.
Signed-off-by: Christian Limpach <Christian.Limpach@xxxxxxxxxxxx>

diff -r 70be155e9e9c -r b3785cbb723b linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c       Mon Aug 29 17:15:56 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c       Mon Aug 29 17:40:47 2005
@@ -135,6 +135,10 @@
  * low exit latency (ie sit in a loop waiting for
  * somebody to say that they'd like to reschedule)
  */
+#ifdef CONFIG_SMP
+extern void smp_suspend(void);
+extern void smp_resume(void);
+#endif
 void cpu_idle (void)
 {
        int cpu = _smp_processor_id();
@@ -149,6 +153,9 @@
 
                        if (cpu_is_offline(cpu)) {
                                local_irq_disable();
+#ifdef CONFIG_SMP
+                               smp_suspend();
+#endif
 #if defined(CONFIG_XEN) && defined(CONFIG_HOTPLUG_CPU)
                                /* Ack it.  From this point on until
                                   we get woken up, we're not allowed
@@ -159,6 +166,9 @@
                                HYPERVISOR_vcpu_down(cpu);
 #endif
                                play_dead();
+#ifdef CONFIG_SMP
+                               smp_resume();
+#endif
                                local_irq_enable();
                        }
 
@@ -789,10 +799,3 @@
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
 }
-
-
-#ifndef CONFIG_X86_SMP
-void _restore_vcpu(void)
-{
-}
-#endif
diff -r 70be155e9e9c -r b3785cbb723b linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c       Mon Aug 29 17:15:56 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c       Mon Aug 29 17:40:47 2005
@@ -1601,32 +1601,71 @@
 
 void smp_suspend(void)
 {
-       /* XXX todo: take down time and ipi's on all cpus */
        local_teardown_timer_irq();
        smp_intr_exit();
 }
 
 void smp_resume(void)
 {
-       /* XXX todo: restore time and ipi's on all cpus */
        smp_intr_init();
        local_setup_timer_irq();
 }
 
-DECLARE_PER_CPU(int, timer_irq);
-
-void _restore_vcpu(void)
-{
-       int cpu = smp_processor_id();
-       extern atomic_t vcpus_rebooting;
-
-       /* We are the first thing the vcpu runs when it comes back,
-          and we are supposed to restore the IPIs and timer
-          interrupts etc.  When we return, the vcpu's idle loop will
-          start up again. */
-       _bind_virq_to_irq(VIRQ_TIMER, cpu, per_cpu(timer_irq, cpu));
-       _bind_virq_to_irq(VIRQ_DEBUG, cpu, per_cpu(ldebug_irq, cpu));
-       _bind_ipi_to_irq(RESCHEDULE_VECTOR, cpu, per_cpu(resched_irq, cpu) );
-       _bind_ipi_to_irq(CALL_FUNCTION_VECTOR, cpu, per_cpu(callfunc_irq, cpu) );
+static atomic_t vcpus_rebooting;
+
+static void restore_vcpu_ready(void)
+{
+
        atomic_dec(&vcpus_rebooting);
 }
+
+void save_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt)
+{
+       int r;
+       int gdt_pages;
+       r = HYPERVISOR_vcpu_pickle(vcpu, ctxt);
+       if (r != 0)
+               panic("pickling vcpu %d -> %d!\n", vcpu, r);
+
+       /* Translate from machine to physical addresses where necessary,
+          so that they can be translated to our new machine address space
+          after resume.  libxc is responsible for doing this to vcpu0,
+          but we do it to the others. */
+       gdt_pages = (ctxt->gdt_ents + 511) / 512;
+       ctxt->ctrlreg[3] = machine_to_phys(ctxt->ctrlreg[3]);
+       for (r = 0; r < gdt_pages; r++)
+               ctxt->gdt_frames[r] = mfn_to_pfn(ctxt->gdt_frames[r]);
+}
+
+int restore_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt)
+{
+       int r;
+       int gdt_pages = (ctxt->gdt_ents + 511) / 512;
+
+       /* This is kind of a hack, and implicitly relies on the fact that
+          the vcpu stops in a place where all of the call clobbered
+          registers are already dead. */
+       ctxt->user_regs.esp -= 4;
+       ((unsigned long *)ctxt->user_regs.esp)[0] = ctxt->user_regs.eip;
+       ctxt->user_regs.eip = (unsigned long)restore_vcpu_ready;
+
+       /* De-canonicalise.  libxc handles this for vcpu 0, but we need
+          to do it for the other vcpus. */
+       ctxt->ctrlreg[3] = phys_to_machine(ctxt->ctrlreg[3]);
+       for (r = 0; r < gdt_pages; r++)
+               ctxt->gdt_frames[r] = pfn_to_mfn(ctxt->gdt_frames[r]);
+
+       atomic_set(&vcpus_rebooting, 1);
+       r = HYPERVISOR_boot_vcpu(vcpu, ctxt);
+       if (r != 0) {
+               printk(KERN_EMERG "Failed to reboot vcpu %d (%d)\n", vcpu, r);
+               return -1;
+       }
+
+       /* Make sure we wait for the new vcpu to come up before trying to do
+          anything with it or starting the next one. */
+       while (atomic_read(&vcpus_rebooting))
+               barrier();
+
+       return 0;
+}
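
The machine<->physical translation done by save_vcpu_context()/restore_vcpu_context()
above is easy to check in isolation.  Below is a minimal user-space sketch of that
round trip; the translation tables, frame numbers and NR_FRAMES are made-up stand-ins
for the kernel's p2m/m2p lookups, not real Xen code:

#include <stdio.h>

#define NR_FRAMES 16

/* Stand-in translation tables -- NOT the kernel's real p2m/m2p tables. */
static unsigned long pfn_to_mfn_tab[NR_FRAMES];
static unsigned long mfn_to_pfn_tab[NR_FRAMES];

static unsigned long mfn_to_pfn(unsigned long mfn) { return mfn_to_pfn_tab[mfn]; }
static unsigned long pfn_to_mfn(unsigned long pfn) { return pfn_to_mfn_tab[pfn]; }

int main(void)
{
        /* A GDT with 1000 entries occupies (1000 + 511) / 512 = 2 frames,
           since each 4K frame holds 512 eight-byte descriptors. */
        unsigned long gdt_ents = 1000;
        unsigned long gdt_frames[2] = { 5, 9 };     /* MFNs before suspend */
        int gdt_pages = (gdt_ents + 511) / 512;
        int i;

        /* Identity mapping before suspend. */
        for (i = 0; i < NR_FRAMES; i++) {
                pfn_to_mfn_tab[i] = i;
                mfn_to_pfn_tab[i] = i;
        }

        /* save_vcpu_context(): machine -> physical, so the saved record
           is meaningful in the new machine address space after resume. */
        for (i = 0; i < gdt_pages; i++)
                gdt_frames[i] = mfn_to_pfn(gdt_frames[i]);

        /* Pretend the restored domain was given different machine frames:
           every PFN now maps to MFN + 2. */
        for (i = 0; i < NR_FRAMES; i++)
                pfn_to_mfn_tab[i] = i + 2;

        /* restore_vcpu_context(): physical -> (new) machine. */
        for (i = 0; i < gdt_pages; i++)
                gdt_frames[i] = pfn_to_mfn(gdt_frames[i]);

        for (i = 0; i < gdt_pages; i++)
                printf("gdt_frames[%d] = %lu\n", i, gdt_frames[i]);
        return 0;
}

Running it prints gdt_frames[0] = 7 and gdt_frames[1] = 11: the record saved against
the old machine frames comes back pointing at the new ones.
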
diff -r 70be155e9e9c -r b3785cbb723b linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c
--- a/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c     Mon Aug 29 17:15:56 2005
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c     Mon Aug 29 17:40:47 2005
@@ -245,74 +245,6 @@
     spin_unlock(&irq_mapping_update_lock);
 }
 
-/* This is only used when a vcpu from an xm save.  The ipi is expected
-   to have been bound before we suspended, and so all of the xenolinux
-   state is set up; we only need to restore the Xen side of things.
-   The irq number has to be the same, but the evtchn number can
-   change. */
-void _bind_ipi_to_irq(int ipi, int vcpu, int irq)
-{
-    evtchn_op_t op;
-    int evtchn;
-
-    spin_lock(&irq_mapping_update_lock);
-
-    op.cmd = EVTCHNOP_bind_ipi;
-    if ( HYPERVISOR_event_channel_op(&op) != 0 )
-       panic("Failed to bind virtual IPI %d on cpu %d\n", ipi, vcpu);
-    evtchn = op.u.bind_ipi.port;
-
-    printk("<0>IPI %d, old evtchn %d, evtchn %d.\n",
-          ipi, per_cpu(ipi_to_evtchn, vcpu)[ipi],
-          evtchn);
-
-    evtchn_to_irq[irq_to_evtchn[irq]] = -1;
-    irq_to_evtchn[irq] = -1;
-
-    evtchn_to_irq[evtchn] = irq;
-    irq_to_evtchn[irq]    = evtchn;
-
-    printk("<0>evtchn_to_irq[%d] = %d.\n", evtchn,
-          evtchn_to_irq[evtchn]);
-    per_cpu(ipi_to_evtchn, vcpu)[ipi] = evtchn;
-
-    bind_evtchn_to_cpu(evtchn, vcpu);
-
-    spin_unlock(&irq_mapping_update_lock);
-
-    clear_bit(evtchn, (unsigned long *)HYPERVISOR_shared_info->evtchn_mask);
-    clear_bit(evtchn, (unsigned long *)HYPERVISOR_shared_info->evtchn_pending);
-}
-
-void _bind_virq_to_irq(int virq, int cpu, int irq)
-{
-    evtchn_op_t op;
-    int evtchn;
-
-    spin_lock(&irq_mapping_update_lock);
-
-    op.cmd              = EVTCHNOP_bind_virq;
-    op.u.bind_virq.virq = virq;
-    if ( HYPERVISOR_event_channel_op(&op) != 0 )
-            panic("Failed to bind virtual IRQ %d\n", virq);
-    evtchn = op.u.bind_virq.port;
-
-    evtchn_to_irq[irq_to_evtchn[irq]] = -1;
-    irq_to_evtchn[irq] = -1;
-
-    evtchn_to_irq[evtchn] = irq;
-    irq_to_evtchn[irq]    = evtchn;
-
-    per_cpu(virq_to_irq, cpu)[virq] = irq;
-
-    bind_evtchn_to_cpu(evtchn, cpu);
-
-    spin_unlock(&irq_mapping_update_lock);
-
-    clear_bit(evtchn, (unsigned long *)HYPERVISOR_shared_info->evtchn_mask);
-    clear_bit(evtchn, (unsigned long *)HYPERVISOR_shared_info->evtchn_pending);
-}
-
 int bind_ipi_to_irq(int ipi)
 {
     evtchn_op_t op;
diff -r 70be155e9e9c -r b3785cbb723b linux-2.6-xen-sparse/arch/xen/kernel/reboot.c
--- a/linux-2.6-xen-sparse/arch/xen/kernel/reboot.c     Mon Aug 29 17:15:56 2005
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/reboot.c     Mon Aug 29 17:40:47 2005
@@ -66,60 +66,6 @@
 #endif
 
 #ifdef CONFIG_SMP
-static void save_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt)
-{
-    int r;
-    int gdt_pages;
-    r = HYPERVISOR_vcpu_pickle(vcpu, ctxt);
-    if (r != 0)
-       panic("pickling vcpu %d -> %d!\n", vcpu, r);
-
-    /* Translate from machine to physical addresses where necessary,
-       so that they can be translated to our new machine address space
-       after resume.  libxc is responsible for doing this to vcpu0,
-       but we do it to the others. */
-    gdt_pages = (ctxt->gdt_ents + 511) / 512;
-    ctxt->ctrlreg[3] = machine_to_phys(ctxt->ctrlreg[3]);
-    for (r = 0; r < gdt_pages; r++)
-       ctxt->gdt_frames[r] = mfn_to_pfn(ctxt->gdt_frames[r]);
-}
-
-void _restore_vcpu(int cpu);
-
-atomic_t vcpus_rebooting;
-
-static int restore_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt)
-{
-    int r;
-    int gdt_pages = (ctxt->gdt_ents + 511) / 512;
-
-    /* This is kind of a hack, and implicitly relies on the fact that
-       the vcpu stops in a place where all of the call clobbered
-       registers are already dead. */
-    ctxt->user_regs.esp -= 4;
-    ((unsigned long *)ctxt->user_regs.esp)[0] = ctxt->user_regs.eip;
-    ctxt->user_regs.eip = (unsigned long)_restore_vcpu;
-
-    /* De-canonicalise.  libxc handles this for vcpu 0, but we need
-       to do it for the other vcpus. */
-    ctxt->ctrlreg[3] = phys_to_machine(ctxt->ctrlreg[3]);
-    for (r = 0; r < gdt_pages; r++)
-       ctxt->gdt_frames[r] = pfn_to_mfn(ctxt->gdt_frames[r]);
-
-    atomic_set(&vcpus_rebooting, 1);
-    r = HYPERVISOR_boot_vcpu(vcpu, ctxt);
-    if (r != 0) {
-       printk(KERN_EMERG "Failed to reboot vcpu %d (%d)\n", vcpu, r);
-       return -1;
-    }
-
-    /* Make sure we wait for the new vcpu to come up before trying to do
-       anything with it or starting the next one. */
-    while (atomic_read(&vcpus_rebooting))
-       barrier();
-
-    return 0;
-}
 #endif
 
 static int __do_suspend(void *ignore)
@@ -139,18 +85,20 @@
     extern int gnttab_suspend(void);
     extern int gnttab_resume(void);
 
-#ifdef CONFIG_SMP
-    extern void smp_suspend(void);
-    extern void smp_resume(void);
-#endif
     extern void time_suspend(void);
     extern void time_resume(void);
     extern unsigned long max_pfn;
     extern unsigned int *pfn_to_mfn_frame_list;
 
 #ifdef CONFIG_SMP
+    extern void smp_suspend(void);
+    extern void smp_resume(void);
+
     static vcpu_guest_context_t suspended_cpu_records[NR_CPUS];
     cpumask_t prev_online_cpus, prev_present_cpus;
+
+    void save_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt);
+    int restore_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt);
 #endif
 
     int err = 0;
diff -r 70be155e9e9c -r b3785cbb723b linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c     Mon Aug 29 17:15:56 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c     Mon Aug 29 17:40:47 2005
@@ -1277,21 +1277,23 @@
 
 void smp_suspend(void)
 {
-       /* XXX todo: take down time and ipi's on all cpus */
        local_teardown_timer_irq();
        smp_intr_exit();
 }
 
 void smp_resume(void)
 {
-       /* XXX todo: restore time and ipi's on all cpus */
        smp_intr_init();
        local_setup_timer_irq();
 }
 
-void _restore_vcpu(void)
-{
-       /* XXX need to write this */
-}
-
-#endif
+void save_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt)
+{
+}
+
+int restore_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt)
+{
+       return 0;
+}
+
+#endif
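
The vcpus_rebooting handshake added to the i386 smpboot.c above (atomic_set() before
HYPERVISOR_boot_vcpu(), restore_vcpu_ready() decrementing the counter, and the
barrier() spin in restore_vcpu_context()) can be mimicked in user space.  The sketch
below is illustrative only: the new vcpu is played by a pthread, the boot hypercall
by pthread_create(), and C11 atomics stand in for the kernel's atomic_t:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int vcpus_rebooting;

/* Counterpart of restore_vcpu_ready(): the first thing the new "vcpu"
   runs, and all it does is acknowledge that it is up. */
static void *vcpu_entry(void *arg)
{
        (void)arg;
        atomic_fetch_sub(&vcpus_rebooting, 1);
        return NULL;
}

int main(void)
{
        pthread_t vcpu;

        /* Like atomic_set(&vcpus_rebooting, 1) before booting the vcpu. */
        atomic_store(&vcpus_rebooting, 1);

        if (pthread_create(&vcpu, NULL, vcpu_entry, NULL) != 0) {
                fprintf(stderr, "failed to boot vcpu\n");
                return 1;
        }

        /* The while (atomic_read(&vcpus_rebooting)) barrier(); spin:
           don't touch the vcpu, or start the next one, until it has
           signalled that it is running. */
        while (atomic_load(&vcpus_rebooting))
                ;       /* spin */

        printf("vcpu is up\n");
        pthread_join(vcpu, NULL);
        return 0;
}

Compile with "cc -pthread"; the program exits only after the worker thread has
acknowledged, mirroring the wait before the next vcpu is restored.
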

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

