[Xen-changelog] [xen-unstable] [IA64] Fix broken switch stack



# HG changeset patch
# User awilliam@xxxxxxxxxxxx
# Date 1169578109 25200
# Node ID b194a9f3eba256b5ae56405b2b535e7798a389de
# Parent  d12ea0bfecce0aa5b62a9441185330b601487838
[IA64] Fix broken switch stack

unw_init_running() needs to be called at the very end of the OS INIT
handler: the switch stack it sets up lives on the handler's own stack,
so any function called after it returns can overwrite and break it.
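
In outline, the fixed flow looks like the sketch below.  It reuses the
names from the patch and omits the dumping path and the SMP rendezvous;
unw_init_running() lays a struct switch_stack down on the current stack
and hands the callback an unwind frame describing it, so the call has
to be the handler's final act and the callback must never return:

static void
freeze_cpu_osinit (struct unw_frame_info *info, void *arg)
{
        save_ksp(info);         /* ksp now points at an intact switch stack */
        for (;;)
                local_irq_disable();  /* park; nothing may reuse this stack */
}

        /* ... last statement of init_handler_platform() ... */
        unw_init_running(freeze_cpu_osinit, NULL);
        /* not reached */

With the old ordering, save_ksp() ran through unw_init_running() first,
and the spin_unlock()/printk()/udelay() calls that followed could reuse
the very stack region the saved ksp pointed into.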

Signed-off-by: Akio Takebe <takebe_akio@xxxxxxxxxxxxxx>
---
 xen/arch/ia64/linux-xen/mca.c |   64 ++++++++++++++++++++++++++++--------------
 1 files changed, 43 insertions(+), 21 deletions(-)

diff -r d12ea0bfecce -r b194a9f3eba2 xen/arch/ia64/linux-xen/mca.c
--- a/xen/arch/ia64/linux-xen/mca.c     Tue Jan 23 10:52:07 2007 -0700
+++ b/xen/arch/ia64/linux-xen/mca.c     Tue Jan 23 11:48:29 2007 -0700
@@ -684,16 +684,7 @@ fetch_min_state (pal_min_state_area_t *m
 #ifdef XEN
 static spinlock_t init_dump_lock = SPIN_LOCK_UNLOCKED;
 static spinlock_t show_stack_lock = SPIN_LOCK_UNLOCKED;
-
-static void
-save_ksp (struct unw_frame_info *info, void *arg)
-{
-       current->arch._thread.ksp = (__u64)(info->sw) - 16;
-       wmb();
-}
-
-/* FIXME */
-int try_crashdump(struct pt_regs *a) { return 0; }
+static atomic_t num_stopped_cpus = ATOMIC_INIT(0);
 
 #define CPU_FLUSH_RETRY_MAX 5
 static void
@@ -716,6 +707,35 @@ init_cache_flush (void)
        }
        printk("\nPAL cache flush failed. status=%ld\n",rval);
 }
+
+static inline void
+save_ksp (struct unw_frame_info *info)
+{
+       current->arch._thread.ksp = (__u64)(info->sw) - 16;
+       wmb();
+       init_cache_flush();
+}
+
+static void
+freeze_cpu_osinit (struct unw_frame_info *info, void *arg)
+{
+       save_ksp(info);
+       atomic_inc(&num_stopped_cpus);
+       printk("%s: CPU%d init handler done\n",
+              __FUNCTION__, smp_processor_id());
+       for (;;)
+               local_irq_disable();
+}
+
+/* FIXME */
+static void
+try_crashdump(struct unw_frame_info *info, void *arg)
+{
+       save_ksp(info);
+       printk("\nINIT dump complete.  Please reboot now.\n");
+       for (;;)
+               local_irq_disable();
+}
 #endif /* XEN */
 
 static void
@@ -741,7 +761,8 @@ init_handler_platform (pal_min_state_are
        show_min_state(ms);
 
 #ifdef XEN
-       printk("Backtrace of current vcpu (vcpu_id %d)\n", current->vcpu_id);
+       printk("Backtrace of current vcpu (vcpu_id %d of domid %d)\n",
+              current->vcpu_id, current->domain->domain_id);
 #else
        printk("Backtrace of current task (pid %d, %s)\n", current->pid, 
current->comm);
        fetch_min_state(ms, pt, sw);
@@ -749,20 +770,21 @@ init_handler_platform (pal_min_state_are
        unw_init_from_interruption(&info, current, pt, sw);
        ia64_do_show_stack(&info, NULL);
 #ifdef XEN
-       unw_init_running(save_ksp, NULL);
        spin_unlock(&show_stack_lock);
-       wmb();
-       init_cache_flush();
 
        if (spin_trylock(&init_dump_lock)) {
 #ifdef CONFIG_SMP
-               udelay(5*1000000);
-#endif
-               if (try_crashdump(pt) == 0)
-                       printk("\nINIT dump complete.  Please reboot now.\n");
-       }
-       printk("%s: CPU%d init handler done\n",
-              __FUNCTION__, smp_processor_id());
+               int other_cpus = num_online_cpus() - 1;
+               int wait = 1000 * other_cpus;
+
+               while ((atomic_read(&num_stopped_cpus) != other_cpus) && wait--)
+                       udelay(1000);
+               if (other_cpus && wait < 0)
+                       printk("timeout %d\n", atomic_read(&num_stopped_cpus));
+#endif
+               unw_init_running(try_crashdump, pt);
+       }
+       unw_init_running(freeze_cpu_osinit, NULL);
 #else /* XEN */
 #ifdef CONFIG_SMP
        /* read_trylock() would be handy... */

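As an aside for reviewers, the rendezvous added by the last hunk can be
modelled in user space.  The program below is a simulation under stated
assumptions, not Xen code: pthreads stand in for CPUs, a pthread mutex
for init_dump_lock, usleep() for udelay(), and the threads return where
the real unw_init_running() callbacks would spin forever.  NCPUS and
cpu_init_handler are illustrative names, not part of the patch.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NCPUS 4

static atomic_int num_stopped_cpus;
static pthread_mutex_t init_dump_lock = PTHREAD_MUTEX_INITIALIZER;

static void *cpu_init_handler(void *arg)
{
        int cpu = (int)(long)arg;

        if (pthread_mutex_trylock(&init_dump_lock) == 0) {
                /* The dumping CPU: wait up to ~1s per peer for the
                 * others to park before taking the dump. */
                int other_cpus = NCPUS - 1;
                int wait = 1000 * other_cpus;

                while (atomic_load(&num_stopped_cpus) != other_cpus && wait--)
                        usleep(1000);            /* udelay(1000) in the patch */
                if (other_cpus && wait < 0)
                        printf("timeout: only %d CPUs parked\n",
                               atomic_load(&num_stopped_cpus));
                printf("CPU%d: taking the dump\n", cpu); /* try_crashdump() */
                return NULL;    /* the real callback never returns */
        }
        /* Every other CPU checks in, then parks. */
        atomic_fetch_add(&num_stopped_cpus, 1);  /* freeze_cpu_osinit() */
        printf("CPU%d: parked\n", cpu);
        return NULL;            /* the real callback spins with IRQs off */
}

int main(void)
{
        pthread_t t[NCPUS];
        long i;

        for (i = 0; i < NCPUS; i++)
                pthread_create(&t[i], NULL, cpu_init_handler, (void *)i);
        for (i = 0; i < NCPUS; i++)
                pthread_join(t[i], NULL);
        return 0;
}

Build with "cc -pthread"; the output shows one dumping CPU and NCPUS-1
parked CPUs, and dropping the atomic_fetch_add() line reproduces the
timeout path.
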
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog