
[patch 27/37] cpu/hotplug: Remove unused state functions



All users have been converted to the hotplug core mechanism, which leaves
cpu_wait_death() and cpu_report_death() without any callers. Remove them.

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
---
 include/linux/cpu.h |    2 -
 kernel/smpboot.c    |   75 ----------------------------------------------------
 2 files changed, 77 deletions(-)

--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -193,8 +193,6 @@ static inline void play_idle(unsigned lo
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-bool cpu_wait_death(unsigned int cpu, int seconds);
-bool cpu_report_death(void);
 void cpuhp_report_idle_dead(void);
 #else
 static inline void cpuhp_report_idle_dead(void) { }
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -325,78 +325,3 @@ void smpboot_unregister_percpu_thread(st
        cpus_read_unlock();
 }
 EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);
-
-#ifndef CONFIG_HOTPLUG_CORE_SYNC
-static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD);
-
-#ifdef CONFIG_HOTPLUG_CPU
-/*
- * Wait for the specified CPU to exit the idle loop and die.
- */
-bool cpu_wait_death(unsigned int cpu, int seconds)
-{
-       int jf_left = seconds * HZ;
-       int oldstate;
-       bool ret = true;
-       int sleep_jf = 1;
-
-       might_sleep();
-
-       /* The outgoing CPU will normally get done quite quickly. */
-       if (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) == CPU_DEAD)
-               goto update_state_early;
-       udelay(5);
-
-       /* But if the outgoing CPU dawdles, wait increasingly long times. */
-       while (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) != CPU_DEAD) {
-               schedule_timeout_uninterruptible(sleep_jf);
-               jf_left -= sleep_jf;
-               if (jf_left <= 0)
-                       break;
-               sleep_jf = DIV_ROUND_UP(sleep_jf * 11, 10);
-       }
-update_state_early:
-       oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
-update_state:
-       if (oldstate == CPU_DEAD) {
-               /* Outgoing CPU died normally, update state. */
-               smp_mb(); /* atomic_read() before update. */
-               atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_POST_DEAD);
-       } else {
-               /* Outgoing CPU still hasn't died, set state accordingly. */
-               if (!atomic_try_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
-                                       &oldstate, CPU_BROKEN))
-                       goto update_state;
-               ret = false;
-       }
-       return ret;
-}
-
-/*
- * Called by the outgoing CPU to report its successful death.  Return
- * false if this report follows the surviving CPU's timing out.
- *
- * A separate "CPU_DEAD_FROZEN" is used when the surviving CPU
- * timed out.  This approach allows architectures to omit calls to
- * cpu_check_up_prepare() and cpu_set_state_online() without defeating
- * the next cpu_wait_death()'s polling loop.
- */
-bool cpu_report_death(void)
-{
-       int oldstate;
-       int newstate;
-       int cpu = smp_processor_id();
-
-       oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
-       do {
-               if (oldstate != CPU_BROKEN)
-                       newstate = CPU_DEAD;
-               else
-                       newstate = CPU_DEAD_FROZEN;
-       } while (!atomic_try_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
-                                    &oldstate, newstate));
-       return newstate == CPU_DEAD;
-}
-
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-#endif /* !CONFIG_HOTPLUG_CORE_SYNC */
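
For illustration only, a minimal sketch of what the two sides look like
after the conversion, assuming the core helpers cpuhp_ap_report_dead() and
cpuhp_bp_sync_dead() added earlier in this series; the example_* wrappers
below are hypothetical:

#include <linux/cpu.h>

/*
 * Dying CPU side: conceptually replaces cpu_report_death().  The outgoing
 * CPU marks itself dead in the core's synchronization state and then
 * proceeds to its architecture specific halt / power off.
 */
static void example_report_dead(void)			/* hypothetical */
{
	cpuhp_ap_report_dead();
	/* ... architecture specific halt follows ... */
}

/*
 * Waiting side, run on a surviving CPU during teardown: conceptually
 * replaces cpu_wait_death().  The timed wait and the timeout handling
 * now live in the hotplug core.
 */
static void example_sync_dead(unsigned int cpu)		/* hypothetical */
{
	cpuhp_bp_sync_dead(cpu);
}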