[Xen-devel] [PATCH RFC v7 11/11] pvqspinlock, x86: Enable qspinlock PV support for XEN



This patch adds the necessary Xen-specific code to allow Xen to support
the sleeping and CPU kicking operations needed by the queue spinlock PV
code.
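
As background, here is a minimal sketch of how a queue spinlock slow
path is expected to drive the two hooks wired up below. Only
pv_lock_ops.hibernate, pv_lock_ops.kick_cpu and the
XEN_SPIN_UNLOCK_VECTOR IPI come from this patch; the pv_qnode
structure and the pv_wait_node()/pv_kick_node() helpers are
illustrative assumptions, not the exact API of this series:

/*
 * Illustrative sketch only, not part of the patch.  Only the two
 * pv_lock_ops hooks and the Xen functions named in the comments
 * are real; all other names are assumptions.
 */
struct pv_qnode {
	struct pv_qnode	*next;		/* next waiter in the queue         */
	int		locked;		/* set when the lock is handed over */
	int		cpu;		/* CPU number of this waiter        */
	bool		halted;		/* waiter has gone to sleep         */
};

/* Waiter side: spin for a while, then return the vCPU to the host */
static void pv_wait_node(struct pv_qnode *node)
{
	for (;;) {
		int loop;

		for (loop = SPIN_THRESHOLD; loop; loop--) {
			if (ACCESS_ONCE(node->locked))
				return;
			cpu_relax();
		}

		/*
		 * Still contended - sleep until kicked.  On Xen this
		 * lands in xen_hibernate() below, which blocks in
		 * xen_poll_irq() until the XEN_SPIN_UNLOCK_VECTOR IPI
		 * makes the per-cpu kicker irq pending.
		 */
		ACCESS_ONCE(node->halted) = true;
		pv_lock_ops.hibernate();
		ACCESS_ONCE(node->halted) = false;
	}
}

/* Holder side: hand over the lock, then kick a sleeping waiter */
static void pv_kick_node(struct pv_qnode *node)
{
	ACCESS_ONCE(node->locked) = 1;
	if (ACCESS_ONCE(node->halted))
		pv_lock_ops.kick_cpu(node->cpu);  /* xen_kick_cpu_type() */
}

The window between a waiter setting ->halted and actually blocking is
why xen_hibernate() clears the pending irq before polling: a kick that
arrives in that window leaves the irq pending, so xen_poll_irq()
returns immediately instead of losing the wakeup.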

Signed-off-by: Waiman Long <Waiman.Long@xxxxxx>
---
 arch/x86/xen/spinlock.c |   90 ++++++++++++++++++++++++++++++++++++++++++++---
 kernel/Kconfig.locks    |    2 +-
 2 files changed, 86 insertions(+), 6 deletions(-)

diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 06f4a64..78891ee 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -17,6 +17,12 @@
 #include "xen-ops.h"
 #include "debugfs.h"
 
+static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
+static DEFINE_PER_CPU(char *, irq_name);
+static bool xen_pvspin = true;
+
+#ifndef CONFIG_QUEUE_SPINLOCK
+
 enum xen_contention_stat {
        TAKEN_SLOW,
        TAKEN_SLOW_PICKUP,
@@ -100,12 +106,9 @@ struct xen_lock_waiting {
        __ticket_t want;
 };
 
-static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
-static DEFINE_PER_CPU(char *, irq_name);
 static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
 static cpumask_t waiting_cpus;
 
-static bool xen_pvspin = true;
 __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
 {
        int irq = __this_cpu_read(lock_kicker_irq);
@@ -213,6 +216,74 @@ static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
        }
 }
 
+#else /* CONFIG_QUEUE_SPINLOCK */
+
+#ifdef CONFIG_XEN_DEBUG_FS
+static u32 kick_stats;         /* CPU kick count    */
+static u32 hibernate_stats;    /* Hibernation count */
+
+static inline void inc_kick_stats(void)
+{
+       add_smp(&kick_stats, 1);
+}
+
+static inline void inc_hib_stats(void)
+{
+       add_smp(&hibernate_stats, 1);
+}
+#else /* CONFIG_XEN_DEBUG_FS */
+static inline void inc_kick_stats(void)
+{
+}
+
+static inline void inc_hib_stats(void)
+{
+
+}
+#endif /* CONFIG_XEN_DEBUG_FS */
+
+static void xen_kick_cpu_type(int cpu)
+{
+       xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
+       inc_kick_stats();
+}
+
+/*
+ * Halt the current CPU & release it back to the host
+ */
+static void xen_hibernate(void)
+{
+       int irq = __this_cpu_read(lock_kicker_irq);
+       unsigned long flags;
+
+       /* If the kicker irq is not initialized yet, just spin */
+       if (irq == -1)
+               return;
+
+       /*
+        * Make sure an interrupt handler can't upset things in a
+        * partially setup state.
+        */
+       local_irq_save(flags);
+
+       inc_hib_stats();
+       /* clear pending */
+       xen_clear_irq_pending(irq);
+
+       /* Allow interrupts while blocked */
+       local_irq_restore(flags);
+
+       /*
+        * If an interrupt happens here, it will leave the wakeup irq
+        * pending, which will cause xen_poll_irq() to return
+        * immediately.
+        */
+
+       /* Block until irq becomes pending (or perhaps a spurious wakeup) */
+       xen_poll_irq(irq);
+}
+#endif /* CONFIG_QUEUE_SPINLOCK */
+
 static irqreturn_t dummy_handler(int irq, void *dev_id)
 {
        BUG();
@@ -258,7 +329,6 @@ void xen_uninit_lock_cpu(int cpu)
        per_cpu(irq_name, cpu) = NULL;
 }
 
-
 /*
  * Our init of PV spinlocks is split in two init functions due to us
  * using paravirt patching and jump labels patching and having to do
@@ -275,8 +345,13 @@ void __init xen_init_spinlocks(void)
                return;
        }
 
+#ifdef CONFIG_QUEUE_SPINLOCK
+       pv_lock_ops.kick_cpu = xen_kick_cpu_type;
+       pv_lock_ops.hibernate = xen_hibernate;
+#else
        pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
        pv_lock_ops.unlock_kick = xen_unlock_kick;
+#endif
 }
 
 /*
@@ -318,6 +393,7 @@ static int __init xen_spinlock_debugfs(void)
 
        d_spin_debug = debugfs_create_dir("spinlocks", d_xen);
 
+#ifndef CONFIG_QUEUE_SPINLOCK
        debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);
 
        debugfs_create_u32("taken_slow", 0444, d_spin_debug,
@@ -337,7 +413,11 @@ static int __init xen_spinlock_debugfs(void)
 
        debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
                                spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);
-
+#else /* CONFIG_QUEUE_SPINLOCK */
+       debugfs_create_u32("kick_stats", 0644, d_spin_debug, &kick_stats);
+       debugfs_create_u32("hibernate_stats",
+                          0644, d_spin_debug, &hibernate_stats);
+#endif /* CONFIG_QUEUE_SPINLOCK */
        return 0;
 }
 fs_initcall(xen_spinlock_debugfs);
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index a70fdeb..451e392 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -229,4 +229,4 @@ config ARCH_USE_QUEUE_SPINLOCK
 
 config QUEUE_SPINLOCK
        def_bool y if ARCH_USE_QUEUE_SPINLOCK
-       depends on SMP && (!PARAVIRT_SPINLOCKS || !XEN)
+       depends on SMP
-- 
1.7.1

