[Xen-devel] [PATCH 6/8] xen/common: use keyhandler locks when dumping data to console



Instead of using the normal locks, use the keyhandler-provided trylocks
with timeouts. This requires adding a percpu read_trylock and a special
locking primitive for the grant lock.
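
[ Note: the keyhandler_spin_lock(), keyhandler_spin_lock_irqsave() and
keyhandler_lock_body() helpers used below are introduced by an earlier
patch of this series and are not part of this diff. A minimal sketch of
their assumed shape, reconstructed from the call sites in this patch
(names and the timeout value are assumptions), could look like:

    #define KEYHANDLER_LOCK_TIMEOUT MILLISECS(100)  /* assumed value */

    /*
     * Retry the trylock expression until it succeeds or the timeout
     * expires; on timeout print the supplied message and make the
     * enclosing function return 0 (failure).
     */
    #define keyhandler_lock_body(type, lockfunc, ...)       \
        s_time_t end = NOW() + KEYHANDLER_LOCK_TIMEOUT;     \
                                                            \
        do {                                                \
            type ret = (lockfunc);                          \
                                                            \
            if ( ret )                                      \
                return ret;                                 \
            cpu_relax();                                    \
        } while ( NOW() < end );                            \
                                                            \
        printk(__VA_ARGS__);                                \
                                                            \
        return 0

    static inline int keyhandler_spin_lock(spinlock_t *lock,
                                           const char *msg)
    {
        keyhandler_lock_body(int, spin_trylock(lock), "%s\n", msg);
    }

    static inline int keyhandler_spin_lock_irqsave(spinlock_t *lock,
                                                   unsigned long *flags,
                                                   const char *msg)
    {
        keyhandler_lock_body(int, spin_trylock_irqsave(lock, *flags),
                             "%s\n", msg);
    } ]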

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
 xen/common/event_channel.c |  3 ++-
 xen/common/grant_table.c   | 32 +++++++++++++++++++++++++++++---
 xen/common/livepatch.c     | 11 +++--------
 xen/common/spinlock.c      | 18 +++++++++++++++---
 xen/common/timer.c         | 15 +++++++++------
 xen/include/xen/rwlock.h   | 37 +++++++++++++++++++++++++++++++++++++
 6 files changed, 95 insertions(+), 21 deletions(-)

diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index e86e2bfab0..a8fd481cb8 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -1387,7 +1387,8 @@ static void domain_dump_evtchn_info(struct domain *d)
            "Polling vCPUs: {%*pbl}\n"
            "    port [p/m/s]\n", d->domain_id, d->max_vcpus, d->poll_mask);
 
-    spin_lock(&d->event_lock);
+    if ( !keyhandler_spin_lock(&d->event_lock, "could not get event lock") )
+        return;
 
     for ( port = 1; port < d->max_evtchns; ++port )
     {
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index c793927cd6..14d01950ab 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -335,6 +335,11 @@ static inline void grant_read_lock(struct grant_table *gt)
     percpu_read_lock(grant_rwlock, &gt->lock);
 }
 
+static inline int grant_read_trylock(struct grant_table *gt)
+{
+    return percpu_read_trylock(grant_rwlock, &gt->lock);
+}
+
 static inline void grant_read_unlock(struct grant_table *gt)
 {
     percpu_read_unlock(grant_rwlock, &gt->lock);
@@ -4040,6 +4045,24 @@ int gnttab_get_status_frame(struct domain *d, unsigned long idx,
     return rc;
 }
 
+static int keyhandler_grant_read_lock(struct domain *d)
+{
+    keyhandler_lock_body(int, grant_read_trylock(d->grant_table),
+                         "could not get grant lock for %pd\n", d);
+}
+
+static inline struct active_grant_entry *
+keyhandler_active_entry_acquire(struct grant_table *t, grant_ref_t e)
+{
+    struct active_grant_entry *act;
+
+    act = &_active_entry(t, e);
+    if ( !keyhandler_spin_lock(&act->lock, "could not acquire active entry") )
+        return NULL;
+
+    return act;
+}
+
 static void gnttab_usage_print(struct domain *rd)
 {
     int first = 1;
@@ -4047,11 +4070,12 @@ static void gnttab_usage_print(struct domain *rd)
     struct grant_table *gt = rd->grant_table;
     unsigned int nr_ents;
 
+    if ( !keyhandler_grant_read_lock(rd) )
+        return;
+
     printk("      -------- active --------       -------- shared --------\n");
     printk("[ref] localdom mfn      pin          localdom gmfn     flags\n");
 
-    grant_read_lock(gt);
-
     printk("grant-table for remote d%d (v%u)\n"
            "  %u frames (%u max), %u maptrack frames (%u max)\n",
            rd->domain_id, gt->gt_version,
@@ -4066,7 +4090,9 @@ static void gnttab_usage_print(struct domain *rd)
         uint16_t status;
         uint64_t frame;
 
-        act = active_entry_acquire(gt, ref);
+        act = keyhandler_active_entry_acquire(gt, ref);
+        if ( !act )
+            continue;
         if ( !act->pin )
         {
             active_entry_release(act);
diff --git a/xen/common/livepatch.c b/xen/common/livepatch.c
index 5e09dc990b..0f0a877704 100644
--- a/xen/common/livepatch.c
+++ b/xen/common/livepatch.c
@@ -2072,11 +2072,8 @@ static void livepatch_printall(unsigned char key)
     if ( !xen_build_id(&binary_id, &len) )
         printk("build-id: %*phN\n", len, binary_id);
 
-    if ( !spin_trylock(&payload_lock) )
-    {
-        printk("Lock held. Try again.\n");
+    if ( !keyhandler_spin_lock(&payload_lock, "could not get payload lock") )
         return;
-    }
 
     list_for_each_entry ( data, &payload_list, list )
     {
@@ -2096,11 +2093,9 @@ static void livepatch_printall(unsigned char key)
             {
                 spin_unlock(&payload_lock);
                 process_pending_softirqs();
-                if ( !spin_trylock(&payload_lock) )
-                {
-                    printk("Couldn't reacquire lock. Try again.\n");
+                if ( !keyhandler_spin_lock(&payload_lock,
+                                           "could not reacquire payload lock") )
                     return;
-                }
             }
         }
         if ( data->id.len )
diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index 344981c54a..3204d24dfa 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -349,17 +349,23 @@ static struct lock_profile_anc lock_profile_ancs[LOCKPROF_TYPE_N];
 static struct lock_profile_qhead lock_profile_glb_q;
 static spinlock_t lock_profile_lock = SPIN_LOCK_UNLOCKED;
 
-static void spinlock_profile_iterate(lock_profile_subfunc *sub, void *par)
+static void spinlock_profile_iterate_locked(lock_profile_subfunc *sub,
+                                            void *par)
 {
     int i;
     struct lock_profile_qhead *hq;
     struct lock_profile *eq;
 
-    spin_lock(&lock_profile_lock);
     for ( i = 0; i < LOCKPROF_TYPE_N; i++ )
         for ( hq = lock_profile_ancs[i].head_q; hq; hq = hq->head_q )
             for ( eq = hq->elem_q; eq; eq = eq->next )
                 sub(eq, i, hq->idx, par);
+}
+
+static void spinlock_profile_iterate(lock_profile_subfunc *sub, void *par)
+{
+    spin_lock(&lock_profile_lock);
+    spinlock_profile_iterate_locked(sub, par);
     spin_unlock(&lock_profile_lock);
 }
 
@@ -389,7 +395,13 @@ void spinlock_profile_printall(unsigned char key)
     diff = now - lock_profile_start;
     printk("Xen lock profile info SHOW  (now = %"PRI_stime" total = "
            "%"PRI_stime")\n", now, diff);
-    spinlock_profile_iterate(spinlock_profile_print_elem, NULL);
+
+    if ( !keyhandler_spin_lock(&lock_profile_lock, "could not get lock") )
+        return;
+
+    spinlock_profile_iterate_locked(spinlock_profile_print_elem, NULL);
+
+    spin_unlock(&lock_profile_lock);
 }
 
 static void spinlock_profile_reset_elem(struct lock_profile *data,
diff --git a/xen/common/timer.c b/xen/common/timer.c
index 1bb265ceea..0a00857e2d 100644
--- a/xen/common/timer.c
+++ b/xen/common/timer.c
@@ -561,12 +561,15 @@ static void dump_timerq(unsigned char key)
         ts = &per_cpu(timers, i);
 
         printk("CPU%02d:\n", i);
-        spin_lock_irqsave(&ts->lock, flags);
-        for ( j = 1; j <= heap_metadata(ts->heap)->size; j++ )
-            dump_timer(ts->heap[j], now);
-        for ( t = ts->list; t != NULL; t = t->list_next )
-            dump_timer(t, now);
-        spin_unlock_irqrestore(&ts->lock, flags);
+        if ( keyhandler_spin_lock_irqsave(&ts->lock, &flags,
+                                          "could not get lock") )
+        {
+            for ( j = 1; j <= heap_metadata(ts->heap)->size; j++ )
+                dump_timer(ts->heap[j], now);
+            for ( t = ts->list; t != NULL; t = t->list_next )
+                dump_timer(t, now);
+            spin_unlock_irqrestore(&ts->lock, flags);
+        }
     }
 }
 
diff --git a/xen/include/xen/rwlock.h b/xen/include/xen/rwlock.h
index 3dfea1ac2a..add8577429 100644
--- a/xen/include/xen/rwlock.h
+++ b/xen/include/xen/rwlock.h
@@ -278,6 +278,41 @@ static inline void _percpu_read_lock(percpu_rwlock_t **per_cpudata,
     }
 }
 
+static inline int _percpu_read_trylock(percpu_rwlock_t **per_cpudata,
+                                       percpu_rwlock_t *percpu_rwlock)
+{
+    /* Validate the correct per_cpudata variable has been provided. */
+    _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
+
+    /* We cannot support recursion on the same lock. */
+    ASSERT(this_cpu_ptr(per_cpudata) != percpu_rwlock);
+    /*
+     * Detect using a second percpu_rwlock_t simultaneously and fall back
+     * to standard read_trylock.
+     */
+    if ( unlikely(this_cpu_ptr(per_cpudata) != NULL) )
+        return read_trylock(&percpu_rwlock->rwlock);
+
+    /* Indicate this cpu is reading. */
+    this_cpu_ptr(per_cpudata) = percpu_rwlock;
+    smp_mb();
+    /* Check if a writer is waiting. */
+    if ( unlikely(percpu_rwlock->writer_activating) )
+    {
+        /* Let the waiting writer know we aren't holding the lock. */
+        this_cpu_ptr(per_cpudata) = NULL;
+        /* Try using the read lock to keep the lock fair. */
+        if ( !read_trylock(&percpu_rwlock->rwlock) )
+            return 0;
+        /* Set the per CPU data again and continue. */
+        this_cpu_ptr(per_cpudata) = percpu_rwlock;
+        /* Drop the read lock because we don't need it anymore. */
+        read_unlock(&percpu_rwlock->rwlock);
+    }
+
+    return 1;
+}
+
 static inline void _percpu_read_unlock(percpu_rwlock_t **per_cpudata,
                 percpu_rwlock_t *percpu_rwlock)
 {
@@ -318,6 +318,8 @@ static inline void _percpu_write_unlock(percpu_rwlock_t **per_cpudata,
 
 #define percpu_read_lock(percpu, lock) \
     _percpu_read_lock(&get_per_cpu_var(percpu), lock)
+#define percpu_read_trylock(percpu, lock) \
+    _percpu_read_trylock(&get_per_cpu_var(percpu), lock)
 #define percpu_read_unlock(percpu, lock) \
     _percpu_read_unlock(&get_per_cpu_var(percpu), lock)
 #define percpu_write_lock(percpu, lock) \
     _percpu_write_lock(&get_per_cpu_var(percpu), lock)
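
[ Note: for illustration only, not part of this patch: a hypothetical
caller uses the new percpu_read_trylock() like the blocking variant,
but has to cope with failure, e.g.:

    /* Hypothetical example: dump state guarded by the grant percpu
     * rwlock without ever blocking the keyhandler. */
    static void example_dump(struct grant_table *gt)
    {
        if ( !percpu_read_trylock(grant_rwlock, &gt->lock) )
            return;  /* lock busy: give up instead of spinning */

        /* ... read and print state protected by gt->lock ... */

        percpu_read_unlock(grant_rwlock, &gt->lock);
    } ]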
-- 
2.16.4

