[PATCH v2 10/13] xen/spinlock: add missing rspin_is_locked() and rspin_barrier()
Add rspin_is_locked() and rspin_barrier() in order to prepare for
differing spinlock_t and rspinlock_t types.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
V2:
- partially carved out from V1 patch, partially new
---
 xen/arch/x86/mm/p2m-pod.c     |  2 +-
 xen/common/domain.c           |  2 +-
 xen/common/page_alloc.c       |  2 +-
 xen/common/spinlock.c         | 17 +++++++++++++++++
 xen/drivers/char/console.c    |  4 ++--
 xen/drivers/passthrough/pci.c |  2 +-
 xen/include/xen/spinlock.h    |  2 ++
 7 files changed, 25 insertions(+), 6 deletions(-)

diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index 5361c2c5b1..ad49c64ead 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -385,7 +385,7 @@ int p2m_pod_empty_cache(struct domain *d)
 
     /* After this barrier no new PoD activities can happen. */
     BUG_ON(!d->is_dying);
-    spin_barrier(&p2m->pod.lock.lock);
+    rspin_barrier(&p2m->pod.lock.lock);
 
     lock_page_alloc(p2m);
 
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 604f70ff5a..17da8e0746 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -974,7 +974,7 @@ int domain_kill(struct domain *d)
     case DOMDYING_alive:
         domain_pause(d);
         d->is_dying = DOMDYING_dying;
-        spin_barrier(&d->domain_lock);
+        rspin_barrier(&d->domain_lock);
         argo_destroy(d);
         vnuma_destroy(d->vnuma);
         domain_set_outstanding_pages(d, 0);
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index a25c00a7d4..14010b6fa5 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -476,7 +476,7 @@ unsigned long domain_adjust_tot_pages(struct domain *d, long pages)
 {
     long dom_before, dom_after, dom_claimed, sys_before, sys_after;
 
-    ASSERT(spin_is_locked(&d->page_alloc_lock));
+    ASSERT(rspin_is_locked(&d->page_alloc_lock));
     d->tot_pages += pages;
 
     /*
diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index c17ea42fe6..9159a5a7c3 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -458,6 +458,23 @@ void _spin_barrier(spinlock_t *lock)
     spin_barrier_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
 }
 
+int rspin_is_locked(const rspinlock_t *lock)
+{
+    /*
+     * Recursive locks may be locked by another CPU, yet we return
+     * "false" here, making this function suitable only for use in
+     * ASSERT()s and alike.
+     */
+    return lock->recurse_cpu == SPINLOCK_NO_CPU
+           ? spin_is_locked_common(&lock->tickets)
+           : lock->recurse_cpu == smp_processor_id();
+}
+
+void rspin_barrier(rspinlock_t *lock)
+{
+    spin_barrier_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
+}
+
 int rspin_trylock(rspinlock_t *lock)
 {
     unsigned int cpu = smp_processor_id();
diff --git a/xen/drivers/char/console.c b/xen/drivers/char/console.c
index d5d8b6cd77..63cad87125 100644
--- a/xen/drivers/char/console.c
+++ b/xen/drivers/char/console.c
@@ -328,7 +328,7 @@ static void cf_check do_dec_thresh(unsigned char key, struct cpu_user_regs *regs
 
 static void conring_puts(const char *str, size_t len)
 {
-    ASSERT(spin_is_locked(&console_lock));
+    ASSERT(rspin_is_locked(&console_lock));
 
     while ( len-- )
         conring[CONRING_IDX_MASK(conringp++)] = *str++;
@@ -757,7 +757,7 @@ static void __putstr(const char *str)
 {
     size_t len = strlen(str);
 
-    ASSERT(spin_is_locked(&console_lock));
+    ASSERT(rspin_is_locked(&console_lock));
 
     console_serial_puts(str, len);
     video_puts(str, len);
diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
index 22342f07ac..f55c1a5cf4 100644
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -64,7 +64,7 @@ void pcidevs_unlock(void)
 
 bool pcidevs_locked(void)
 {
-    return !!spin_is_locked(&_pcidevs_lock);
+    return rspin_is_locked(&_pcidevs_lock);
 }
 
 static struct radix_tree_root pci_segments;
diff --git a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h
index ccb1cafa5f..fb6ca1949a 100644
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -239,6 +239,8 @@ void rspin_lock(rspinlock_t *lock);
 unsigned long __rspin_lock_irqsave(rspinlock_t *lock);
 void rspin_unlock(rspinlock_t *lock);
 void rspin_unlock_irqrestore(rspinlock_t *lock, unsigned long flags);
+int rspin_is_locked(const rspinlock_t *lock);
+void rspin_barrier(rspinlock_t *lock);
 
 #define spin_lock(l)                  _spin_lock(l)
 #define spin_lock_cb(l, c, d)         _spin_lock_cb(l, c, d)
-- 
2.35.3
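As a quick illustration of how the two new helpers are meant to be used, here is
a minimal, hypothetical caller. It assumes Xen's ASSERT() and a DEFINE_RSPINLOCK()
initializer for rspinlock_t; the demo_* lock and function names are invented for
the example and are not part of the patch.

#include <xen/spinlock.h>

/* Hypothetical lock; DEFINE_RSPINLOCK() is assumed to be available for rspinlock_t. */
static DEFINE_RSPINLOCK(demo_lock);

static void demo_update(void)
{
    /*
     * rspin_is_locked() can return false even though another CPU holds the
     * lock recursively, so it is only meant for ASSERT()s and similar checks.
     */
    ASSERT(rspin_is_locked(&demo_lock));
    /* ... modify state protected by demo_lock ... */
}

static void demo_caller(void)
{
    rspin_lock(&demo_lock);
    demo_update();
    rspin_unlock(&demo_lock);
}

static void demo_teardown(void)
{
    /*
     * Wait for all current holders of demo_lock to drop it before the
     * protected state goes away; this is the recursive counterpart of
     * spin_barrier().
     */
    rspin_barrier(&demo_lock);
    /* ... free state formerly protected by demo_lock ... */
}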