[PATCH AUTOSEL 6.6 3/3] xen: privcmd: Switch from mutex to spinlock for irqfds
From: Viresh Kumar <viresh.kumar@xxxxxxxxxx>

[ Upstream commit 1c682593096a487fd9aebc079a307ff7a6d054a3 ]

irqfd_wakeup() gets EPOLLHUP when it is called by eventfd_release() by
way of wake_up_poll(&ctx->wqh, EPOLLHUP), which gets called under
spin_lock_irqsave(). We can't use a mutex here as it will lead to a
deadlock.

Fix it by switching over to a spin lock.

Reported-by: Al Viro <viro@xxxxxxxxxxxxxxxxxx>
Signed-off-by: Viresh Kumar <viresh.kumar@xxxxxxxxxx>
Reviewed-by: Juergen Gross <jgross@xxxxxxxx>
Link: https://lore.kernel.org/r/a66d7a7a9001424d432f52a9fc3931a1f345464f.1718703669.git.viresh.kumar@xxxxxxxxxx
Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>
---
 drivers/xen/privcmd.c | 25 +++++++++++++++----------
 1 file changed, 15 insertions(+), 10 deletions(-)

diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index da88173bac432..923f064c7e3e9 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -841,7 +841,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file,
 #ifdef CONFIG_XEN_PRIVCMD_IRQFD
 /* Irqfd support */
 static struct workqueue_struct *irqfd_cleanup_wq;
-static DEFINE_MUTEX(irqfds_lock);
+static DEFINE_SPINLOCK(irqfds_lock);
 static LIST_HEAD(irqfds_list);
 
 struct privcmd_kernel_irqfd {
@@ -905,9 +905,11 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key)
 		irqfd_inject(kirqfd);
 
 	if (flags & EPOLLHUP) {
-		mutex_lock(&irqfds_lock);
+		unsigned long flags;
+
+		spin_lock_irqsave(&irqfds_lock, flags);
 		irqfd_deactivate(kirqfd);
-		mutex_unlock(&irqfds_lock);
+		spin_unlock_irqrestore(&irqfds_lock, flags);
 	}
 
 	return 0;
@@ -925,6 +927,7 @@ irqfd_poll_func(struct file *file, wait_queue_head_t *wqh, poll_table *pt)
 static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
 {
 	struct privcmd_kernel_irqfd *kirqfd, *tmp;
+	unsigned long flags;
 	__poll_t events;
 	struct fd f;
 	void *dm_op;
@@ -964,18 +967,18 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
 	init_waitqueue_func_entry(&kirqfd->wait, irqfd_wakeup);
 	init_poll_funcptr(&kirqfd->pt, irqfd_poll_func);
 
-	mutex_lock(&irqfds_lock);
+	spin_lock_irqsave(&irqfds_lock, flags);
 
 	list_for_each_entry(tmp, &irqfds_list, list) {
 		if (kirqfd->eventfd == tmp->eventfd) {
 			ret = -EBUSY;
-			mutex_unlock(&irqfds_lock);
+			spin_unlock_irqrestore(&irqfds_lock, flags);
 			goto error_eventfd;
 		}
 	}
 
 	list_add_tail(&kirqfd->list, &irqfds_list);
-	mutex_unlock(&irqfds_lock);
+	spin_unlock_irqrestore(&irqfds_lock, flags);
 
 	/*
 	 * Check if there was an event already pending on the eventfd before we
@@ -1007,12 +1010,13 @@ static int privcmd_irqfd_deassign(struct privcmd_irqfd *irqfd)
 {
 	struct privcmd_kernel_irqfd *kirqfd;
 	struct eventfd_ctx *eventfd;
+	unsigned long flags;
 
 	eventfd = eventfd_ctx_fdget(irqfd->fd);
 	if (IS_ERR(eventfd))
 		return PTR_ERR(eventfd);
 
-	mutex_lock(&irqfds_lock);
+	spin_lock_irqsave(&irqfds_lock, flags);
 
 	list_for_each_entry(kirqfd, &irqfds_list, list) {
 		if (kirqfd->eventfd == eventfd) {
@@ -1021,7 +1025,7 @@ static int privcmd_irqfd_deassign(struct privcmd_irqfd *irqfd)
 		}
 	}
 
-	mutex_unlock(&irqfds_lock);
+	spin_unlock_irqrestore(&irqfds_lock, flags);
 
 	eventfd_ctx_put(eventfd);
 
@@ -1069,13 +1073,14 @@ static int privcmd_irqfd_init(void)
 static void privcmd_irqfd_exit(void)
 {
 	struct privcmd_kernel_irqfd *kirqfd, *tmp;
+	unsigned long flags;
 
-	mutex_lock(&irqfds_lock);
+	spin_lock_irqsave(&irqfds_lock, flags);
 
 	list_for_each_entry_safe(kirqfd, tmp, &irqfds_list, list)
 		irqfd_deactivate(kirqfd);
 
-	mutex_unlock(&irqfds_lock);
+	spin_unlock_irqrestore(&irqfds_lock, flags);
 
 	destroy_workqueue(irqfd_cleanup_wq);
 }
-- 
2.43.0
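
For context (this sketch is not part of the patch above): the fix hinges on one
constraint. irqfd_wakeup() can be reached from the wake_up_poll() path with a
spinlock held and interrupts disabled, so the lock guarding irqfds_list must
never sleep, which rules out a mutex. The standalone module below shows the
same spin_lock_irqsave()/spin_unlock_irqrestore() pattern in isolation; all
demo_* names are hypothetical and the code is not taken from
drivers/xen/privcmd.c.

// SPDX-License-Identifier: GPL-2.0
/*
 * Minimal sketch: a list that may be touched from atomic context (such as
 * a wait-queue wakeup callback) is protected by a spinlock taken with
 * spin_lock_irqsave(), not by a mutex, because mutex_lock() may sleep.
 */
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>

static DEFINE_SPINLOCK(demo_lock);	/* the buggy pattern used DEFINE_MUTEX() */
static LIST_HEAD(demo_list);

struct demo_entry {
	struct list_head list;
	int id;
};

/* Add an entry from process context. */
static int demo_add(int id)
{
	struct demo_entry *e = kzalloc(sizeof(*e), GFP_KERNEL);
	unsigned long flags;

	if (!e)
		return -ENOMEM;

	e->id = id;
	spin_lock_irqsave(&demo_lock, flags);
	list_add_tail(&e->list, &demo_list);
	spin_unlock_irqrestore(&demo_lock, flags);
	return 0;
}

/*
 * Drop everything. The same lock/unlock pairing would also be legal from
 * atomic context (e.g. a wakeup callback), unlike mutex_lock().
 */
static void demo_remove_all(void)
{
	struct demo_entry *e, *tmp;
	unsigned long flags;
	LIST_HEAD(gone);

	spin_lock_irqsave(&demo_lock, flags);
	list_splice_init(&demo_list, &gone);
	spin_unlock_irqrestore(&demo_lock, flags);

	/* Free outside the lock to keep the critical section short. */
	list_for_each_entry_safe(e, tmp, &gone, list)
		kfree(e);
}

static int __init demo_init(void)
{
	return demo_add(1);
}

static void __exit demo_exit(void)
{
	demo_remove_all();
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("spinlock-protected list sketch");

The sketch mirrors the patch's choice of spin_lock_irqsave() over plain
spin_lock(): saving and restoring the interrupt flags keeps the locking
correct whether the caller runs with interrupts enabled or already disabled.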