Re: [Xen-devel] [PATCH] xen-blkfront: make blkif_io_lock spinlock per-device
On Fri, Feb 17, 2012 at 12:04:44PM -0800, Steven Noonan wrote:
> This patch moves the global blkif_io_lock to the per-device structure. The
> spinlock seems to exist for two reasons: to disable IRQs in the interrupt
> handlers for blkfront, and to protect the blkfront VBDs when a detachment
> is requested.
>
> Having a global blkif_io_lock doesn't make sense given the use case, and
> it drastically hinders performance due to contention. All VBDs with
> pending IOs have to take the lock in order to get work done, which
> serializes everything pretty badly.

applied in #testing.
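
For folks skimming the archive, the shape of the change in one screenful, as
a minimal sketch rather than the literal driver code (the alloc_info()
helper below is hypothetical shorthand for what blkfront_probe() does):

	#include <linux/slab.h>
	#include <linux/spinlock.h>

	/*
	 * Old scheme: one lock for every vbd in the guest, so all
	 * devices with pending I/O serialized on it:
	 *
	 *	static DEFINE_SPINLOCK(blkif_io_lock);
	 *
	 * New scheme: the lock moves into the per-device state.
	 */
	struct blkfront_info {
		spinlock_t io_lock;	/* guards this device's ring and queue */
		/* remaining fields as in the patch */
	};

	/* Each probed device now initializes its own lock. */
	static struct blkfront_info *alloc_info(void)
	{
		struct blkfront_info *info = kzalloc(sizeof(*info), GFP_KERNEL);

		if (info)
			spin_lock_init(&info->io_lock);
		return info;
	}

The win is that two vbds pushing I/O at the same time now touch disjoint
locks, so they no longer serialize against each other.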
> Signed-off-by: Steven Noonan <snoonan@xxxxxxxxxx>
> ---
>  drivers/block/xen-blkfront.c |   32 ++++++++++++++++----------------
>  1 files changed, 16 insertions(+), 16 deletions(-)
>
> diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
> index 7b2ec59..df9b8da 100644
> --- a/drivers/block/xen-blkfront.c
> +++ b/drivers/block/xen-blkfront.c
> @@ -81,6 +81,7 @@ static const struct block_device_operations xlvbd_block_fops;
>   */
>  struct blkfront_info
>  {
> +	spinlock_t io_lock;
>  	struct mutex mutex;
>  	struct xenbus_device *xbdev;
>  	struct gendisk *gd;
> @@ -104,8 +105,6 @@ struct blkfront_info
>  	int is_ready;
>  };
>
> -static DEFINE_SPINLOCK(blkif_io_lock);
> -
>  static unsigned int nr_minors;
>  static unsigned long *minors;
>  static DEFINE_SPINLOCK(minor_lock);
> @@ -413,7 +412,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
>  	struct request_queue *rq;
>  	struct blkfront_info *info = gd->private_data;
>
> -	rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
> +	rq = blk_init_queue(do_blkif_request, &info->io_lock);
>  	if (rq == NULL)
>  		return -1;
>
> @@ -628,14 +627,14 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
>  	if (info->rq == NULL)
>  		return;
>
> -	spin_lock_irqsave(&blkif_io_lock, flags);
> +	spin_lock_irqsave(&info->io_lock, flags);
>
>  	/* No more blkif_request(). */
>  	blk_stop_queue(info->rq);
>
>  	/* No more gnttab callback work. */
>  	gnttab_cancel_free_callback(&info->callback);
> -	spin_unlock_irqrestore(&blkif_io_lock, flags);
> +	spin_unlock_irqrestore(&info->io_lock, flags);
>
>  	/* Flush gnttab callback work. Must be done with no locks held. */
>  	flush_work_sync(&info->work);
> @@ -667,16 +666,16 @@ static void blkif_restart_queue(struct work_struct *work)
>  {
>  	struct blkfront_info *info = container_of(work, struct blkfront_info, work);
>
> -	spin_lock_irq(&blkif_io_lock);
> +	spin_lock_irq(&info->io_lock);
>  	if (info->connected == BLKIF_STATE_CONNECTED)
>  		kick_pending_request_queues(info);
> -	spin_unlock_irq(&blkif_io_lock);
> +	spin_unlock_irq(&info->io_lock);
>  }
>
>  static void blkif_free(struct blkfront_info *info, int suspend)
>  {
>  	/* Prevent new requests being issued until we fix things up. */
> -	spin_lock_irq(&blkif_io_lock);
> +	spin_lock_irq(&info->io_lock);
>  	info->connected = suspend ?
>  		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
>  	/* No more blkif_request(). */
> @@ -684,7 +683,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
>  		blk_stop_queue(info->rq);
>  	/* No more gnttab callback work. */
>  	gnttab_cancel_free_callback(&info->callback);
> -	spin_unlock_irq(&blkif_io_lock);
> +	spin_unlock_irq(&info->io_lock);
>
>  	/* Flush gnttab callback work. Must be done with no locks held. */
>  	flush_work_sync(&info->work);
> @@ -718,10 +717,10 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
>  	struct blkfront_info *info = (struct blkfront_info *)dev_id;
>  	int error;
>
> -	spin_lock_irqsave(&blkif_io_lock, flags);
> +	spin_lock_irqsave(&info->io_lock, flags);
>
>  	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
> -		spin_unlock_irqrestore(&blkif_io_lock, flags);
> +		spin_unlock_irqrestore(&info->io_lock, flags);
>  		return IRQ_HANDLED;
>  	}
>
> @@ -803,7 +802,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
>
>  	kick_pending_request_queues(info);
>
> -	spin_unlock_irqrestore(&blkif_io_lock, flags);
> +	spin_unlock_irqrestore(&info->io_lock, flags);
>
>  	return IRQ_HANDLED;
>  }
> @@ -978,6 +977,7 @@ static int blkfront_probe(struct xenbus_device *dev,
>  	}
>
>  	mutex_init(&info->mutex);
> +	spin_lock_init(&info->io_lock);
>  	info->xbdev = dev;
>  	info->vdevice = vdevice;
>  	info->connected = BLKIF_STATE_DISCONNECTED;
> @@ -1053,7 +1053,7 @@ static int blkif_recover(struct blkfront_info *info)
>
>  	xenbus_switch_state(info->xbdev, XenbusStateConnected);
>
> -	spin_lock_irq(&blkif_io_lock);
> +	spin_lock_irq(&info->io_lock);
>
>  	/* Now safe for us to use the shared ring */
>  	info->connected = BLKIF_STATE_CONNECTED;
> @@ -1064,7 +1064,7 @@ static int blkif_recover(struct blkfront_info *info)
>  	/* Kick any other new requests queued since we resumed */
>  	kick_pending_request_queues(info);
>
> -	spin_unlock_irq(&blkif_io_lock);
> +	spin_unlock_irq(&info->io_lock);
>
>  	return 0;
>  }
> @@ -1254,10 +1254,10 @@ static void blkfront_connect(struct blkfront_info *info)
>  	xenbus_switch_state(info->xbdev, XenbusStateConnected);
>
>  	/* Kick pending requests. */
> -	spin_lock_irq(&blkif_io_lock);
> +	spin_lock_irq(&info->io_lock);
>  	info->connected = BLKIF_STATE_CONNECTED;
>  	kick_pending_request_queues(info);
> -	spin_unlock_irq(&blkif_io_lock);
> +	spin_unlock_irq(&info->io_lock);
>
>  	add_disk(info->gd);
>
> --
> 1.7.9
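
One detail worth spelling out for anyone backporting this: the spinlock
passed to blk_init_queue() becomes the queue lock that the block core holds
around the request function, so with this patch the request side and the
completion side of each vbd serialize on the same per-device lock:

	/* From xlvbd_init_blk_queue(): the block core now takes
	 * info->io_lock, not a guest-wide lock, before calling
	 * do_blkif_request() for this queue ... */
	rq = blk_init_queue(do_blkif_request, &info->io_lock);

	/* ... and blkif_interrupt() grabs the same lock when it
	 * completes requests, so the two paths still exclude each
	 * other without touching any other device. */
	spin_lock_irqsave(&info->io_lock, flags);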
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel