
[Xen-changelog] [linux-2.6.18-xen] blkfront: make blkif_io_lock spinlock per-device



# HG changeset patch
# User Steven Noonan <snoonan@xxxxxxxxxx>
# Date 1330361509 -3600
# Node ID 151972c92963e10b7943bc6b6381a79caf566b5d
# Parent  5259389e19dcf76941c0ec57dee8ab37c5d850d5
blkfront: make blkif_io_lock spinlock per-device

This patch moves the global blkif_io_lock into the per-device structure.
The spinlock seems to exist for two reasons: to disable IRQs while in
blkfront's interrupt handler, and to protect the blkfront VBDs when a
detachment is requested.
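
For context, those two critical sections look roughly like this with the
global lock. This is a simplified sketch, not the complete driver: ring
processing and error handling are elided, the struct is trimmed to the
fields used here, and the gnttab declarations come from the Xen headers
in this tree.

/* Simplified sketch of the pre-patch locking -- not the full driver. */
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>

static DEFINE_SPINLOCK(blkif_io_lock);  /* one lock shared by every VBD */

struct blkfront_info {
        int connected;                  /* BLKIF_STATE_* */
        request_queue_t *rq;            /* this device's request queue */
        struct gnttab_free_callback callback;
        /* ... ring, gendisk, irq, etc. as in block.h ... */
};

/* Reason 1: the interrupt handler disables IRQs around ring processing. */
static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *regs)
{
        struct blkfront_info *info = dev_id;
        unsigned long flags;

        spin_lock_irqsave(&blkif_io_lock, flags);
        /* ... consume responses from the shared ring, kick queues ... */
        spin_unlock_irqrestore(&blkif_io_lock, flags);
        return IRQ_HANDLED;
}

/* Reason 2: detaching (or suspending) quiesces the VBD under the lock. */
static void blkif_free(struct blkfront_info *info, int suspend)
{
        spin_lock_irq(&blkif_io_lock);
        if (info->rq)
                blk_stop_queue(info->rq);       /* no more blkif_request() */
        gnttab_cancel_free_callback(&info->callback);
        spin_unlock_irq(&blkif_io_lock);
}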

Having a global blkif_io_lock doesn't make sense given the use case,
and it drastically hinders performance due to lock contention: every
VBD with pending I/O has to take the same lock to make progress, which
serializes work across all devices.
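
The fix is mechanical: the lock moves into struct blkfront_info, is
initialized at probe time, and is handed to the block layer when the
request queue is created, so each VBD serializes only against itself.
In outline (a condensed sketch of the post-patch shape; blkfront_probe_init
below is a hypothetical wrapper -- in the patch the spin_lock_init() call
sits directly in the probe path):

/* Condensed sketch of the per-device locking this patch introduces. */
#include <linux/spinlock.h>
#include <linux/blkdev.h>

struct blkfront_info {
        spinlock_t io_lock;             /* replaces the global blkif_io_lock */
        request_queue_t *rq;
        /* ... remaining fields as in block.h ... */
};

static void do_blkif_request(request_queue_t *rq);  /* request_fn, as in blkfront.c */

/* Probe path: every device initializes its own lock. */
static void blkfront_probe_init(struct blkfront_info *info)
{
        spin_lock_init(&info->io_lock);
}

/* Queue setup: blk_init_queue() now gets this device's lock, so the
 * block layer serializes requests per device rather than globally.
 * (Queue tuning such as segment limits is elided here.) */
static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
                                struct blkfront_info *info)
{
        request_queue_t *rq = blk_init_queue(do_blkif_request, &info->io_lock);

        if (rq == NULL)
                return -1;
        gd->queue = rq;
        info->rq = rq;  /* recorded here too, for blkif_free() et al. */
        return 0;
}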

Signed-off-by: Steven Noonan <snoonan@xxxxxxxxxx>
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Committed-by: Jan Beulich <jbeulich@xxxxxxxx>
---


diff -r 5259389e19dc -r 151972c92963 drivers/xen/blkfront/blkfront.c
--- a/drivers/xen/blkfront/blkfront.c   Fri Feb 24 13:11:55 2012 +0100
+++ b/drivers/xen/blkfront/blkfront.c   Mon Feb 27 17:51:49 2012 +0100
@@ -108,6 +108,7 @@
                return -ENOMEM;
        }
 
+       spin_lock_init(&info->io_lock);
        info->xbdev = dev;
        info->vdevice = vdevice;
        info->connected = BLKIF_STATE_DISCONNECTED;
@@ -381,10 +382,10 @@
        (void)xenbus_switch_state(info->xbdev, XenbusStateConnected);
 
        /* Kick pending requests. */
-       spin_lock_irq(&blkif_io_lock);
+       spin_lock_irq(&info->io_lock);
        info->connected = BLKIF_STATE_CONNECTED;
        kick_pending_request_queues(info);
-       spin_unlock_irq(&blkif_io_lock);
+       spin_unlock_irq(&info->io_lock);
 
        add_disk(info->gd);
 
@@ -406,12 +407,12 @@
        if (info->rq == NULL)
                goto out;
 
-       spin_lock_irqsave(&blkif_io_lock, flags);
+       spin_lock_irqsave(&info->io_lock, flags);
        /* No more blkif_request(). */
        blk_stop_queue(info->rq);
        /* No more gnttab callback work. */
        gnttab_cancel_free_callback(&info->callback);
-       spin_unlock_irqrestore(&blkif_io_lock, flags);
+       spin_unlock_irqrestore(&info->io_lock, flags);
 
        /* Flush gnttab callback work. Must be done with no locks held. */
        flush_scheduled_work();
@@ -484,10 +485,10 @@
 static void blkif_restart_queue(void *arg)
 {
        struct blkfront_info *info = (struct blkfront_info *)arg;
-       spin_lock_irq(&blkif_io_lock);
+       spin_lock_irq(&info->io_lock);
        if (info->connected == BLKIF_STATE_CONNECTED)
                kick_pending_request_queues(info);
-       spin_unlock_irq(&blkif_io_lock);
+       spin_unlock_irq(&info->io_lock);
 }
 
 static void blkif_restart_queue_callback(void *arg)
@@ -756,10 +757,10 @@
        struct blkfront_info *info = (struct blkfront_info *)dev_id;
        int uptodate;
 
-       spin_lock_irqsave(&blkif_io_lock, flags);
+       spin_lock_irqsave(&info->io_lock, flags);
 
        if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
-               spin_unlock_irqrestore(&blkif_io_lock, flags);
+               spin_unlock_irqrestore(&info->io_lock, flags);
                return IRQ_HANDLED;
        }
 
@@ -818,7 +819,7 @@
 
        kick_pending_request_queues(info);
 
-       spin_unlock_irqrestore(&blkif_io_lock, flags);
+       spin_unlock_irqrestore(&info->io_lock, flags);
 
        return IRQ_HANDLED;
 }
@@ -826,7 +827,7 @@
 static void blkif_free(struct blkfront_info *info, int suspend)
 {
        /* Prevent new requests being issued until we fix things up. */
-       spin_lock_irq(&blkif_io_lock);
+       spin_lock_irq(&info->io_lock);
        info->connected = suspend ?
                BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
        /* No more blkif_request(). */
@@ -834,7 +835,7 @@
                blk_stop_queue(info->rq);
        /* No more gnttab callback work. */
        gnttab_cancel_free_callback(&info->callback);
-       spin_unlock_irq(&blkif_io_lock);
+       spin_unlock_irq(&info->io_lock);
 
        /* Flush gnttab callback work. Must be done with no locks held. */
        flush_scheduled_work();
@@ -909,7 +910,7 @@
 
        (void)xenbus_switch_state(info->xbdev, XenbusStateConnected);
 
-       spin_lock_irq(&blkif_io_lock);
+       spin_lock_irq(&info->io_lock);
 
        /* Now safe for us to use the shared ring */
        info->connected = BLKIF_STATE_CONNECTED;
@@ -920,7 +921,7 @@
        /* Kick any other new requests queued since we resumed */
        kick_pending_request_queues(info);
 
-       spin_unlock_irq(&blkif_io_lock);
+       spin_unlock_irq(&info->io_lock);
 }
 
 int blkfront_is_ready(struct xenbus_device *dev)
diff -r 5259389e19dc -r 151972c92963 drivers/xen/blkfront/block.h
--- a/drivers/xen/blkfront/block.h      Fri Feb 24 13:11:55 2012 +0100
+++ b/drivers/xen/blkfront/block.h      Mon Feb 27 17:51:49 2012 +0100
@@ -104,6 +104,7 @@
        int connected;
        int ring_ref;
        blkif_front_ring_t ring;
+       spinlock_t io_lock;
        struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int irq;
        struct xlbd_major_info *mi;
@@ -122,8 +123,6 @@
        int users;
 };
 
-extern spinlock_t blkif_io_lock;
-
 extern int blkif_open(struct inode *inode, struct file *filep);
 extern int blkif_release(struct inode *inode, struct file *filep);
 extern int blkif_ioctl(struct inode *inode, struct file *filep,
diff -r 5259389e19dc -r 151972c92963 drivers/xen/blkfront/vbd.c
--- a/drivers/xen/blkfront/vbd.c        Fri Feb 24 13:11:55 2012 +0100
+++ b/drivers/xen/blkfront/vbd.c        Mon Feb 27 17:51:49 2012 +0100
@@ -116,8 +116,6 @@
 #endif
 };
 
-DEFINE_SPINLOCK(blkif_io_lock);
-
 static struct xlbd_major_info *
 xlbd_alloc_major_info(int major, int minor, int index)
 {
@@ -296,11 +294,12 @@
 }
 
 static int
-xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
+xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
+                    struct blkfront_info *info)
 {
        request_queue_t *rq;
 
-       rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
+       rq = blk_init_queue(do_blkif_request, &info->io_lock);
        if (rq == NULL)
                return -1;
 
@@ -323,6 +322,7 @@
        blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
 
        gd->queue = rq;
+       info->rq = rq;
 
        return 0;
 }
@@ -394,12 +394,11 @@
        gd->driverfs_dev = &(info->xbdev->dev);
        set_capacity(gd, capacity);
 
-       if (xlvbd_init_blk_queue(gd, sector_size)) {
+       if (xlvbd_init_blk_queue(gd, sector_size, info)) {
                del_gendisk(gd);
                goto release;
        }
 
-       info->rq = gd->queue;
        info->gd = gd;
 
        if (info->feature_barrier)
