[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [linux-2.6.18-xen] blkback: Do not queue up bio structs before submitting them.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1225967340 0
# Node ID 61ab98b5cc0ebf20dd766ec67a84319f058ef6f0
# Parent  0729cb8b1292c9168db0b8e3f2251fd0a9d531b8
blkback: Do not queue up bio structs before submitting them.
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 drivers/xen/blkback/blkback.c |   44 +++++++++++++++++++++++++-----------------
 1 files changed, 27 insertions(+), 17 deletions(-)

diff -r 0729cb8b1292 -r 61ab98b5cc0e drivers/xen/blkback/blkback.c
--- a/drivers/xen/blkback/blkback.c     Wed Nov 05 15:43:55 2008 +0000
+++ b/drivers/xen/blkback/blkback.c     Thu Nov 06 10:29:00 2008 +0000
@@ -151,9 +151,9 @@ static void unplug_queue(blkif_t *blkif)
        blkif->plug = NULL;
 }
 
-static void plug_queue(blkif_t *blkif, struct bio *bio)
-{
-       request_queue_t *q = bdev_get_queue(bio->bi_bdev);
+static void plug_queue(blkif_t *blkif, struct block_device *bdev)
+{
+       request_queue_t *q = bdev_get_queue(bdev);
 
        if (q == blkif->plug)
                return;
@@ -389,8 +389,8 @@ static void dispatch_rw_block_io(blkif_t
                unsigned long buf; unsigned int nsec;
        } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int nseg;
-       struct bio *bio = NULL, *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-       int ret, i, nbio = 0;
+       struct bio *bio = NULL;
+       int ret, i;
        int operation;
 
        switch (req->operation) {
@@ -477,6 +477,10 @@ static void dispatch_rw_block_io(blkif_t
                goto fail_flush;
        }
 
+       plug_queue(blkif, preq.bdev);
+       atomic_set(&pending_req->pendcnt, 1);
+       blkif_get(blkif);
+
        for (i = 0; i < nseg; i++) {
                if (((int)preq.sector_number|(int)seg[i].nsec) &
                    ((bdev_hardsect_size(preq.bdev) >> 9) - 1)) {
@@ -490,7 +494,12 @@ static void dispatch_rw_block_io(blkif_t
                                     virt_to_page(vaddr(pending_req, i)),
                                     seg[i].nsec << 9,
                                     seg[i].buf & ~PAGE_MASK) == 0)) {
-                       bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg-i);
+                       if (bio) {
+                               atomic_inc(&pending_req->pendcnt);
+                               submit_bio(operation, bio);
+                       }
+
+                       bio = bio_alloc(GFP_KERNEL, nseg-i);
                        if (unlikely(bio == NULL))
                                goto fail_put_bio;
 
@@ -505,7 +514,7 @@ static void dispatch_rw_block_io(blkif_t
 
        if (!bio) {
                BUG_ON(operation != WRITE_BARRIER);
-               bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, 0);
+               bio = bio_alloc(GFP_KERNEL, 0);
                if (unlikely(bio == NULL))
                        goto fail_put_bio;
 
@@ -515,12 +524,7 @@ static void dispatch_rw_block_io(blkif_t
                bio->bi_sector  = -1;
        }
 
-       plug_queue(blkif, bio);
-       atomic_set(&pending_req->pendcnt, nbio);
-       blkif_get(blkif);
-
-       for (i = 0; i < nbio; i++)
-               submit_bio(operation, biolist[i]);
+       submit_bio(operation, bio);
 
        if (operation == READ)
                blkif->st_rd_sect += preq.nr_sects;
@@ -529,16 +533,22 @@ static void dispatch_rw_block_io(blkif_t
 
        return;
 
- fail_put_bio:
-       for (i = 0; i < (nbio-1); i++)
-               bio_put(biolist[i]);
  fail_flush:
        fast_flush_area(pending_req);
  fail_response:
        make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
        free_req(pending_req);
        msleep(1); /* back off a bit */
-} 
+       return;
+
+ fail_put_bio:
+       __end_block_io_op(pending_req, -EINVAL);
+       if (bio)
+               bio_put(bio);
+       unplug_queue(blkif);
+       msleep(1); /* back off a bit */
+       return;
+}
 
 
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted by RackSpace, which monitors our
servers 24x7x365 and backs them with RackSpace's Fanatical Support®.