
[Xen-changelog] [linux-2.6.18-xen] blkback: Don't let in-flight requests defer pending ones



# HG changeset patch
# User Daniel Stodden <daniel.stodden@xxxxxxxxxx>
# Date 1318338490 -7200
# Node ID c7c14595c18b4ddc1a16e7b64352ba8ac21fbecf
# Parent  28740a325058c0c196ef22f59506710dc4ff4c55
blkback: Don't let in-flight requests defer pending ones

Running RING_FINAL_CHECK_FOR_REQUESTS from make_response is a bad
idea. It means that in-flight I/O ends up blocking continued
batches. This essentially kills throughput for frontends which unplug
(or even just notify) early and rightfully assume additional requests
will be picked up in time, not synchronously.
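
For context, RING_FINAL_CHECK_FOR_REQUESTS is the consumer-side macro
from xen/include/public/io/ring.h which re-arms req_event and then
re-checks for requests that raced in. Roughly (paraphrased from the
canonical header, not verbatim):

    #define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do {      \
        /* First pass: anything already on the ring? */              \
        (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);            \
        if (_work_to_do) break;                                      \
        /* Re-arm notifications at the next unconsumed slot. */      \
        (_r)->sring->req_event = (_r)->req_cons + 1;                 \
        /* Close the race with a concurrent producer: re-check. */   \
        mb();                                                        \
        (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);            \
    } while (0)

Since make_response only runs on I/O completion, calling this macro
from there means req_event is only ever re-armed when in-flight
requests finish, so a frontend that queued further work without
notifying is left waiting on its own completions.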

Saw cache writeback go up from 45MB/s to 65MB/s for xen-blkfront
on iSCSI.

Signed-off-by: Daniel Stodden <daniel.stodden@xxxxxxxxxx>
Committed-by: Jan Beulich <jbeulich@xxxxxxxx>
---


diff -r 28740a325058 -r c7c14595c18b drivers/xen/blkback/blkback.c
--- a/drivers/xen/blkback/blkback.c     Mon Oct 10 14:43:20 2011 +0200
+++ b/drivers/xen/blkback/blkback.c     Tue Oct 11 15:08:10 2011 +0200
@@ -309,7 +309,7 @@
  * DOWNWARD CALLS -- These interface with the block-device layer proper.
  */
 
-static int do_block_io_op(blkif_t *blkif)
+static int _do_block_io_op(blkif_t *blkif)
 {
        blkif_back_rings_t *blk_rings = &blkif->blk_rings;
        blkif_request_t req;
@@ -387,6 +387,23 @@
        return more_to_do;
 }
 
+static int
+do_block_io_op(blkif_t *blkif)
+{
+       blkif_back_rings_t *blk_rings = &blkif->blk_rings;
+       int more_to_do;
+
+       do {
+               more_to_do = _do_block_io_op(blkif);
+               if (more_to_do)
+                       break;
+
+               RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
+       } while (more_to_do);
+
+       return more_to_do;
+}
+
 static void dispatch_rw_block_io(blkif_t *blkif,
                                 blkif_request_t *req,
                                 pending_req_t *pending_req)
@@ -577,7 +594,6 @@
        blkif_response_t  resp;
        unsigned long     flags;
        blkif_back_rings_t *blk_rings = &blkif->blk_rings;
-       int more_to_do = 0;
        int notify;
 
        resp.id        = id;
@@ -604,22 +620,8 @@
        }
        blk_rings->common.rsp_prod_pvt++;
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
-       if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
-               /*
-                * Tail check for pending requests. Allows frontend to avoid
-                * notifications if requests are already in flight (lower
-                * overheads and promotes batching).
-                */
-               RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
-
-       } else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
-               more_to_do = 1;
-       }
-
        spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
 
-       if (more_to_do)
-               blkif_notify_work(blkif);
        if (notify)
                notify_remote_via_irq(blkif->irq);
 }
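
The frontend-side push shows why frontends "rightfully assume" this:
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY (also from
xen/include/public/io/ring.h; quoted from memory, consult the header
for the authoritative text) suppresses the event whenever the
backend's req_event threshold is not crossed, so requests queued
behind an earlier batch are pushed silently and rely on the backend
re-checking the ring, as the do_block_io_op loop above now does:

    #define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do {        \
        RING_IDX __old = (_r)->sring->req_prod;                          \
        RING_IDX __new = (_r)->req_prod_pvt;                             \
        wmb(); /* back sees requests /before/ updated producer index */  \
        (_r)->sring->req_prod = __new;                                   \
        mb();  /* back sees new requests /before/ we check req_event */  \
        (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) <        \
                     (RING_IDX)(__new - __old));                         \
    } while (0)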
