[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [linux-2.6.18-xen] blktap: don't let in-flight requests defer pending ones


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-linux-2.6.18-xen <patchbot@xxxxxxx>
  • Date: Fri, 23 Nov 2012 11:00:04 +0000
  • Delivery-date: Fri, 23 Nov 2012 11:00:13 +0000
  • List-id: "Change log for Mercurial \(receive only\)" <xen-changelog.lists.xen.org>

# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxx>
# Date 1353667969 -3600
# Node ID ab5ce0b7fc2b5ee64ce96b0e1aa225a95a49d25b
# Parent  1f1b30566a42de417a2f2d1111376266e6464a51
blktap: don't let in-flight requests defer pending ones

Running RING_FINAL_CHECK_FOR_REQUESTS from make_response is a bad
idea. It means that in-flight I/O is essentially blocking continued
batches. This effectively kills throughput on frontends which unplug
(or even just notify) early and rightfully assume additional requests
will be picked up in time, not synchronously.

Derived from a similar blkback patch by Daniel Stodden (see c/s
1118:c7c14595c18b).

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---


diff -r 1f1b30566a42 -r ab5ce0b7fc2b drivers/xen/blktap/blktap.c
--- a/drivers/xen/blktap/blktap.c       Thu Nov 22 17:48:00 2012 +0100
+++ b/drivers/xen/blktap/blktap.c       Fri Nov 23 11:52:49 2012 +0100
@@ -1285,7 +1285,7 @@ irqreturn_t tap_blkif_be_int(int irq, vo
  * DOWNWARD CALLS -- These interface with the block-device layer proper.
  */
 static int print_dbug = 1;
-static int do_block_io_op(blkif_t *blkif)
+static int _do_block_io_op(blkif_t *blkif)
 {
        blkif_back_rings_t *blk_rings = &blkif->blk_rings;
        blkif_request_t req;
@@ -1397,6 +1397,22 @@ static int do_block_io_op(blkif_t *blkif
        return more_to_do;
 }
 
+static int do_block_io_op(blkif_t *blkif)
+{
+       blkif_back_rings_t *blk_rings = &blkif->blk_rings;
+       int more_to_do;
+
+       do {
+               more_to_do = _do_block_io_op(blkif);
+               if (more_to_do)
+                       break;
+
+               RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
+       } while (more_to_do);
+
+       return more_to_do;
+}
+
 static void dispatch_rw_block_io(blkif_t *blkif,
                                 blkif_request_t *req,
                                 pending_req_t *pending_req)
@@ -1647,7 +1663,6 @@ static void make_response(blkif_t *blkif
        blkif_response_t  resp;
        unsigned long     flags;
        blkif_back_rings_t *blk_rings = &blkif->blk_rings;
-       int more_to_do = 0;
        int notify;
 
        resp.id        = id;
@@ -1678,20 +1693,7 @@ static void make_response(blkif_t *blkif
        blk_rings->common.rsp_prod_pvt++;
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
 
-       if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
-               /*
-                * Tail check for pending requests. Allows frontend to avoid
-                * notifications if requests are already in flight (lower
-                * overheads and promotes batching).
-                */
-               RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
-       } else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
-               more_to_do = 1;
-       }
-
        spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
-       if (more_to_do)
-               blkif_notify_work(blkif);
        if (notify)
                notify_remote_via_irq(blkif->irq);
 }

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.