[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH] linux-2.6.18/blkback: streamline main processing loop


  • To: "xen-devel" <xen-devel@xxxxxxxxxxxxx>
  • From: "Jan Beulich" <JBeulich@xxxxxxxx>
  • Date: Mon, 26 Mar 2012 16:38:51 +0100
  • Delivery-date: Mon, 26 Mar 2012 15:38:30 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xen.org>

- move stats updates into dispatch_rw_block_io(), allowing common cases
  to be folded together
- don't alloc pending_req_t instance when none is going to be needed
  (particularly relevant since they are a global resource)
- use type-safe assignment rather than memcpy() for obtaining native
  requests from ring

--- a/drivers/xen/blkback/blkback.c
+++ b/drivers/xen/blkback/blkback.c
@@ -315,32 +315,21 @@ static int _do_block_io_op(blkif_t *blki
        blkif_request_t req;
        pending_req_t *pending_req;
        RING_IDX rc, rp;
-       int more_to_do = 0;
 
        rc = blk_rings->common.req_cons;
        rp = blk_rings->common.sring->req_prod;
        rmb(); /* Ensure we see queued requests up to 'rp'. */
 
-       while ((rc != rp)) {
-
+       while (rc != rp) {
                if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
                        break;
 
-               if (kthread_should_stop()) {
-                       more_to_do = 1;
-                       break;
-               }
-
-               pending_req = alloc_req();
-               if (NULL == pending_req) {
-                       blkif->st_oo_req++;
-                       more_to_do = 1;
-                       break;
-               }
+               if (kthread_should_stop())
+                       return 1;
 
                switch (blkif->blk_protocol) {
                case BLKIF_PROTOCOL_NATIVE:
-                       memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
+                       req = *RING_GET_REQUEST(&blk_rings->native, rc);
                        break;
                case BLKIF_PROTOCOL_X86_32:
                        blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
@@ -351,32 +340,37 @@ static int _do_block_io_op(blkif_t *blki
                default:
                        BUG();
                }
-               blk_rings->common.req_cons = ++rc; /* before make_response() */
 
-               /* Apply all sanity checks to /private copy/ of request. */
-               barrier();
+               ++rc;
 
                switch (req.operation) {
                case BLKIF_OP_READ:
-                       blkif->st_rd_req++;
-                       dispatch_rw_block_io(blkif, &req, pending_req);
-                       break;
-               case BLKIF_OP_WRITE_BARRIER:
-                       blkif->st_br_req++;
-                       /* fall through */
                case BLKIF_OP_WRITE:
-                       blkif->st_wr_req++;
+               case BLKIF_OP_WRITE_BARRIER:
+                       pending_req = alloc_req();
+                       if (!pending_req) {
+                               blkif->st_oo_req++;
+                               return 1;
+                       }
+
+                       /* before make_response() */
+                       blk_rings->common.req_cons = rc;
+
+                       /* Apply all sanity checks to /private copy/ of request. */
+                       barrier();
+
                        dispatch_rw_block_io(blkif, &req, pending_req);
                        break;
                default:
                        /* A good sign something is wrong: sleep for a while to
                         * avoid excessive CPU consumption by a bad guest. */
                        msleep(1);
+                       blk_rings->common.req_cons = rc;
+                       barrier();
                        DPRINTK("error: unknown block io operation [%d]\n",
                                req.operation);
                        make_response(blkif, req.id, req.operation,
                                      BLKIF_RSP_ERROR);
-                       free_req(pending_req);
                        break;
                }
 
@@ -384,7 +378,7 @@ static int _do_block_io_op(blkif_t *blki
                cond_resched();
        }
 
-       return more_to_do;
+       return 0;
 }
 
 static int
@@ -421,12 +415,15 @@ static void dispatch_rw_block_io(blkif_t
 
        switch (req->operation) {
        case BLKIF_OP_READ:
+               blkif->st_rd_req++;
                operation = READ;
                break;
        case BLKIF_OP_WRITE:
+               blkif->st_wr_req++;
                operation = WRITE;
                break;
        case BLKIF_OP_WRITE_BARRIER:
+               blkif->st_br_req++;
                operation = WRITE_BARRIER;
                break;
        default:
@@ -559,7 +556,7 @@ static void dispatch_rw_block_io(blkif_t
 
        if (operation == READ)
                blkif->st_rd_sect += preq.nr_sects;
-       else if (operation == WRITE || operation == WRITE_BARRIER)
+       else
                blkif->st_wr_sect += preq.nr_sects;
 
        return;


Attachment: xen-blkback-streamline-main-loop.patch
Description: Text document

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.