
[PATCH 01/14] ubd: refactor the interrupt handler



Instead of a separate handler function that leaves no work in the
interrupt handler itself, split out a per-request end I/O helper and
clean up the coding style and variable naming while we're at it.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 arch/um/drivers/ubd_kern.c | 49 ++++++++++++++------------------------
 1 file changed, 18 insertions(+), 31 deletions(-)

diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index ef805eaa9e013d..0c9542d58c01b7 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -447,43 +447,30 @@ static int bulk_req_safe_read(
        return n;
 }
 
-/* Called without dev->lock held, and only in interrupt context. */
-static void ubd_handler(void)
+static void ubd_end_request(struct io_thread_req *io_req)
 {
-       int n;
-       int count;
-
-       while(1){
-               n = bulk_req_safe_read(
-                       thread_fd,
-                       irq_req_buffer,
-                       &irq_remainder,
-                       &irq_remainder_size,
-                       UBD_REQ_BUFFER_SIZE
-               );
-               if (n < 0) {
-                       if(n == -EAGAIN)
-                               break;
-                       printk(KERN_ERR "spurious interrupt in ubd_handler, "
-                              "err = %d\n", -n);
-                       return;
-               }
-               for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
-                       struct io_thread_req *io_req = (*irq_req_buffer)[count];
-
-                       if ((io_req->error == BLK_STS_NOTSUPP) && (req_op(io_req->req) == REQ_OP_DISCARD)) {
-                               blk_queue_max_discard_sectors(io_req->req->q, 0);
-                               blk_queue_max_write_zeroes_sectors(io_req->req->q, 0);
-                       }
-                       blk_mq_end_request(io_req->req, io_req->error);
-                       kfree(io_req);
-               }
+       if (io_req->error == BLK_STS_NOTSUPP &&
+           req_op(io_req->req) == REQ_OP_DISCARD) {
+               blk_queue_max_discard_sectors(io_req->req->q, 0);
+               blk_queue_max_write_zeroes_sectors(io_req->req->q, 0);
        }
+       blk_mq_end_request(io_req->req, io_req->error);
+       kfree(io_req);
 }
 
 static irqreturn_t ubd_intr(int irq, void *dev)
 {
-       ubd_handler();
+       int len, i;
+
+       while ((len = bulk_req_safe_read(thread_fd, irq_req_buffer,
+                       &irq_remainder, &irq_remainder_size,
+                       UBD_REQ_BUFFER_SIZE)) >= 0) {
+               for (i = 0; i < len / sizeof(struct io_thread_req *); i++)
+                       ubd_end_request((*irq_req_buffer)[i]);
+       }
+
+       if (len < 0 && len != -EAGAIN)
+               pr_err("spurious interrupt in %s, err = %d\n", __func__, len);
        return IRQ_HANDLED;
 }
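
For readers unfamiliar with the ubd design this patch touches: the I/O
worker thread hands completed struct io_thread_req pointers back by
writing them into a pipe-like fd (thread_fd), and the interrupt handler
drains that fd in bulk and completes each request. Below is a minimal,
self-contained userspace sketch of that same pattern, assuming a plain
pipe in place of thread_fd; the names (fake_req, drain_completions,
end_request) are made up for illustration and are not part of the
driver.

	/*
	 * Sketch: a worker hands over completed requests by writing
	 * their pointers into a pipe; the consumer drains the pipe in
	 * bulk and runs a per-request completion helper, mirroring the
	 * ubd_end_request()/ubd_intr() split above.
	 */
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>
	#include <errno.h>

	struct fake_req {
		int id;
		int error;
	};

	#define REQ_BUFFER_SIZE 64

	/* Per-request completion helper, analogous to ubd_end_request(). */
	static void end_request(struct fake_req *req)
	{
		printf("completed request %d, error %d\n", req->id, req->error);
		free(req);
	}

	/* Bulk drain loop, analogous to the new ubd_intr() body. */
	static void drain_completions(int fd)
	{
		struct fake_req *buf[REQ_BUFFER_SIZE];
		ssize_t len;
		size_t i;

		/* Each read returns a whole number of pointer-sized records. */
		while ((len = read(fd, buf, sizeof(buf))) > 0) {
			for (i = 0; i < len / sizeof(struct fake_req *); i++)
				end_request(buf[i]);
		}

		if (len < 0 && errno != EAGAIN)
			fprintf(stderr, "spurious wakeup, err = %d\n", errno);
	}

	int main(void)
	{
		int fds[2];
		struct fake_req *req = malloc(sizeof(*req));

		if (!req || pipe(fds) != 0)
			return 1;
		req->id = 1;
		req->error = 0;

		/* Worker side: hand over a completed request by pointer. */
		if (write(fds[1], &req, sizeof(req)) != sizeof(req))
			return 1;
		close(fds[1]);

		/* Consumer side: drain and complete it. */
		drain_completions(fds[0]);
		return 0;
	}

Batching the reads amortizes the per-wakeup cost across many completed
requests, and splitting out the per-request helper keeps the drain loop
short enough to live directly in the interrupt handler, which is the
point of the refactor.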
 
-- 
2.43.0