[PATCH v2 2/6] block/nvme: convert to blk_io_plug_call() API
Stop using the .bdrv_co_io_plug() API because it is not multi-queue
block layer friendly. Use the new blk_io_plug_call() API to batch I/O
submission instead.

Signed-off-by: Stefan Hajnoczi <stefanha@xxxxxxxxxx>
Reviewed-by: Eric Blake <eblake@xxxxxxxxxx>
---
v2
- Remove unused nvme_process_completion_queue_plugged trace event
  [Stefano]
---
 block/nvme.c       | 44 ++++++++++++--------------------------------
 block/trace-events |  1 -
 2 files changed, 12 insertions(+), 33 deletions(-)

diff --git a/block/nvme.c b/block/nvme.c
index 5b744c2bda..100b38b592 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -25,6 +25,7 @@
 #include "qemu/vfio-helpers.h"
 #include "block/block-io.h"
 #include "block/block_int.h"
+#include "sysemu/block-backend.h"
 #include "sysemu/replay.h"
 #include "trace.h"
 
@@ -119,7 +120,6 @@ struct BDRVNVMeState {
     int blkshift;
 
     uint64_t max_transfer;
-    bool plugged;
 
     bool supports_write_zeroes;
     bool supports_discard;
@@ -282,7 +282,7 @@ static void nvme_kick(NVMeQueuePair *q)
 {
     BDRVNVMeState *s = q->s;
 
-    if (s->plugged || !q->need_kick) {
+    if (!q->need_kick) {
         return;
     }
     trace_nvme_kick(s, q->index);
@@ -387,10 +387,6 @@ static bool nvme_process_completion(NVMeQueuePair *q)
     NvmeCqe *c;
 
     trace_nvme_process_completion(s, q->index, q->inflight);
-    if (s->plugged) {
-        trace_nvme_process_completion_queue_plugged(s, q->index);
-        return false;
-    }
 
     /*
      * Support re-entrancy when a request cb() function invokes aio_poll().
@@ -480,6 +476,15 @@ static void nvme_trace_command(const NvmeCmd *cmd)
     }
 }
 
+static void nvme_unplug_fn(void *opaque)
+{
+    NVMeQueuePair *q = opaque;
+
+    QEMU_LOCK_GUARD(&q->lock);
+    nvme_kick(q);
+    nvme_process_completion(q);
+}
+
 static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
                                 NvmeCmd *cmd, BlockCompletionFunc cb,
                                 void *opaque)
@@ -496,8 +501,7 @@ static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
            q->sq.tail * NVME_SQ_ENTRY_BYTES, cmd, sizeof(*cmd));
     q->sq.tail = (q->sq.tail + 1) % NVME_QUEUE_SIZE;
     q->need_kick++;
-    nvme_kick(q);
-    nvme_process_completion(q);
+    blk_io_plug_call(nvme_unplug_fn, q);
     qemu_mutex_unlock(&q->lock);
 }
 
@@ -1567,27 +1571,6 @@ static void nvme_attach_aio_context(BlockDriverState *bs,
     }
 }
 
-static void coroutine_fn nvme_co_io_plug(BlockDriverState *bs)
-{
-    BDRVNVMeState *s = bs->opaque;
-    assert(!s->plugged);
-    s->plugged = true;
-}
-
-static void coroutine_fn nvme_co_io_unplug(BlockDriverState *bs)
-{
-    BDRVNVMeState *s = bs->opaque;
-    assert(s->plugged);
-    s->plugged = false;
-    for (unsigned i = INDEX_IO(0); i < s->queue_count; i++) {
-        NVMeQueuePair *q = s->queues[i];
-        qemu_mutex_lock(&q->lock);
-        nvme_kick(q);
-        nvme_process_completion(q);
-        qemu_mutex_unlock(&q->lock);
-    }
-}
-
 static bool nvme_register_buf(BlockDriverState *bs, void *host, size_t size,
                               Error **errp)
 {
@@ -1664,9 +1647,6 @@ static BlockDriver bdrv_nvme = {
     .bdrv_detach_aio_context  = nvme_detach_aio_context,
     .bdrv_attach_aio_context  = nvme_attach_aio_context,
 
-    .bdrv_co_io_plug          = nvme_co_io_plug,
-    .bdrv_co_io_unplug        = nvme_co_io_unplug,
-
     .bdrv_register_buf        = nvme_register_buf,
     .bdrv_unregister_buf      = nvme_unregister_buf,
 };
diff --git a/block/trace-events b/block/trace-events
index 32665158d6..048ad27519 100644
--- a/block/trace-events
+++ b/block/trace-events
@@ -141,7 +141,6 @@ nvme_kick(void *s, unsigned q_index) "s %p q #%u"
 nvme_dma_flush_queue_wait(void *s) "s %p"
 nvme_error(int cmd_specific, int sq_head, int sqid, int cid, int status) "cmd_specific %d sq_head %d sqid %d cid %d status 0x%x"
 nvme_process_completion(void *s, unsigned q_index, int inflight) "s %p q #%u inflight %d"
-nvme_process_completion_queue_plugged(void *s, unsigned q_index) "s %p q #%u"
 nvme_complete_command(void *s, unsigned q_index, int cid) "s %p q #%u cid %d"
 nvme_submit_command(void *s, unsigned q_index, int cid) "s %p q #%u cid %d"
 nvme_submit_command_raw(int c0, int c1, int c2, int c3, int c4, int c5, int c6, int c7) "%02x %02x %02x %02x %02x %02x %02x %02x"
-- 
2.40.1
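
[Editor's note] For readers who haven't seen the new API yet, the toy
program below sketches the deferred-call pattern that blk_io_plug_call()
provides: each submission only bumps need_kick, and the doorbell is rung
once per batch from the unplug callback. This is a standalone model, not
QEMU code; plug_call(), plug_flush(), FakeQueue, fake_submit(), and
fake_unplug_fn() are hypothetical names, the model is single-threaded,
and it holds only one pending callback, whereas the real API keeps a
per-thread list of deferred calls and runs a callback immediately when
no plug section is active.

/* plugcall.c - toy model of deferred-call batching (hypothetical names). */
#include <stdio.h>

typedef void (*UnplugFn)(void *opaque);

/* One pending deferred call; QEMU keeps a per-thread list instead. */
static UnplugFn pending_fn;
static void *pending_opaque;

/*
 * Schedule fn(opaque) to run once when the current batch is flushed.
 * Duplicate fn/opaque pairs are coalesced, so N submissions to the same
 * queue produce a single callback.
 */
static void plug_call(UnplugFn fn, void *opaque)
{
    if (pending_fn == fn && pending_opaque == opaque) {
        return; /* already scheduled for this batch */
    }
    pending_fn = fn;
    pending_opaque = opaque;
}

/* End of batch: run the deferred call exactly once. */
static void plug_flush(void)
{
    if (pending_fn) {
        pending_fn(pending_opaque);
        pending_fn = NULL;
        pending_opaque = NULL;
    }
}

/* Stand-in for NVMeQueuePair. */
typedef struct {
    int need_kick; /* submissions not yet visible to the device */
} FakeQueue;

static void fake_unplug_fn(void *opaque)
{
    FakeQueue *q = opaque;

    printf("kick: one doorbell write covers %d submissions\n", q->need_kick);
    q->need_kick = 0;
}

/* Mirrors the new nvme_submit_command() tail: enqueue, defer the kick. */
static void fake_submit(FakeQueue *q)
{
    q->need_kick++;
    plug_call(fake_unplug_fn, q);
}

int main(void)
{
    FakeQueue q = { 0 };

    fake_submit(&q);
    fake_submit(&q);
    fake_submit(&q);
    plug_flush(); /* prints once, not three times */
    return 0;
}

Compiled with a C compiler, the three fake_submit() calls print a single
"kick" line, which is the point of the conversion: one doorbell write
(and one completion-processing pass) per batch rather than per request.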