[Xen-devel] [PATCH v9 13/18] xen: purge 'blk' and 'ioreq' from function names in dataplane/xen-block.c
This is a purely cosmetic patch that purges remaining use of 'blk' and
'ioreq' in local function names, and then makes sure all functions are
prefixed with 'xen_block_'.

No functional change.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Acked-by: Anthony Perard <anthony.perard@xxxxxxxxxx>
---
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Stefan Hajnoczi <stefanha@xxxxxxxxxx>
Cc: Kevin Wolf <kwolf@xxxxxxxxxx>
Cc: Max Reitz <mreitz@xxxxxxxxxx>

v2:
 - Add 'xen_block_' prefix
---
 hw/block/dataplane/xen-block.c | 90 +++++++++++++++++-----------------
 1 file changed, 46 insertions(+), 44 deletions(-)

diff --git a/hw/block/dataplane/xen-block.c b/hw/block/dataplane/xen-block.c
index 6788bbf338..8e3965e171 100644
--- a/hw/block/dataplane/xen-block.c
+++ b/hw/block/dataplane/xen-block.c
@@ -67,7 +67,7 @@ struct XenBlockDataPlane {
     AioContext *ctx;
 };
 
-static void ioreq_reset(XenBlockRequest *request)
+static void reset_request(XenBlockRequest *request)
 {
     memset(&request->req, 0, sizeof(request->req));
     request->status = 0;
@@ -86,7 +86,7 @@ static void ioreq_reset(XenBlockRequest *request)
     qemu_iovec_reset(&request->v);
 }
 
-static XenBlockRequest *ioreq_start(XenBlockDataPlane *dataplane)
+static XenBlockRequest *xen_block_start_request(XenBlockDataPlane *dataplane)
 {
     XenBlockRequest *request = NULL;
 
@@ -111,7 +111,7 @@ out:
     return request;
 }
 
-static void ioreq_finish(XenBlockRequest *request)
+static void xen_block_finish_request(XenBlockRequest *request)
 {
     XenBlockDataPlane *dataplane = request->dataplane;
 
@@ -121,12 +121,12 @@ static void ioreq_finish(XenBlockRequest *request)
     dataplane->requests_finished++;
 }
 
-static void ioreq_release(XenBlockRequest *request, bool finish)
+static void xen_block_release_request(XenBlockRequest *request, bool finish)
 {
     XenBlockDataPlane *dataplane = request->dataplane;
 
     QLIST_REMOVE(request, list);
-    ioreq_reset(request);
+    reset_request(request);
     request->dataplane = dataplane;
     QLIST_INSERT_HEAD(&dataplane->freelist, request, list);
     if (finish) {
@@ -140,7 +140,7 @@
  * translate request into iovec + start offset
  * do sanity checks along the way
  */
-static int ioreq_parse(XenBlockRequest *request)
+static int xen_block_parse_request(XenBlockRequest *request)
 {
     XenBlockDataPlane *dataplane = request->dataplane;
     size_t len;
@@ -201,7 +201,7 @@ err:
     return -1;
 }
 
-static int ioreq_grant_copy(XenBlockRequest *request)
+static int xen_block_copy_request(XenBlockRequest *request)
 {
     XenBlockDataPlane *dataplane = request->dataplane;
     XenDevice *xendev = dataplane->xendev;
@@ -247,9 +247,9 @@
     return 0;
 }
 
-static int ioreq_runio_qemu_aio(XenBlockRequest *request);
+static int xen_block_do_aio(XenBlockRequest *request);
 
-static void qemu_aio_complete(void *opaque, int ret)
+static void xen_block_complete_aio(void *opaque, int ret)
 {
     XenBlockRequest *request = opaque;
     XenBlockDataPlane *dataplane = request->dataplane;
@@ -266,7 +266,7 @@
     request->aio_inflight--;
     if (request->presync) {
         request->presync = 0;
-        ioreq_runio_qemu_aio(request);
+        xen_block_do_aio(request);
         goto done;
     }
     if (request->aio_inflight > 0) {
@@ -277,7 +277,7 @@
     case BLKIF_OP_READ:
         /* in case of failure request->aio_errors is increased */
         if (ret == 0) {
-            ioreq_grant_copy(request);
+            xen_block_copy_request(request);
         }
         qemu_vfree(request->buf);
         break;
@@ -293,7 +293,7 @@ static void qemu_aio_complete(void *opaque, int ret)
     }
 
     request->status = request->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
-    ioreq_finish(request);
+    xen_block_finish_request(request);
 
     switch (request->req.operation) {
     case BLKIF_OP_WRITE:
@@ -318,9 +318,9 @@ done:
     aio_context_release(dataplane->ctx);
 }
 
-static bool blk_split_discard(XenBlockRequest *request,
-                              blkif_sector_t sector_number,
-                              uint64_t nr_sectors)
+static bool xen_block_split_discard(XenBlockRequest *request,
+                                    blkif_sector_t sector_number,
+                                    uint64_t nr_sectors)
 {
     XenBlockDataPlane *dataplane = request->dataplane;
     int64_t byte_offset;
@@ -343,7 +343,7 @@ static bool blk_split_discard(XenBlockRequest *request,
         byte_chunk = byte_remaining > limit ? limit : byte_remaining;
         request->aio_inflight++;
         blk_aio_pdiscard(dataplane->blk, byte_offset, byte_chunk,
-                         qemu_aio_complete, request);
+                         xen_block_complete_aio, request);
         byte_remaining -= byte_chunk;
         byte_offset += byte_chunk;
     } while (byte_remaining > 0);
@@ -351,7 +351,7 @@
     return true;
 }
 
-static int ioreq_runio_qemu_aio(XenBlockRequest *request)
+static int xen_block_do_aio(XenBlockRequest *request)
 {
     XenBlockDataPlane *dataplane = request->dataplane;
 
@@ -359,14 +359,15 @@ static int ioreq_runio_qemu_aio(XenBlockRequest *request)
     if (request->req.nr_segments &&
         (request->req.operation == BLKIF_OP_WRITE ||
          request->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
-        ioreq_grant_copy(request)) {
+        xen_block_copy_request(request)) {
         qemu_vfree(request->buf);
         goto err;
     }
 
     request->aio_inflight++;
     if (request->presync) {
-        blk_aio_flush(request->dataplane->blk, qemu_aio_complete, request);
+        blk_aio_flush(request->dataplane->blk, xen_block_complete_aio,
+                      request);
         return 0;
     }
 
@@ -377,7 +378,7 @@ static int ioreq_runio_qemu_aio(XenBlockRequest *request)
                          request->v.size, BLOCK_ACCT_READ);
         request->aio_inflight++;
         blk_aio_preadv(dataplane->blk, request->start, &request->v, 0,
-                       qemu_aio_complete, request);
+                       xen_block_complete_aio, request);
         break;
     case BLKIF_OP_WRITE:
     case BLKIF_OP_FLUSH_DISKCACHE:
@@ -392,12 +393,13 @@ static int ioreq_runio_qemu_aio(XenBlockRequest *request)
                          BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH);
         request->aio_inflight++;
         blk_aio_pwritev(dataplane->blk, request->start, &request->v, 0,
-                        qemu_aio_complete, request);
+                        xen_block_complete_aio, request);
         break;
     case BLKIF_OP_DISCARD:
     {
         struct blkif_request_discard *req = (void *)&request->req;
-        if (!blk_split_discard(request, req->sector_number, req->nr_sectors)) {
+        if (!xen_block_split_discard(request, req->sector_number,
+                                     req->nr_sectors)) {
             goto err;
         }
         break;
     }
@@ -407,17 +409,17 @@ static int ioreq_runio_qemu_aio(XenBlockRequest *request)
         goto err;
     }
 
-    qemu_aio_complete(request, 0);
+    xen_block_complete_aio(request, 0);
 
     return 0;
 
 err:
-    ioreq_finish(request);
+    xen_block_finish_request(request);
     request->status = BLKIF_RSP_ERROR;
     return -1;
 }
 
-static int blk_send_response_one(XenBlockRequest *request)
+static int xen_block_send_response_one(XenBlockRequest *request)
 {
     XenBlockDataPlane *dataplane = request->dataplane;
     int send_notify = 0;
@@ -473,15 +475,15 @@ static int blk_send_response_one(XenBlockRequest *request)
 }
 
 /* walk finished list, send outstanding responses, free requests */
-static void blk_send_response_all(XenBlockDataPlane *dataplane)
+static void xen_block_send_response_all(XenBlockDataPlane *dataplane)
 {
     XenBlockRequest *request;
     int send_notify = 0;
 
     while (!QLIST_EMPTY(&dataplane->finished)) {
         request = QLIST_FIRST(&dataplane->finished);
-        send_notify += blk_send_response_one(request);
-        ioreq_release(request, true);
+        send_notify += xen_block_send_response_one(request);
+        xen_block_release_request(request, true);
     }
     if (send_notify) {
         Error *local_err = NULL;
@@ -495,8 +497,8 @@ static void blk_send_response_all(XenBlockDataPlane *dataplane)
     }
 }
 
-static int blk_get_request(XenBlockDataPlane *dataplane,
-                           XenBlockRequest *request, RING_IDX rc)
+static int xen_block_get_request(XenBlockDataPlane *dataplane,
+                                 XenBlockRequest *request, RING_IDX rc)
 {
     switch (dataplane->protocol) {
     case BLKIF_PROTOCOL_NATIVE: {
@@ -526,7 +528,7 @@ static int blk_get_request(XenBlockDataPlane *dataplane,
     return 0;
 }
 
-static void blk_handle_requests(XenBlockDataPlane *dataplane)
+static void xen_block_handle_requests(XenBlockDataPlane *dataplane)
 {
     RING_IDX rc, rp;
     XenBlockRequest *request;
@@ -537,23 +539,22 @@ static void blk_handle_requests(XenBlockDataPlane *dataplane)
     rp = dataplane->rings.common.sring->req_prod;
     xen_rmb(); /* Ensure we see queued requests up to 'rp'. */
 
-    blk_send_response_all(dataplane);
+    xen_block_send_response_all(dataplane);
     while (rc != rp) {
         /* pull request from ring */
         if (RING_REQUEST_CONS_OVERFLOW(&dataplane->rings.common, rc)) {
             break;
         }
-        request = ioreq_start(dataplane);
+        request = xen_block_start_request(dataplane);
         if (request == NULL) {
             dataplane->more_work++;
             break;
         }
-        blk_get_request(dataplane, request, rc);
+        xen_block_get_request(dataplane, request, rc);
         dataplane->rings.common.req_cons = ++rc;
 
         /* parse them */
-        if (ioreq_parse(request) != 0) {
-
+        if (xen_block_parse_request(request) != 0) {
             switch (request->req.operation) {
             case BLKIF_OP_READ:
                 block_acct_invalid(blk_get_stats(dataplane->blk),
@@ -570,7 +571,7 @@ static void blk_handle_requests(XenBlockDataPlane *dataplane)
                 break;
             };
 
-            if (blk_send_response_one(request)) {
+            if (xen_block_send_response_one(request)) {
                 Error *local_err = NULL;
 
                 xen_device_notify_event_channel(dataplane->xendev,
@@ -580,11 +581,11 @@ static void blk_handle_requests(XenBlockDataPlane *dataplane)
                     error_report_err(local_err);
                 }
             }
-            ioreq_release(request, false);
+            xen_block_release_request(request, false);
             continue;
         }
 
-        ioreq_runio_qemu_aio(request);
+        xen_block_do_aio(request);
     }
 
     if (dataplane->more_work &&
@@ -593,16 +594,16 @@ static void blk_handle_requests(XenBlockDataPlane *dataplane)
     }
 }
 
-static void blk_bh(void *opaque)
+static void xen_block_dataplane_bh(void *opaque)
 {
     XenBlockDataPlane *dataplane = opaque;
 
     aio_context_acquire(dataplane->ctx);
-    blk_handle_requests(dataplane);
+    xen_block_handle_requests(dataplane);
     aio_context_release(dataplane->ctx);
 }
 
-static void blk_event(void *opaque)
+static void xen_block_dataplane_event(void *opaque)
 {
     XenBlockDataPlane *dataplane = opaque;
 
@@ -631,7 +632,8 @@ XenBlockDataPlane *xen_block_dataplane_create(XenDevice *xendev,
     } else {
         dataplane->ctx = qemu_get_aio_context();
     }
-    dataplane->bh = aio_bh_new(dataplane->ctx, blk_bh, dataplane);
+    dataplane->bh = aio_bh_new(dataplane->ctx, xen_block_dataplane_bh,
+                               dataplane);
 
     return dataplane;
 }
@@ -789,7 +791,7 @@ void xen_block_dataplane_start(XenBlockDataPlane *dataplane,
 
     dataplane->event_channel =
         xen_device_bind_event_channel(xendev, event_channel,
-                                      blk_event, dataplane,
+                                      xen_block_dataplane_event, dataplane,
                                       &local_err);
     if (local_err) {
         error_propagate(errp, local_err);
-- 
2.20.1.2.gb21ebb6

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel