[Xen-changelog] [linux-2.6.18-xen] blktap2: use blk_rq_map_sg() here too
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1244108814 -3600
# Node ID f59c5daed527f3a7ddf1270480ec63028c206f31
# Parent  20be7f6d414a6e26aa614ea9f7f3d29167e2d269
blktap2: use blk_rq_map_sg() here too

Just like in blkfront, not doing so can cause the maximum number of
segments check to trigger.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
 drivers/xen/blktap2/blktap.h |    2 +
 drivers/xen/blktap2/device.c |   45 ++++++++++++++++++-------------------------
 2 files changed, 21 insertions(+), 26 deletions(-)

diff -r 20be7f6d414a -r f59c5daed527 drivers/xen/blktap2/blktap.h
--- a/drivers/xen/blktap2/blktap.h      Thu Jun 04 10:45:49 2009 +0100
+++ b/drivers/xen/blktap2/blktap.h      Thu Jun 04 10:46:54 2009 +0100
@@ -4,6 +4,7 @@
 #include <linux/fs.h>
 #include <linux/poll.h>
 #include <linux/cdev.h>
+#include <linux/scatterlist.h>
 
 #include <xen/blkif.h>
 #include <xen/gnttab.h>
@@ -174,6 +175,7 @@ struct blktap {
 
         int                             pending_cnt;
         struct blktap_request          *pending_requests[MAX_PENDING_REQS];
+        struct scatterlist              sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 
         wait_queue_head_t               wq;
         struct list_head                deferred_queue;

diff -r 20be7f6d414a -r f59c5daed527 drivers/xen/blktap2/device.c
--- a/drivers/xen/blktap2/device.c      Thu Jun 04 10:45:49 2009 +0100
+++ b/drivers/xen/blktap2/device.c      Thu Jun 04 10:46:54 2009 +0100
@@ -574,11 +574,10 @@ blktap_device_process_request(struct blk
                               struct blktap_request *request,
                               struct request *req)
 {
-        struct bio *bio;
         struct page *page;
-        struct bio_vec *bvec;
-        int idx, usr_idx, err;
+        int i, usr_idx, err;
         struct blktap_ring *ring;
+        struct scatterlist *sg;
         struct blktap_grant_table table;
         unsigned int fsect, lsect, nr_sects;
         unsigned long offset, uvaddr, kvaddr;
@@ -605,43 +604,39 @@ blktap_device_process_request(struct blk
         nr_sects = 0;
         request->nr_pages = 0;
-        blkif_req.nr_segments = 0;
-        rq_for_each_bio(bio, req) {
-                bio_for_each_segment(bvec, bio, idx) {
-                        BUG_ON(blkif_req.nr_segments ==
-                               BLKIF_MAX_SEGMENTS_PER_REQUEST);
-
-                        fsect = bvec->bv_offset >> 9;
-                        lsect = fsect + (bvec->bv_len >> 9) - 1;
-                        nr_sects += bvec->bv_len >> 9;
-
-                        blkif_req.seg[blkif_req.nr_segments] =
+        blkif_req.nr_segments = blk_rq_map_sg(req->q, req, tap->sg);
+        BUG_ON(blkif_req.nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
+        for (i = 0; i < blkif_req.nr_segments; ++i) {
+                        sg = tap->sg + i;
+                        fsect = sg->offset >> 9;
+                        lsect = fsect + (sg->length >> 9) - 1;
+                        nr_sects += sg->length >> 9;
+
+                        blkif_req.seg[i] =
                                 (struct blkif_request_segment) {
                                         .gref       = 0,
                                         .first_sect = fsect,
                                         .last_sect  = lsect };
 
-                        if (PageBlkback(bvec->bv_page)) {
+                        if (PageBlkback(sg->page)) {
                                 /* foreign page -- use xen */
                                 if (blktap_prep_foreign(tap,
                                                         request,
                                                         &blkif_req,
-                                                        blkif_req.nr_segments,
-                                                        bvec->bv_page,
+                                                        i,
+                                                        sg->page,
                                                         &table))
                                         goto out;
                         } else {
                                 /* do it the old fashioned way */
                                 blktap_map(tap,
                                            request,
-                                           blkif_req.nr_segments,
-                                           bvec->bv_page);
+                                           i,
+                                           sg->page);
                         }
 
-                        uvaddr = MMAP_VADDR(ring->user_vstart,
-                                            usr_idx, blkif_req.nr_segments);
-                        kvaddr = request_to_kaddr(request,
-                                                  blkif_req.nr_segments);
+                        uvaddr = MMAP_VADDR(ring->user_vstart, usr_idx, i);
+                        kvaddr = request_to_kaddr(request, i);
                         offset = (uvaddr - ring->vma->vm_start) >> PAGE_SHIFT;
                         page   = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
                         ring->foreign_map.map[offset] = page;
@@ -651,12 +646,10 @@ blktap_device_process_request(struct blk
                                 uvaddr, page, __pa(kvaddr) >> PAGE_SHIFT);
                         BTDBG("offset: 0x%08lx, pending_req: %p, seg: %d, "
                               "page: %p, kvaddr: 0x%08lx, uvaddr: 0x%08lx\n",
-                              offset, request, blkif_req.nr_segments,
+                              offset, request, i,
                               page, kvaddr, uvaddr);
 
-                        blkif_req.nr_segments++;
                         request->nr_pages++;
-                }
         }
 
         if (blktap_map_foreign(tap, request, &blkif_req, &table))

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
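
For readers unfamiliar with the pattern the patch switches to, here is a minimal
sketch of mapping a block request through blk_rq_map_sg() and checking the
coalesced segment count against a fixed per-request limit. This is not the
blktap2 or blkfront code: MY_MAX_SEGS, struct my_seg and my_map_request() are
illustrative placeholders.

/*
 * Illustrative sketch only -- not the actual blktap2/blkfront code.
 * MY_MAX_SEGS, struct my_seg and my_map_request() are made-up names.
 */
#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

#define MY_MAX_SEGS 11                  /* stand-in for BLKIF_MAX_SEGMENTS_PER_REQUEST */

struct my_seg {
        unsigned int first_sect;        /* first 512-byte sector within the page */
        unsigned int last_sect;         /* last 512-byte sector within the page */
};

/*
 * Coalesce the request's bio_vecs into a scatterlist and build
 * per-segment descriptors.  blk_rq_map_sg() merges adjacent entries,
 * so the count it returns is the one the queue's segment limit refers
 * to; iterating bio_vecs directly can yield a larger number.
 */
static int my_map_request(struct request_queue *q, struct request *req,
                          struct scatterlist *sgl, struct my_seg *seg)
{
        int i, nseg;

        nseg = blk_rq_map_sg(q, req, sgl);
        if (nseg > MY_MAX_SEGS)
                return -EIO;            /* does not fit into a single ring request */

        for (i = 0; i < nseg; ++i) {
                seg[i].first_sect = sgl[i].offset >> 9;
                seg[i].last_sect  = seg[i].first_sect + (sgl[i].length >> 9) - 1;
        }
        return nseg;
}

The block layer enforces its segment limits on merged physical segments, so a
request can contain more raw bio_vec entries than BLKIF_MAX_SEGMENTS_PER_REQUEST
while still being valid; counting bio_vecs one by one, as the removed code did,
is what could trip the BUG_ON the commit message refers to. In the patch above
the scatterlist lives in struct blktap (tap->sg), sized to
BLKIF_MAX_SEGMENTS_PER_REQUEST, which is why blktap.h gains the
<linux/scatterlist.h> include and the sg[] member.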