[Xen-devel] [PATCH 11 of 12] blkback/blktap2: simplify address translations
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1260981852 0
# Node ID 317cdc16a674f4aa4c57b4fdae004ca5fcc24712
# Parent 6b5c16a30bc5e7d771dec05c7cee597b71a695c2
blkback/blktap2: simplify address translations

There are quite a number of places where e.g. page->va->page translations
happen. Besides yielding smaller code (source and binary), a second goal is
to make it easier to determine where virtual addresses of pages allocated
through alloc_empty_pages_and_pagevec() are really used (in turn in order
to determine whether using highmem pages would be possible there).

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>

linux-2.6-pvops:
 * Stripped drivers/xen/gntdev/*
 * Stripped drivers/xen/netback/*

Signed-off-by: Daniel Stodden <daniel.stodden@xxxxxxxxxx>
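As a quick illustration of the round trip being removed (not part of the
patch): every mock_* name below is an invented stand-in for struct page, the
pending_pages[]/foreign_pages[] arrays, page_to_pfn(), pfn_to_kaddr() and
virt_to_page(). The point is only that page->va->page composes to an
identity, so callers that merely need the struct page * can index the page
array directly, which is what the new pending_page() macro and
request_to_page() helper do.

/*
 * Standalone userspace mock of the page->va->page round trip.
 * Every mock_* identifier is invented for this sketch; in the drivers
 * the corresponding pieces are struct page, pending_pages[] /
 * foreign_pages[], page_to_pfn(), pfn_to_kaddr() and virt_to_page().
 */
#include <assert.h>
#include <stdio.h>

#define MOCK_PAGE_SHIFT 12
#define MOCK_NR_PAGES   4

struct mock_page { unsigned long pfn; };

static struct mock_page mock_pages[MOCK_NR_PAGES];

/* page -> kernel va, i.e. pfn_to_kaddr(page_to_pfn(page)) */
static unsigned long mock_page_to_va(const struct mock_page *pg)
{
	return pg->pfn << MOCK_PAGE_SHIFT;
}

/* kernel va -> page, i.e. virt_to_page(va) */
static struct mock_page *mock_va_to_page(unsigned long va)
{
	return &mock_pages[(va >> MOCK_PAGE_SHIFT) - mock_pages[0].pfn];
}

int main(void)
{
	int seg;

	for (seg = 0; seg < MOCK_NR_PAGES; seg++)
		mock_pages[seg].pfn = 0x1000 + seg;

	for (seg = 0; seg < MOCK_NR_PAGES; seg++) {
		/* old style: array -> va -> page again */
		struct mock_page *via_va =
			mock_va_to_page(mock_page_to_va(&mock_pages[seg]));
		/* new style (pending_page()/request_to_page()): index directly */
		struct mock_page *direct = &mock_pages[seg];

		assert(via_va == direct);
		printf("seg %d: va 0x%lx page %p\n",
		       seg, mock_page_to_va(direct), (void *)direct);
	}
	return 0;
}

Dropping the va step also lines up with the highmem goal stated above: a
highmem page has no permanent kernel virtual address, so the va-based round
trip could not be used there, while direct struct page * indexing still
works.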
diff -r 6b5c16a30bc5 -r 317cdc16a674 drivers/xen/blkback/blkback.c
--- a/drivers/xen/blkback/blkback.c	Fri Dec 11 08:47:19 2009 +0000
+++ b/drivers/xen/blkback/blkback.c	Wed Dec 16 16:44:12 2009 +0000
@@ -99,9 +99,11 @@
 	return (req - pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
 }
 
+#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]
+
 static inline unsigned long vaddr(pending_req_t *req, int seg)
 {
-	unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
+	unsigned long pfn = page_to_pfn(pending_page(req, seg));
 	return (unsigned long)pfn_to_kaddr(pfn);
 }
 
@@ -178,7 +180,7 @@
 		handle = pending_handle(req, i);
 		if (handle == BLKBACK_INVALID_HANDLE)
 			continue;
-		blkback_pagemap_clear(virt_to_page(vaddr(req, i)));
+		blkback_pagemap_clear(pending_page(req, i));
 		gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
 				    GNTMAP_host_map, handle);
 		pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
@@ -456,7 +458,7 @@
 			ret |= 1;
 		} else {
 			blkback_pagemap_set(vaddr_pagenr(pending_req, i),
-					    virt_to_page(vaddr(pending_req, i)),
+					    pending_page(pending_req, i),
 					    blkif->domid, req->handle,
 					    req->seg[i].gref);
 		}
@@ -466,8 +468,8 @@
 		if (ret)
 			continue;
 
-		set_phys_to_machine(__pa(vaddr(
-			pending_req, i)) >> PAGE_SHIFT,
+		set_phys_to_machine(
+			page_to_pfn(pending_page(pending_req, i)),
 			FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
 		seg[i].buf = map[i].dev_bus_addr |
 			(req->seg[i].first_sect << 9);
@@ -498,7 +500,7 @@
 
 		while ((bio == NULL) ||
 		       (bio_add_page(bio,
-				     virt_to_page(vaddr(pending_req, i)),
+				     pending_page(pending_req, i),
 				     seg[i].nsec << 9,
 				     seg[i].buf & ~PAGE_MASK) == 0)) {
 			if (bio) {
diff -r 6b5c16a30bc5 -r 317cdc16a674 drivers/xen/blktap/blktap.h
--- a/drivers/xen/blktap/blktap.h	Fri Dec 11 08:47:19 2009 +0000
+++ b/drivers/xen/blktap/blktap.h	Wed Dec 16 16:44:12 2009 +0000
@@ -241,6 +241,13 @@
 int blktap_request_pool_shrink(void);
 struct blktap_request *blktap_request_allocate(struct blktap *);
 void blktap_request_free(struct blktap *, struct blktap_request *);
-unsigned long request_to_kaddr(struct blktap_request *, int);
+struct page *request_to_page(struct blktap_request *, int);
+
+static inline unsigned long
+request_to_kaddr(struct blktap_request *req, int seg)
+{
+	unsigned long pfn = page_to_pfn(request_to_page(req, seg));
+	return (unsigned long)pfn_to_kaddr(pfn);
+}
 
 #endif
diff -r 6b5c16a30bc5 -r 317cdc16a674 drivers/xen/blktap/device.c
--- a/drivers/xen/blktap/device.c	Fri Dec 11 08:47:19 2009 +0000
+++ b/drivers/xen/blktap/device.c	Wed Dec 16 16:44:12 2009 +0000
@@ -315,15 +315,14 @@
 	down_write(&tap->ring.vma->vm_mm->mmap_sem);
 
 	for (i = 0; i < request->nr_pages; i++) {
+		kvaddr = request_to_kaddr(request, i);
 		BTDBG("request: %p, seg: %d, kvaddr: 0x%08lx, khandle: %u, "
 		      "uvaddr: 0x%08lx, uhandle: %u\n", request, i,
-		      request_to_kaddr(request, i),
-		      request->handles[i].kernel,
+		      kvaddr, request->handles[i].kernel,
 		      MMAP_VADDR(tap->ring.user_vstart, usr_idx, i),
 		      request->handles[i].user);
 
 		if (request->handles[i].kernel == INVALID_GRANT_HANDLE) {
-			kvaddr = request_to_kaddr(request, i);
 			blktap_umap_uaddr(&init_mm, kvaddr);
 			flush_tlb_kernel_page(kvaddr);
 			set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT,
@@ -450,7 +449,7 @@
 
 #ifdef BLKTAP_CHAINED_BLKTAP
 	/* enable chained tap devices */
-	tap_page = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
+	tap_page = request_to_page(request, seg);
 	set_page_private(tap_page, page_private(page));
 	SetPageBlkback(tap_page);
 #endif
@@ -480,7 +479,7 @@
 	struct page *page;
 	int i, grant, err, usr_idx;
 	struct blktap_ring *ring;
-	unsigned long uvaddr, kvaddr, foreign_mfn;
+	unsigned long uvaddr, foreign_mfn;
 
 	if (!table->cnt)
 		return 0;
@@ -498,7 +497,6 @@
 			continue;
 
 		uvaddr = MMAP_VADDR(ring->user_vstart, usr_idx, i);
-		kvaddr = request_to_kaddr(request, i);
 
 		if (unlikely(table->grants[grant].status)) {
 			BTERR("invalid kernel buffer: could not remap it\n");
@@ -526,18 +524,19 @@
 		if (err)
 			continue;
 
-		page = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
+		page = request_to_page(request, i);
 
 		if (!xen_feature(XENFEAT_auto_translated_physmap))
-			set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT,
+			set_phys_to_machine(page_to_pfn(page),
 					    FOREIGN_FRAME(foreign_mfn));
 		else if (vm_insert_page(ring->vma, uvaddr, page))
 			err |= 1;
 
 		BTDBG("pending_req: %p, seg: %d, page: %p, "
-		      "kvaddr: 0x%08lx, khandle: %u, uvaddr: 0x%08lx, "
+		      "kvaddr: 0x%p, khandle: %u, uvaddr: 0x%08lx, "
 		      "uhandle: %u\n", request, i, page,
-		      kvaddr, request->handles[i].kernel,
+		      pfn_to_kaddr(page_to_pfn(page)),
+		      request->handles[i].kernel,
 		      uvaddr, request->handles[i].user);
 	}
 
@@ -585,7 +584,7 @@
 	struct scatterlist *sg;
 	struct blktap_grant_table table;
 	unsigned int fsect, lsect, nr_sects;
-	unsigned long offset, uvaddr, kvaddr;
+	unsigned long offset, uvaddr;
 	struct blkif_request blkif_req, *target;
 
 	err = -1;
@@ -641,18 +640,17 @@
 		}
 
 		uvaddr = MMAP_VADDR(ring->user_vstart, usr_idx, i);
-		kvaddr = request_to_kaddr(request, i);
 		offset = (uvaddr - ring->vma->vm_start) >> PAGE_SHIFT;
-		page = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
+		page = request_to_page(request, i);
 		ring->foreign_map.map[offset] = page;
 		SetPageReserved(page);
 
 		BTDBG("mapped uaddr %08lx to page %p pfn 0x%lx\n",
-		      uvaddr, page, __pa(kvaddr) >> PAGE_SHIFT);
+		      uvaddr, page, page_to_pfn(page));
 		BTDBG("offset: 0x%08lx, pending_req: %p, seg: %d, "
-		      "page: %p, kvaddr: 0x%08lx, uvaddr: 0x%08lx\n",
+		      "page: %p, kvaddr: %p, uvaddr: 0x%08lx\n",
 		      offset, request, i,
-		      page, kvaddr, uvaddr);
+		      page, pfn_to_kaddr(page_to_pfn(page)), uvaddr);
 
 		request->nr_pages++;
 	}
diff -r 6b5c16a30bc5 -r 317cdc16a674 drivers/xen/blktap/request.c
--- a/drivers/xen/blktap/request.c	Fri Dec 11 08:47:19 2009 +0000
+++ b/drivers/xen/blktap/request.c	Wed Dec 16 16:44:12 2009 +0000
@@ -124,13 +124,12 @@
 	kfree(bucket);
 }
 
-unsigned long
-request_to_kaddr(struct blktap_request *req, int seg)
+struct page *
+request_to_page(struct blktap_request *req, int seg)
 {
 	struct blktap_request_handle *handle = blktap_request_to_handle(req);
 	int idx = handle->slot * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
-	unsigned long pfn = page_to_pfn(handle->bucket->foreign_pages[idx]);
-	return (unsigned long)pfn_to_kaddr(pfn);
+	return handle->bucket->foreign_pages[idx];
 }
 
 int

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel