[XenPPC] [linux-ppc-2.6] [LINUX][XEN][POWERPC] More accurate Grant Table Ops
# HG changeset patch
# User Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
# Node ID f18ec562c11cabed94d448a12754660c63497fac
# Parent 2765c83b576d5af048d381c481e33f597c2be258
[LINUX][XEN][POWERPC] More accurate Grant Table Ops

This patch adds the following:
 - More DBG() points of interest
 - If we run out of Bolted PTEs, simply report it and let the
   "natural" page fault occur
 - Use the page counters to track PTE redundancies
 - BUG() if GNTTABOP_transfer is ever used
 - Add GNTTABOP_copy

The use of page counters may have SMP ramifications since the Linear
map inits the refcount to 1 and there are no atomic methods to
inc/dec and see if counter == 1.

Signed-off-by: Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
---
 arch/powerpc/platforms/xen/gnttab.c |  109 +++++++++++++++++++++++++++---------
 1 files changed, 82 insertions(+), 27 deletions(-)

diff -r 2765c83b576d -r f18ec562c11c arch/powerpc/platforms/xen/gnttab.c
--- a/arch/powerpc/platforms/xen/gnttab.c	Mon Oct 09 19:57:42 2006 -0400
+++ b/arch/powerpc/platforms/xen/gnttab.c	Tue Oct 10 09:48:33 2006 -0400
@@ -39,6 +39,8 @@ static long map_to_linear(ulong paddr)
 	mode = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;
 
 	vaddr = (ulong)__va(paddr);
+	DBG("%s: 0x%lx: 0x%x\n",
+	    __func__, paddr, page_count(virt_to_page(vaddr)));
 	{
 		unsigned long vpn, hash, hpteg;
 		unsigned long vsid = get_kernel_vsid(vaddr);
@@ -57,25 +59,13 @@ static long map_to_linear(ulong paddr)
 		BUG_ON(!ppc_md.hpte_insert);
 		slot = ppc_md.hpte_insert(hpteg, va, paddr, tmp_mode,
 					  HPTE_V_BOLTED, psize);
-		BUG_ON(slot < 0);
+		if (slot < 0)
+			printk(KERN_EMERG
+			       "%s: no more bolted entries "
+			       "HTAB[0x%lx]: 0x%lx\n",
+			       __func__, hpteg, paddr);
 	}
 	return slot;
-}
-
-static void gnttab_post_map_grant_ref(
-	struct gnttab_map_grant_ref *map, int count)
-{
-	int i;
-	long slot;
-
-	for (i = 0 ; i < count; i++) {
-		ulong pa = map[i].dev_bus_addr;
-		BUG_ON(pa < foreign_map_base || pa >= foreign_map_end);
-
-		slot = map_to_linear(pa);
-		/* store the slot somewhere */
-		map[i].host_addr = (ulong)__va(pa);
-	}
 }
 
 static unsigned long get_hpte_vsid(ulong slot)
@@ -128,29 +118,89 @@ static long find_hpte_slot(unsigned long
 	return -1;
 }
 
+static long find_map_slot(ulong ea)
+{
+	int psize = MMU_PAGE_4K;
+	ulong vsid;
+	ulong va;
+
+	vsid = get_kernel_vsid(ea);
+	va = (vsid << 28) | (ea & 0x0fffffff);
+
+	return find_hpte_slot(va, psize);
+}
+
+
 static void gnttab_pre_unmap_grant_ref(
 	struct gnttab_unmap_grant_ref *unmap, int count)
 {
 	long slot;
-	ulong vsid;
-	ulong va;
-	int psize = MMU_PAGE_4K;
 	int i;
 	ulong ea;
 	unsigned long dummy1, dummy2;
 
 	for (i = 0 ; i < count; i++) {
+		struct page *page;
+
 		ea = unmap[i].host_addr;
-		vsid = get_kernel_vsid(ea);
-		va = (vsid << 28) | (ea & 0x0fffffff);
-
-		slot = find_hpte_slot(va, psize);
-		BUG_ON(slot < 0);
-
+		page = virt_to_page(ea);
+
+		/* Unfortunately, there is no put_page_testone() like
+		 * put_page_testzero(). The Linear Map starts all
+		 * pages with a count of 1, so there may be SMP issues
+		 * here. */
+
+		put_page(page);
+		if (page_count(page) > 1) {
+			DBG("%s: skip: 0x%lx\n", __func__, ea);
+			continue;
+		}
+		slot = find_map_slot(ea);
+		if (slot < 0) {
+			printk(KERN_EMERG "%s: PTE not found: 0x%lx\n",
+			       __func__, ea);
+			continue;
+		}
+
+		DBG("%s: 0x%lx: 0x%x\n",
+		    __func__, ea, page_count(virt_to_page(ea)));
 		plpar_pte_remove(0, slot, 0, &dummy1, &dummy2);
 		DBG("%s: remove_pages(0x%lx, 0x%lx)\n", __func__,
 		    unmap[i].host_addr, unmap[i].dev_bus_addr);
+	}
+}
+
+static void gnttab_post_map_grant_ref(
+	struct gnttab_map_grant_ref *map, int count)
+{
+	int i;
+	long slot;
+
+	for (i = 0 ; i < count; i++) {
+		ulong pa = map[i].dev_bus_addr;
+		struct page *page;
+
+		BUG_ON(pa < foreign_map_base || pa >= foreign_map_end);
+
+		/* ??? store the slot somewhere ??? */
+		map[i].host_addr = (ulong)__va(pa);
+		page = virt_to_page(map[i].host_addr);
+
+		if (page_count(page) == 1) {
+#ifdef DEBUG
+			/* we need to get smarted than this */
+			slot = find_map_slot((ulong)__va(pa));
+			if (slot >= 0) {
+				DBG("%s: redundant 0x%lx\n", __func__, pa);
+				continue;
+			}
+#endif
+			slot = map_to_linear(pa);
+		} else {
+			DBG("%s: skip 0x%lx\n", __func__, pa);
+		}
+		get_page(page);
 	}
 }
@@ -175,7 +225,8 @@ int HYPERVISOR_grant_table_op(unsigned i
 		memcpy(&setup, op, sizeof(setup));
 		argsize = sizeof(setup);
 
-		frame_list = xencomm_create_inline(xen_guest_handle(setup.frame_list));
+		frame_list = xencomm_create_inline(
+			xen_guest_handle(setup.frame_list));
 
 		set_xen_guest_handle(setup.frame_list, frame_list);
 		memcpy(op, &setup, sizeof(setup));
@@ -185,6 +236,10 @@ int HYPERVISOR_grant_table_op(unsigned i
 		argsize = sizeof(struct gnttab_dump_table);
 		break;
 	case GNTTABOP_transfer:
+		BUG();
+		argsize = sizeof(struct gnttab_transfer);
+		break;
+	case GNTTABOP_copy:
 		argsize = sizeof(struct gnttab_transfer);
 		break;
 	default:
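As a reading aid, the counting scheme described above boils down to the
following standalone sketch. It is not part of the patch: it is plain
user-space C, and struct page, grant_map() and grant_unmap() are mock-ups
standing in for the kernel's page counters and for
gnttab_post_map_grant_ref()/gnttab_pre_unmap_grant_ref(). The linear map
leaves every page at a count of 1, only the first grant mapping inserts a
bolted HPTE, later mappings just bump the count, and the HPTE is removed
only when the count falls back to 1. Because the decrement and the test
are separate steps, the SMP caveat from the description is visible here
as well.

/*
 * Standalone model of the page-count bookkeeping used by the patch.
 * Illustration only; these types and helpers are not the kernel API.
 */
#include <stdio.h>

struct page {
	int count;	/* the linear map leaves every page at 1 */
};

/* Grant map: only the first mapping needs a bolted HPTE. */
static void grant_map(struct page *pg)
{
	if (pg->count == 1)
		printf("insert bolted HPTE (map_to_linear)\n");
	else
		printf("skip: page already has a grant mapping\n");
	pg->count++;			/* get_page() in the patch */
}

/* Grant unmap: only the last mapping removes the HPTE. */
static void grant_unmap(struct page *pg)
{
	pg->count--;			/* put_page() in the patch */
	if (pg->count > 1) {
		printf("skip: other grant mappings remain\n");
		return;
	}
	printf("remove HPTE (plpar_pte_remove)\n");
}

int main(void)
{
	struct page pg = { .count = 1 };

	grant_map(&pg);		/* count 1 -> 2: insert */
	grant_map(&pg);		/* count 2 -> 3: skip   */
	grant_unmap(&pg);	/* count 3 -> 2: skip   */
	grant_unmap(&pg);	/* count 2 -> 1: remove */
	return 0;
}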
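The comment in gnttab_pre_unmap_grant_ref() notes that there is no
put_page_testone() to pair with put_page_testzero(). A helper of that
shape would close the decrement-then-test window mentioned in the
description. The sketch below is hypothetical: the name and signature are
made up for illustration, and it uses C11 atomics rather than the kernel's
atomic_t interface.

/*
 * Hypothetical put_page_testone(): drop a reference and report, in one
 * atomic step, whether the count fell back to the linear-map value of 1.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool put_page_testone(atomic_int *count)
{
	/* fetch_sub returns the old value; old == 2 means we just hit 1 */
	return atomic_fetch_sub_explicit(count, 1, memory_order_acq_rel) == 2;
}

int main(void)
{
	atomic_int count = 2;	/* linear map (1) + one grant mapping */

	if (put_page_testone(&count))
		printf("last grant mapping gone, safe to remove the HPTE\n");
	return 0;
}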