[Xen-changelog] [xen-unstable] x86/mm: Ensure maps used by nested hvm code cannot be paged out
# HG changeset patch
# User Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
# Date 1322760084 0
# Node ID f71ecec8be2e6bfd3ad9543395194514ef8c8bd9
# Parent  3075955bbea4bf9430a3b800ff157e99ad2d8fd0
x86/mm: Ensure maps used by nested hvm code cannot be paged out

The nested hvm code maps pages of the guest hvm. These maps live beyond
a hypervisor entry/exit pair, and thus their liveness cannot be ensured
with get_gfn/put_gfn critical sections. Ensure their liveness by
increasing the page ref count, instead.

Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
Committed-by: Tim Deegan <tim@xxxxxxx>
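Condensed to its essence, the change swaps a gfn critical section for a page
reference as the thing that keeps the backing frame alive. The sketch below is
illustrative only: map_long_lived()/unmap_long_lived() are hypothetical names,
error handling is trimmed, and the shared-page (dom_cow) fallback of the real
__hvm_map_guest_frame() is omitted.

    /* Hypothetical helpers sketching the pattern; not part of the patch. */
    static void *map_long_lived(struct domain *d, unsigned long gfn)
    {
        p2m_type_t p2mt;
        unsigned long mfn = mfn_x(get_gfn(d, gfn, &p2mt));
        void *map = NULL;

        /* A page ref, unlike a get_gfn/put_gfn critical section, survives
         * hypervisor entry/exit and prevents the frame being paged out. */
        if ( mfn_valid(mfn) && get_page(mfn_to_page(mfn), d) )
            map = map_domain_page(mfn);

        put_gfn(d, gfn);  /* safe: the page ref, not the gfn, pins the frame */
        return map;
    }

    static void unmap_long_lived(void *map)
    {
        if ( map )
        {
            unsigned long mfn = domain_page_map_to_mfn(map);
            unmap_domain_page(map);
            put_page(mfn_to_page(mfn));  /* drop the ref taken at map time */
        }
    }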
---

diff -r 3075955bbea4 -r f71ecec8be2e xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Thu Dec 01 17:21:24 2011 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Thu Dec 01 17:21:24 2011 +0000
@@ -1823,12 +1823,16 @@
     return 0;
 }
 
-/* We leave this function holding a lock on the p2m entry */
+/* On non-NULL return, we leave this function holding an additional
+ * ref on the underlying mfn, if any */
 static void *__hvm_map_guest_frame(unsigned long gfn, bool_t writable)
 {
+    void *map;
     unsigned long mfn;
     p2m_type_t p2mt;
+    struct page_info *pg;
     struct domain *d = current->domain;
+    int rc;
 
     mfn = mfn_x(writable
                 ? get_gfn_unshare(d, gfn, &p2mt)
@@ -1850,7 +1854,21 @@
     if ( writable )
         paging_mark_dirty(d, mfn);
 
-    return map_domain_page(mfn);
+    /* Get a ref on the page, considering that it could be shared */
+    pg = mfn_to_page(mfn);
+    rc = get_page(pg, d);
+    if ( !rc && !writable )
+        /* Page could be shared */
+        rc = get_page(pg, dom_cow);
+    if ( !rc )
+    {
+        put_gfn(d, gfn);
+        return NULL;
+    }
+
+    map = map_domain_page(mfn);
+    put_gfn(d, gfn);
+    return map;
 }
 
 void *hvm_map_guest_frame_rw(unsigned long gfn)
@@ -1866,11 +1884,16 @@
 void hvm_unmap_guest_frame(void *p)
 {
     if ( p )
+    {
+        unsigned long mfn = domain_page_map_to_mfn(p);
         unmap_domain_page(p);
+        put_page(mfn_to_page(mfn));
+    }
 }
 
-static void *hvm_map_entry(unsigned long va, unsigned long *gfn)
+static void *hvm_map_entry(unsigned long va)
 {
+    unsigned long gfn;
     uint32_t pfec;
     char *v;
 
@@ -1887,11 +1910,11 @@
      * treat it as a kernel-mode read (i.e. no access checks).
      */
     pfec = PFEC_page_present;
-    *gfn = paging_gva_to_gfn(current, va, &pfec);
+    gfn = paging_gva_to_gfn(current, va, &pfec);
     if ( (pfec == PFEC_page_paged) ||
          (pfec == PFEC_page_shared) )
         goto fail;
 
-    v = hvm_map_guest_frame_rw(*gfn);
+    v = hvm_map_guest_frame_rw(gfn);
     if ( v == NULL )
         goto fail;
 
@@ -1902,11 +1925,9 @@
     return NULL;
 }
 
-static void hvm_unmap_entry(void *p, unsigned long gfn)
+static void hvm_unmap_entry(void *p)
 {
     hvm_unmap_guest_frame(p);
-    if ( p && (gfn != INVALID_GFN) )
-        put_gfn(current->domain, gfn);
 }
 
 static int hvm_load_segment_selector(
@@ -1918,7 +1939,6 @@
     int fault_type = TRAP_invalid_tss;
     struct cpu_user_regs *regs = guest_cpu_user_regs();
     struct vcpu *v = current;
-    unsigned long pdesc_gfn = INVALID_GFN;
 
     if ( regs->eflags & X86_EFLAGS_VM )
     {
@@ -1952,7 +1972,7 @@
     if ( ((sel & 0xfff8) + 7) > desctab.limit )
         goto fail;
 
-    pdesc = hvm_map_entry(desctab.base + (sel & 0xfff8), &pdesc_gfn);
+    pdesc = hvm_map_entry(desctab.base + (sel & 0xfff8));
     if ( pdesc == NULL )
         goto hvm_map_fail;
 
@@ -2012,7 +2032,7 @@
         desc.b |= 0x100;
 
  skip_accessed_flag:
-    hvm_unmap_entry(pdesc, pdesc_gfn);
+    hvm_unmap_entry(pdesc);
 
     segr.base = (((desc.b << 0) & 0xff000000u) |
                  ((desc.b << 16) & 0x00ff0000u) |
@@ -2028,7 +2048,7 @@
     return 0;
 
  unmap_and_fail:
-    hvm_unmap_entry(pdesc, pdesc_gfn);
+    hvm_unmap_entry(pdesc);
  fail:
     hvm_inject_exception(fault_type, sel & 0xfffc, 0);
  hvm_map_fail:
@@ -2043,7 +2063,7 @@
     struct cpu_user_regs *regs = guest_cpu_user_regs();
     struct segment_register gdt, tr, prev_tr, segr;
    struct desc_struct *optss_desc = NULL, *nptss_desc = NULL, tss_desc;
-    unsigned long eflags, optss_gfn = INVALID_GFN, nptss_gfn = INVALID_GFN;
+    unsigned long eflags;
     int exn_raised, rc;
     struct {
         u16 back_link,__blh;
@@ -2069,11 +2089,11 @@
         goto out;
     }
 
-    optss_desc = hvm_map_entry(gdt.base + (prev_tr.sel & 0xfff8), &optss_gfn);
+    optss_desc = hvm_map_entry(gdt.base + (prev_tr.sel & 0xfff8));
     if ( optss_desc == NULL )
         goto out;
 
-    nptss_desc = hvm_map_entry(gdt.base + (tss_sel & 0xfff8), &nptss_gfn);
+    nptss_desc = hvm_map_entry(gdt.base + (tss_sel & 0xfff8));
     if ( nptss_desc == NULL )
         goto out;
 
@@ -2238,8 +2258,8 @@
     }
 
  out:
-    hvm_unmap_entry(optss_desc, optss_gfn);
-    hvm_unmap_entry(nptss_desc, nptss_gfn);
+    hvm_unmap_entry(optss_desc);
+    hvm_unmap_entry(nptss_desc);
 }
 
 #define HVMCOPY_from_guest (0u<<0)

diff -r 3075955bbea4 -r f71ecec8be2e xen/arch/x86/hvm/svm/nestedsvm.c
--- a/xen/arch/x86/hvm/svm/nestedsvm.c  Thu Dec 01 17:21:24 2011 +0000
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c  Thu Dec 01 17:21:24 2011 +0000
@@ -81,10 +81,6 @@
         if (nv->nv_vvmcx == NULL)
             return 0;
         nv->nv_vvmcxaddr = vmcbaddr;
-        /* put_gfn here even though the map survives beyond this caller.
-         * The map can likely survive beyond a hypervisor exit, thus we
-         * need to put the gfn */
-        put_gfn(current->domain, vmcbaddr >> PAGE_SHIFT);
     }
 
     return 1;
@@ -358,7 +354,6 @@
     ioport_80 = test_bit(0x80, ns_viomap);
     ioport_ed = test_bit(0xed, ns_viomap);
     hvm_unmap_guest_frame(ns_viomap);
-    put_gfn(current->domain, svm->ns_iomap_pa >> PAGE_SHIFT);
 
     svm->ns_iomap = nestedhvm_vcpu_iomap_get(ioport_80, ioport_ed);
@@ -889,7 +884,6 @@
     enabled = test_bit(port, io_bitmap);
     hvm_unmap_guest_frame(io_bitmap);
-    put_gfn(current->domain, gfn);
 
     if (!enabled)
         return NESTEDHVM_VMEXIT_HOST;

diff -r 3075955bbea4 -r f71ecec8be2e xen/arch/x86/hvm/vmx/vvmx.c
--- a/xen/arch/x86/hvm/vmx/vvmx.c       Thu Dec 01 17:21:24 2011 +0000
+++ b/xen/arch/x86/hvm/vmx/vvmx.c       Thu Dec 01 17:21:24 2011 +0000
@@ -560,10 +560,7 @@
     if (nvmx->iobitmap[index])
         hvm_unmap_guest_frame (nvmx->iobitmap[index]);
     gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, vmcs_reg);
-    nvmx->iobitmap[index] = hvm_map_guest_frame_ro (gpa >> PAGE_SHIFT);
-    /* See comment in nestedsvm_vmcb_map re putting this gfn and
-     * liveness of the map it backs */
-    put_gfn(current->domain, gpa >> PAGE_SHIFT);
+    nvmx->iobitmap[index] = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT);
 }
 
 static inline void map_io_bitmap_all(struct vcpu *v)
@@ -1138,12 +1135,9 @@
 
     if ( nvcpu->nv_vvmcxaddr == VMCX_EADDR )
     {
-        nvcpu->nv_vvmcx = hvm_map_guest_frame_rw (gpa >> PAGE_SHIFT);
+        nvcpu->nv_vvmcx = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT);
         nvcpu->nv_vvmcxaddr = gpa;
         map_io_bitmap_all (v);
-        /* See comment in nestedsvm_vmcb_map regarding putting this
-         * gfn and liveness of the map that uses it */
-        put_gfn(current->domain, gpa >> PAGE_SHIFT);
     }
 
     vmreturn(regs, VMSUCCEED);
@@ -1205,7 +1199,6 @@
         if ( vvmcs )
             __set_vvmcs(vvmcs, NVMX_LAUNCH_STATE, 0);
         hvm_unmap_guest_frame(vvmcs);
-        put_gfn(current->domain, gpa >> PAGE_SHIFT);
     }
 
     vmreturn(regs, VMSUCCEED);
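The caller-visible contract after this change: hvm_map_guest_frame_ro()/_rw()
return holding a page reference (and no gfn), and hvm_unmap_guest_frame()
drops that reference, which is why the explicit put_gfn() calls in the nested
SVM/VMX paths above are removed. A hypothetical caller (peek_guest_byte() is
invented for illustration):

    /* Illustrative only; not a function in the tree. */
    static int peek_guest_byte(unsigned long gfn)
    {
        uint8_t *p = hvm_map_guest_frame_ro(gfn);   /* takes a page ref */
        int byte;

        if ( p == NULL )   /* paged out, unshareable, or not valid RAM */
            return -1;

        byte = p[0];       /* the map stays live even across VMEXITs */
        hvm_unmap_guest_frame(p);   /* drops the page ref */
        return byte;
    }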
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog