[Xen-changelog] [xen-unstable] nestedhvm: fix write access fault on ro mapping
# HG changeset patch
# User Christoph Egger <Christoph.Egger@xxxxxxx>
# Date 1343914689 -3600
# Node ID c323f1af7e677e791d13f7d6a62abeec55a4a2d5
# Parent  90bc5e0a67b5ba896bd9cf1f92b345793010adc3
nestedhvm: fix write access fault on ro mapping

Fix write access fault when host npt is mapped read-only. In this case
let the host handle the #NPF. Apply host p2mt to hap-on-hap pagetable
entry. This fixes the l2 guest graphic display refresh problem.

Signed-off-by: Christoph Egger <Christoph.Egger@xxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
Committed-by: Tim Deegan <tim@xxxxxxx>
---

diff -r 90bc5e0a67b5 -r c323f1af7e67 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Thu Aug 02 12:04:31 2012 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Thu Aug 02 14:38:09 2012 +0100
@@ -1278,12 +1278,14 @@ int hvm_hap_nested_page_fault(unsigned l
          * into l1 guest if not fixable. The algorithm is
          * the same as for shadow paging.
          */
-        rv = nestedhvm_hap_nested_page_fault(v, gpa,
+        rv = nestedhvm_hap_nested_page_fault(v, &gpa,
                                              access_r, access_w, access_x);
         switch (rv) {
         case NESTEDHVM_PAGEFAULT_DONE:
             return 1;
-        case NESTEDHVM_PAGEFAULT_ERROR:
+        case NESTEDHVM_PAGEFAULT_L1_ERROR:
+            /* An error occured while translating gpa from
+             * l2 guest address to l1 guest address. */
             return 0;
         case NESTEDHVM_PAGEFAULT_INJECT:
             return -1;
@@ -1291,6 +1293,10 @@ int hvm_hap_nested_page_fault(unsigned l
             if ( !handle_mmio() )
                 hvm_inject_hw_exception(TRAP_gp_fault, 0);
             return 1;
+        case NESTEDHVM_PAGEFAULT_L0_ERROR:
+            /* gpa is now translated to l1 guest address, update gfn. */
+            gfn = gpa >> PAGE_SHIFT;
+            break;
         }
     }
 
diff -r 90bc5e0a67b5 -r c323f1af7e67 xen/arch/x86/mm/hap/nested_hap.c
--- a/xen/arch/x86/mm/hap/nested_hap.c  Thu Aug 02 12:04:31 2012 +0100
+++ b/xen/arch/x86/mm/hap/nested_hap.c  Thu Aug 02 14:38:09 2012 +0100
@@ -141,26 +141,29 @@ nestedhap_fix_p2m(struct vcpu *v, struct
  */
 static int
 nestedhap_walk_L0_p2m(struct p2m_domain *p2m, paddr_t L1_gpa, paddr_t *L0_gpa,
-                      unsigned int *page_order)
+                      p2m_type_t *p2mt,
+                      unsigned int *page_order,
+                      bool_t access_r, bool_t access_w, bool_t access_x)
 {
     mfn_t mfn;
-    p2m_type_t p2mt;
     p2m_access_t p2ma;
     int rc;
 
     /* walk L0 P2M table */
-    mfn = get_gfn_type_access(p2m, L1_gpa >> PAGE_SHIFT, &p2mt, &p2ma,
+    mfn = get_gfn_type_access(p2m, L1_gpa >> PAGE_SHIFT, p2mt, &p2ma,
                               0, page_order);
 
     rc = NESTEDHVM_PAGEFAULT_MMIO;
-    if ( p2m_is_mmio(p2mt) )
+    if ( p2m_is_mmio(*p2mt) )
         goto out;
 
-    rc = NESTEDHVM_PAGEFAULT_ERROR;
-    if ( p2m_is_paging(p2mt) || p2m_is_shared(p2mt) || !p2m_is_ram(p2mt) )
+    rc = NESTEDHVM_PAGEFAULT_L0_ERROR;
+    if ( access_w && p2m_is_readonly(*p2mt) )
         goto out;
 
-    rc = NESTEDHVM_PAGEFAULT_ERROR;
+    if ( p2m_is_paging(*p2mt) || p2m_is_shared(*p2mt) || !p2m_is_ram(*p2mt) )
+        goto out;
+
     if ( !mfn_valid(mfn) )
         goto out;
 
@@ -207,7 +210,7 @@ nestedhap_walk_L1_p2m(struct vcpu *v, pa
  * Returns:
  */
 int
-nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t L2_gpa,
+nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa,
     bool_t access_r, bool_t access_w, bool_t access_x)
 {
     int rv;
@@ -215,19 +218,20 @@ nestedhvm_hap_nested_page_fault(struct v
     struct domain *d = v->domain;
     struct p2m_domain *p2m, *nested_p2m;
     unsigned int page_order_21, page_order_10, page_order_20;
+    p2m_type_t p2mt_10;
 
     p2m = p2m_get_hostp2m(d); /* L0 p2m */
     nested_p2m = p2m_get_nestedp2m(v, nhvm_vcpu_hostcr3(v));
 
     /* walk the L1 P2M table */
-    rv = nestedhap_walk_L1_p2m(v, L2_gpa, &L1_gpa, &page_order_21,
+    rv = nestedhap_walk_L1_p2m(v, *L2_gpa, &L1_gpa, &page_order_21,
         access_r, access_w, access_x);
 
     /* let caller to handle these two cases */
     switch (rv) {
     case NESTEDHVM_PAGEFAULT_INJECT:
         return rv;
-    case NESTEDHVM_PAGEFAULT_ERROR:
+    case NESTEDHVM_PAGEFAULT_L1_ERROR:
         return rv;
     case NESTEDHVM_PAGEFAULT_DONE:
         break;
@@ -237,13 +241,16 @@ nestedhvm_hap_nested_page_fault(struct v
     }
 
     /* ==> we have to walk L0 P2M */
-    rv = nestedhap_walk_L0_p2m(p2m, L1_gpa, &L0_gpa, &page_order_10);
+    rv = nestedhap_walk_L0_p2m(p2m, L1_gpa, &L0_gpa,
+        &p2mt_10, &page_order_10,
+        access_r, access_w, access_x);
 
     /* let upper level caller to handle these two cases */
     switch (rv) {
     case NESTEDHVM_PAGEFAULT_INJECT:
         return rv;
-    case NESTEDHVM_PAGEFAULT_ERROR:
+    case NESTEDHVM_PAGEFAULT_L0_ERROR:
+        *L2_gpa = L1_gpa;
         return rv;
     case NESTEDHVM_PAGEFAULT_DONE:
         break;
@@ -257,9 +264,9 @@ nestedhvm_hap_nested_page_fault(struct v
     page_order_20 = min(page_order_21, page_order_10);
 
     /* fix p2m_get_pagetable(nested_p2m) */
-    nestedhap_fix_p2m(v, nested_p2m, L2_gpa, L0_gpa, page_order_20,
-        p2m_ram_rw,
-        p2m_access_rwx /* FIXME: Should use same permission as l1 guest */);
+    nestedhap_fix_p2m(v, nested_p2m, *L2_gpa, L0_gpa, page_order_20,
+        p2mt_10,
+        p2m_access_rwx /* FIXME: Should use minimum permission. */);
 
     return NESTEDHVM_PAGEFAULT_DONE;
 }
diff -r 90bc5e0a67b5 -r c323f1af7e67 xen/include/asm-x86/hvm/nestedhvm.h
--- a/xen/include/asm-x86/hvm/nestedhvm.h       Thu Aug 02 12:04:31 2012 +0100
+++ b/xen/include/asm-x86/hvm/nestedhvm.h       Thu Aug 02 14:38:09 2012 +0100
@@ -47,11 +47,12 @@ bool_t nestedhvm_vcpu_in_guestmode(struc
     vcpu_nestedhvm(v).nv_guestmode = 0
 
 /* Nested paging */
-#define NESTEDHVM_PAGEFAULT_DONE    0
-#define NESTEDHVM_PAGEFAULT_INJECT  1
-#define NESTEDHVM_PAGEFAULT_ERROR   2
-#define NESTEDHVM_PAGEFAULT_MMIO    3
-int nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t L2_gpa,
+#define NESTEDHVM_PAGEFAULT_DONE      0
+#define NESTEDHVM_PAGEFAULT_INJECT    1
+#define NESTEDHVM_PAGEFAULT_L1_ERROR  2
+#define NESTEDHVM_PAGEFAULT_L0_ERROR  3
+#define NESTEDHVM_PAGEFAULT_MMIO      4
+int nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa,
     bool_t access_r, bool_t access_w, bool_t access_x);
 
 /* IO permission map */
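To make the control flow the patch introduces easier to follow outside the diff, here is a minimal, self-contained C sketch of the idea: a nested #NPF is first translated through the L1 p2m, then through the L0 (host) p2m, and a write hitting a read-only host mapping is reported as an L0 error together with the already-translated address so the caller can fall through to the normal host fault handling. This is an illustration only, not the Xen code; every identifier below (walk_l1_p2m, walk_l0_p2m, NPF_*, nested_fault) is invented for the sketch.

/*
 * Sketch only: simplified model of the L2 -> L1 -> L0 translation path.
 * Names are hypothetical and do not exist in the Xen tree.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

enum npf_result {
    NPF_DONE,       /* nested p2m entry fixed, resume the L2 guest   */
    NPF_INJECT,     /* re-inject the fault into the L1 guest         */
    NPF_L1_ERROR,   /* L2 gpa could not be translated via the L1 p2m */
    NPF_L0_ERROR,   /* host (L0) p2m refused the access (e.g. r/o)   */
    NPF_MMIO
};

/* Stand-in for the L1 p2m walk: L2 gpa -> L1 gpa (pretend identity map). */
static enum npf_result walk_l1_p2m(uint64_t l2_gpa, uint64_t *l1_gpa)
{
    *l1_gpa = l2_gpa;
    return NPF_DONE;
}

/* Stand-in for the L0 p2m walk: L1 gpa -> host address.  A write to a
 * read-only host mapping is the case the patch cares about. */
static enum npf_result walk_l0_p2m(uint64_t l1_gpa, uint64_t *l0_gpa,
                                   bool access_w, bool host_ro)
{
    if (access_w && host_ro)
        return NPF_L0_ERROR;   /* let the host fault handler deal with it */
    *l0_gpa = l1_gpa;
    return NPF_DONE;
}

/* Mirrors the shape of nestedhvm_hap_nested_page_fault(): on an L0 error
 * the translated L1 address is handed back through *gpa. */
static enum npf_result nested_fault(uint64_t *gpa, bool access_w, bool host_ro)
{
    uint64_t l1_gpa, l0_gpa;
    enum npf_result rv;

    rv = walk_l1_p2m(*gpa, &l1_gpa);
    if (rv != NPF_DONE)
        return rv;             /* L1 error or injection: caller handles it */

    rv = walk_l0_p2m(l1_gpa, &l0_gpa, access_w, host_ro);
    if (rv == NPF_L0_ERROR)
        *gpa = l1_gpa;         /* hand the translated address back */
    /* On NPF_DONE the real code would fix the hap-on-hap entry here,
     * using the host p2m type rather than forcing p2m_ram_rw. */
    return rv;
}

int main(void)
{
    uint64_t gpa = 0x1234000;

    switch (nested_fault(&gpa, /*write=*/true, /*host r/o=*/true)) {
    case NPF_L0_ERROR:
        /* As in hvm_hap_nested_page_fault(): recompute the gfn from the
         * translated address and let the host path handle the fault. */
        printf("host handles #NPF at gfn %#llx\n",
               (unsigned long long)(gpa >> PAGE_SHIFT));
        break;
    default:
        printf("other outcome\n");
        break;
    }
    return 0;
}

The key design point the sketch tries to show is why the gpa argument becomes a pointer in the patch: an L0 error is only useful to the caller if it also receives the L1 address that the fault has already been translated to.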