[Xen-devel] [PATCH v5 1/3] x86: Consolidate boolean inputs in hvm and p2m into a shared bitmap.
This patch consolidates the boolean input parameters of
hvm_hap_nested_page_fault and p2m_mem_access_check into a common
bitmap and defines the bitmap members accordingly.

v5: Shared structure in mm.h, style fixes, and moving the gla fault
type additions into the next patch in the series.

Signed-off-by: Tamas K Lengyel <tamas.lengyel@xxxxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c        | 45 +++++++++++++++++++++----------------------
 xen/arch/x86/hvm/svm/svm.c    | 15 +++++++++------
 xen/arch/x86/hvm/vmx/vmx.c    | 15 ++++++++++-----
 xen/arch/x86/mm/p2m.c         | 18 ++++++++---------
 xen/include/asm-x86/hvm/hvm.h |  7 ++-----
 xen/include/asm-x86/mm.h      | 10 ++++++++++
 xen/include/asm-x86/p2m.h     |  6 +++---
 7 files changed, 65 insertions(+), 51 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index e834406..456d0f7 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2722,12 +2722,8 @@ void hvm_inject_page_fault(int errcode, unsigned long cr2)
     hvm_inject_trap(&trap);
 }
 
-int hvm_hap_nested_page_fault(paddr_t gpa,
-                              bool_t gla_valid,
-                              unsigned long gla,
-                              bool_t access_r,
-                              bool_t access_w,
-                              bool_t access_x)
+int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
+                              struct mem_access_check npfec)
 {
     unsigned long gfn = gpa >> PAGE_SHIFT;
     p2m_type_t p2mt;
@@ -2756,8 +2752,11 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
          * into l1 guest if not fixable. The algorithm is
          * the same as for shadow paging.
          */
-        rv = nestedhvm_hap_nested_page_fault(v, &gpa,
-                                             access_r, access_w, access_x);
+
+        rv = nestedhvm_hap_nested_page_fault(v, &gpa,
+                                             npfec.read_access,
+                                             npfec.write_access,
+                                             npfec.insn_fetch);
         switch (rv) {
         case NESTEDHVM_PAGEFAULT_DONE:
         case NESTEDHVM_PAGEFAULT_RETRY:
@@ -2793,38 +2792,39 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
     p2m = p2m_get_hostp2m(v->domain);
     mfn = get_gfn_type_access(p2m, gfn, &p2mt, &p2ma,
-                              P2M_ALLOC | (access_w ? P2M_UNSHARE : 0), NULL);
+                              P2M_ALLOC | (npfec.write_access ? P2M_UNSHARE : 0),
+                              NULL);
 
     /* Check access permissions first, then handle faults */
     if ( mfn_x(mfn) != INVALID_MFN )
     {
-        int violation = 0;
+        bool_t violation = 0;
         /* If the access is against the permissions, then send to mem_event */
         switch (p2ma)
         {
         case p2m_access_n:
         case p2m_access_n2rwx:
         default:
-            violation = access_r || access_w || access_x;
+            violation = npfec.read_access || npfec.write_access || npfec.insn_fetch;
             break;
         case p2m_access_r:
-            violation = access_w || access_x;
+            violation = npfec.write_access || npfec.insn_fetch;
             break;
         case p2m_access_w:
-            violation = access_r || access_x;
+            violation = npfec.read_access || npfec.insn_fetch;
             break;
         case p2m_access_x:
-            violation = access_r || access_w;
+            violation = npfec.read_access || npfec.write_access;
             break;
         case p2m_access_rx:
        case p2m_access_rx2rw:
-            violation = access_w;
+            violation = npfec.write_access;
             break;
         case p2m_access_wx:
-            violation = access_r;
+            violation = npfec.read_access;
             break;
         case p2m_access_rw:
-            violation = access_x;
+            violation = npfec.insn_fetch;
             break;
         case p2m_access_rwx:
             break;
@@ -2832,8 +2832,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
 
         if ( violation )
         {
-            if ( p2m_mem_access_check(gpa, gla_valid, gla, access_r,
-                                      access_w, access_x, &req_ptr) )
+            if ( p2m_mem_access_check(gpa, gla, npfec, &req_ptr) )
             {
                 fall_through = 1;
             } else {
@@ -2849,7 +2848,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
      * to the mmio handler.
      */
     if ( (p2mt == p2m_mmio_dm) ||
-         (access_w && (p2mt == p2m_ram_ro)) )
+         (npfec.write_access && (p2mt == p2m_ram_ro)) )
     {
         put_gfn(p2m->domain, gfn);
@@ -2868,7 +2867,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
         paged = 1;
 
     /* Mem sharing: unshare the page and try again */
-    if ( access_w && (p2mt == p2m_ram_shared) )
+    if ( npfec.write_access && (p2mt == p2m_ram_shared) )
     {
         ASSERT(!p2m_is_nestedp2m(p2m));
         sharing_enomem =
@@ -2885,7 +2884,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
          * a large page, we do not change other pages type within that large
          * page.
          */
-        if ( access_w )
+        if ( npfec.write_access )
        {
             paging_mark_dirty(v->domain, mfn_x(mfn));
             p2m_change_type_one(v->domain, gfn, p2m_ram_logdirty, p2m_ram_rw);
@@ -2895,7 +2894,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
     }
 
     /* Shouldn't happen: Maybe the guest was writing to a r/o grant mapping? */
-    if ( access_w && (p2mt == p2m_grant_map_ro) )
+    if ( npfec.write_access && (p2mt == p2m_grant_map_ro) )
     {
         gdprintk(XENLOG_WARNING,
                  "trying to write to read-only grant mapping\n");
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 76616ac..a7fac00 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1394,7 +1394,7 @@ const struct hvm_function_table * __init start_svm(void)
 }
 
 static void svm_do_nested_pgfault(struct vcpu *v,
-    struct cpu_user_regs *regs, uint32_t npfec, paddr_t gpa)
+    struct cpu_user_regs *regs, uint32_t pfec, paddr_t gpa)
 {
     int ret;
     unsigned long gfn = gpa >> PAGE_SHIFT;
@@ -1403,10 +1403,13 @@ static void svm_do_nested_pgfault(struct vcpu *v,
     p2m_access_t p2ma;
     struct p2m_domain *p2m = NULL;
 
-    ret = hvm_hap_nested_page_fault(gpa, 0, ~0ul,
-                                    1, /* All NPFs count as reads */
-                                    npfec & PFEC_write_access,
-                                    npfec & PFEC_insn_fetch);
+    struct mem_access_check npfec = {
+        .read_access = 1, /* All NPFs count as reads */
+        .write_access = !!(pfec & PFEC_write_access),
+        .insn_fetch = !!(pfec & PFEC_insn_fetch)
+    };
+
+    ret = hvm_hap_nested_page_fault(gpa, ~0ul, npfec);
 
     if ( tb_init_done )
     {
@@ -1434,7 +1437,7 @@ static void svm_do_nested_pgfault(struct vcpu *v,
     case -1:
         ASSERT(nestedhvm_enabled(v->domain) && nestedhvm_vcpu_in_guestmode(v));
         /* inject #VMEXIT(NPF) into guest. */
-        nestedsvm_vmexit_defer(v, VMEXIT_NPF, npfec, gpa);
+        nestedsvm_vmexit_defer(v, VMEXIT_NPF, pfec, gpa);
         return;
     }
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 2caa04a..1b328dd 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2353,6 +2353,11 @@ static void ept_handle_violation(unsigned long qualification, paddr_t gpa)
     p2m_type_t p2mt;
     int ret;
     struct domain *d = current->domain;
+    struct mem_access_check npfec = {
+        .read_access = !!(qualification & EPT_READ_VIOLATION),
+        .write_access = !!(qualification & EPT_WRITE_VIOLATION),
+        .insn_fetch = !!(qualification & EPT_EXEC_VIOLATION)
+    };
 
     if ( tb_init_done )
     {
@@ -2371,14 +2376,14 @@ static void ept_handle_violation(unsigned long qualification, paddr_t gpa)
     }
 
     if ( qualification & EPT_GLA_VALID )
+    {
         __vmread(GUEST_LINEAR_ADDRESS, &gla);
+        npfec.gla_valid = 1;
+    }
     else
         gla = ~0ull;
-    ret = hvm_hap_nested_page_fault(gpa,
-                                    !!(qualification & EPT_GLA_VALID), gla,
-                                    !!(qualification & EPT_READ_VIOLATION),
-                                    !!(qualification & EPT_WRITE_VIOLATION),
-                                    !!(qualification & EPT_EXEC_VIOLATION));
+
+    ret = hvm_hap_nested_page_fault(gpa, gla, npfec);
 
     switch ( ret )
     {
     case 0:         // Unhandled L1 EPT violation
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index bca9f0f..34c792c 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1323,9 +1323,9 @@ void p2m_mem_paging_resume(struct domain *d)
     }
 }
 
-bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
-                            bool_t access_r, bool_t access_w, bool_t access_x,
-                            mem_event_request_t **req_ptr)
+bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
+                            struct mem_access_check check,
+                            mem_event_request_t **req_ptr)
 {
     struct vcpu *v = current;
     unsigned long gfn = gpa >> PAGE_SHIFT;
@@ -1343,7 +1343,7 @@ bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
     gfn_lock(p2m, gfn, 0);
     mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL);
 
-    if ( access_w && p2ma == p2m_access_rx2rw )
+    if ( check.write_access && p2ma == p2m_access_rx2rw )
     {
         rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rw);
         ASSERT(rc == 0);
@@ -1352,7 +1352,7 @@ bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
     }
     else if ( p2ma == p2m_access_n2rwx )
     {
-        ASSERT(access_w || access_r || access_x);
+        ASSERT(check.write_access || check.read_access || check.insn_fetch);
         rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
                             p2mt, p2m_access_rwx);
         ASSERT(rc == 0);
@@ -1403,11 +1403,11 @@ bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
         /* Send request to mem event */
         req->gfn = gfn;
         req->offset = gpa & ((1 << PAGE_SHIFT) - 1);
-        req->gla_valid = gla_valid;
+        req->gla_valid = check.gla_valid;
         req->gla = gla;
-        req->access_r = access_r;
-        req->access_w = access_w;
-        req->access_x = access_x;
+        req->access_r = check.read_access;
+        req->access_w = check.write_access;
+        req->access_x = check.insn_fetch;
         req->vcpu_id = v->vcpu_id;
     }
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 0ebd478..8768280 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -455,11 +455,8 @@ static inline void hvm_invalidate_regs_fields(struct cpu_user_regs *regs)
 #endif
 }
 
-int hvm_hap_nested_page_fault(paddr_t gpa,
-                              bool_t gla_valid, unsigned long gla,
-                              bool_t access_r,
-                              bool_t access_w,
-                              bool_t access_x);
+int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
+                              struct mem_access_check npfec);
 
 #define hvm_msr_tsc_aux(v) ({                                        \
     struct domain *__d = (v)->domain;                                 \
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index d253117..9327f99 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -551,6 +551,16 @@ void audit_domains(void);
 
 #endif
 
+/*
+ * Information used to perform mem access checks.
+ */
+struct mem_access_check {
+    unsigned int read_access:1;
+    unsigned int write_access:1;
+    unsigned int insn_fetch:1;
+    unsigned int gla_valid:1;
+};
+
 int new_guest_cr3(unsigned long pfn);
 void make_cr3(struct vcpu *v, unsigned long mfn);
 void update_cr3(struct vcpu *v);
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 0ddbadb..7bab722 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -597,9 +597,9 @@ void p2m_mem_paging_resume(struct domain *d);
  * been promoted with no underlying vcpu pause. If the req_ptr has been populated,
  * then the caller must put the event in the ring (once having released get_gfn*
  * locks -- caller must also xfree the request. */
-bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
-                            bool_t access_r, bool_t access_w, bool_t access_x,
-                            mem_event_request_t **req_ptr);
+bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
+                            struct mem_access_check check,
+                            mem_event_request_t **req_ptr);
 
 /* Resumes the running of the VCPU, restarting the last instruction */
 void p2m_mem_access_resume(struct domain *d);
-- 
2.0.1

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
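
For readers unfamiliar with the refactoring pattern this patch applies, the
standalone sketch below shows the same idea in miniature. It is illustration
only, not Xen code: struct access_check, is_violation_of_readonly and the
main() harness are invented names. The point is the one the commit message
makes: a run of bool_t parameters is replaced by a single bit-field struct,
passed by value and built with designated initializers at each call site, so
flags can no longer be silently transposed.

#include <stdio.h>

/* Mirrors the shape of the patch's struct mem_access_check:
 * four one-bit fields packed into a single unsigned int. */
struct access_check {
    unsigned int read_access:1;
    unsigned int write_access:1;
    unsigned int insn_fetch:1;
    unsigned int gla_valid:1;
};

/* Hypothetical consumer: one struct argument replaces four bool_t
 * parameters, so call sites cannot swap the flags by accident. */
static int is_violation_of_readonly(struct access_check chk)
{
    /* A read-only mapping is violated by writes and instruction fetches. */
    return chk.write_access || chk.insn_fetch;
}

int main(void)
{
    /* Designated initializers name each flag at the call site, the way
     * the patch builds npfec in svm.c and vmx.c. */
    struct access_check chk = {
        .read_access = 1,
        .write_access = 1,
        .insn_fetch = 0
    };

    printf("violation: %d\n", is_violation_of_readonly(chk));
    return 0;
}

On common ABIs the four bit-fields still fit in one register, so passing the
struct by value should cost no more than the old parameter list did.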