[Xen-changelog] [xen-unstable] [XEN] Track high-water-mark of p2m map
# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
# Node ID 11a93cc59159a1b8f7ee459534f3a7377bdfe375
# Parent 6e22ba7217201f3c3d7219d824f7aa80cd431c36
[XEN] Track high-water-mark of p2m map
and so avoid some unnecessary __copy_from_user faults.
Also tidy the p2m functions generally.

Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
---
 xen/arch/x86/mm/shadow/common.c |   22 ++++++++-------
 xen/arch/x86/mm/shadow/types.h  |    4 --
 xen/include/asm-x86/domain.h    |    2 +
 xen/include/asm-x86/hvm/io.h    |    3 --
 xen/include/asm-x86/mm.h        |   28 -------------------
 xen/include/asm-x86/shadow.h    |   57 +++++++++++++++++++++++++++++++++++-----
 6 files changed, 65 insertions(+), 51 deletions(-)

diff -r 6e22ba721720 -r 11a93cc59159 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Wed Nov 15 09:44:12 2006 +0000
+++ b/xen/arch/x86/mm/shadow/common.c   Wed Nov 15 14:36:10 2006 +0000
@@ -1047,6 +1047,10 @@ shadow_set_p2m_entry(struct domain *d, u
     else
         *p2m_entry = l1e_empty();
 
+    /* Track the highest gfn for which we have ever had a valid mapping */
+    if ( valid_mfn(mfn) && (gfn > d->arch.max_mapped_pfn) )
+        d->arch.max_mapped_pfn = gfn;
+
     /* The P2M can be shadowed: keep the shadows synced */
     if ( d->vcpu[0] != NULL )
         (void)__shadow_validate_guest_entry(
@@ -1142,12 +1146,9 @@ sh_gfn_to_mfn_foreign(struct domain *d,
 
     mfn = pagetable_get_mfn(d->arch.phys_table);
 
-#if CONFIG_PAGING_LEVELS > 2
-    if ( gpfn >= (RO_MPT_VIRT_END-RO_MPT_VIRT_START) / sizeof(l1_pgentry_t) )
-        /* This pfn is higher than the p2m map can hold */
+    if ( gpfn > d->arch.max_mapped_pfn )
+        /* This pfn is higher than the highest the p2m map currently holds */
         return _mfn(INVALID_MFN);
-#endif
-
 
 #if CONFIG_PAGING_LEVELS >= 4
     {
@@ -3333,13 +3334,14 @@ void shadow_audit_p2m(struct domain *d)
             set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
         }
 
-        if ( test_linear )
-        {
-            lp2mfn = get_mfn_from_gpfn(gfn);
-            if ( lp2mfn != mfn_x(p2mfn) )
+        if ( test_linear && (gfn <= d->arch.max_mapped_pfn) )
+        {
+            lp2mfn = gfn_to_mfn_current(gfn);
+            if ( mfn_x(lp2mfn) != mfn_x(p2mfn) )
             {
                 SHADOW_PRINTK("linear mismatch gfn %#lx -> mfn %#lx "
-                              "(!= mfn %#lx)\n", gfn, lp2mfn, p2mfn);
+                              "(!= mfn %#lx)\n", gfn,
+                              mfn_x(lp2mfn), mfn_x(p2mfn));
             }
         }
 
diff -r 6e22ba721720 -r 11a93cc59159 xen/arch/x86/mm/shadow/types.h
--- a/xen/arch/x86/mm/shadow/types.h    Wed Nov 15 09:44:12 2006 +0000
+++ b/xen/arch/x86/mm/shadow/types.h    Wed Nov 15 14:36:10 2006 +0000
@@ -416,9 +416,7 @@ vcpu_gfn_to_mfn(struct vcpu *v, gfn_t gf
 {
     if ( !shadow_vcpu_mode_translate(v) )
         return _mfn(gfn_x(gfn));
-    if ( likely(current->domain == v->domain) )
-        return _mfn(get_mfn_from_gpfn(gfn_x(gfn)));
-    return sh_gfn_to_mfn_foreign(v->domain, gfn_x(gfn));
+    return sh_gfn_to_mfn(v->domain, gfn_x(gfn));
 }
 
 static inline gfn_t
diff -r 6e22ba721720 -r 11a93cc59159 xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h      Wed Nov 15 09:44:12 2006 +0000
+++ b/xen/include/asm-x86/domain.h      Wed Nov 15 14:36:10 2006 +0000
@@ -111,6 +111,8 @@ struct arch_domain
 
     /* Shadow translated domain: P2M mapping */
     pagetable_t phys_table;
+    /* Highest guest frame that's ever been mapped in the p2m */
+    unsigned long max_mapped_pfn;
 
 } __cacheline_aligned;
 
diff -r 6e22ba721720 -r 11a93cc59159 xen/include/asm-x86/hvm/io.h
--- a/xen/include/asm-x86/hvm/io.h      Wed Nov 15 09:44:12 2006 +0000
+++ b/xen/include/asm-x86/hvm/io.h      Wed Nov 15 14:36:10 2006 +0000
@@ -151,8 +151,5 @@ extern int cpu_get_interrupt(struct vcpu
 extern int cpu_get_interrupt(struct vcpu *v, int *type);
 extern int cpu_has_pending_irq(struct vcpu *v);
 
-// XXX - think about this, maybe use bit 30 of the mfn to signify an MMIO frame.
-#define mmio_space(gpa) (!VALID_MFN(get_mfn_from_gpfn((gpa) >> PAGE_SHIFT)))
-
 
 #endif /* __ASM_X86_HVM_IO_H__ */
diff -r 6e22ba721720 -r 11a93cc59159 xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h  Wed Nov 15 09:44:12 2006 +0000
+++ b/xen/include/asm-x86/mm.h  Wed Nov 15 14:36:10 2006 +0000
@@ -304,37 +304,9 @@ int check_descriptor(struct desc_struct
 
 #define gmfn_to_mfn(_d, gpfn)  mfn_x(sh_gfn_to_mfn(_d, gpfn))
 
-
-/*
- * The phys_to_machine_mapping is the reversed mapping of MPT for full
- * virtualization.  It is only used by shadow_mode_translate()==true
- * guests, so we steal the address space that would have normally
- * been used by the read-only MPT map.
- */
-#define phys_to_machine_mapping ((l1_pgentry_t *)RO_MPT_VIRT_START)
 #define INVALID_MFN             (~0UL)
 #define VALID_MFN(_mfn)         (!((_mfn) & (1U<<31)))
 
-static inline unsigned long get_mfn_from_gpfn(unsigned long pfn)
-{
-    l1_pgentry_t l1e = l1e_empty();
-    int ret;
-
-#if CONFIG_PAGING_LEVELS > 2
-    if ( pfn >= (RO_MPT_VIRT_END - RO_MPT_VIRT_START) / sizeof(l1_pgentry_t) )
-        /* This pfn is higher than the p2m map can hold */
-        return INVALID_MFN;
-#endif
-
-    ret = __copy_from_user(&l1e,
-                           &phys_to_machine_mapping[pfn],
-                           sizeof(l1e));
-
-    if ( (ret == 0) && (l1e_get_flags(l1e) & _PAGE_PRESENT) )
-        return l1e_get_pfn(l1e);
-
-    return INVALID_MFN;
-}
 
 #ifdef MEMORY_GUARD
 void memguard_init(void);
diff -r 6e22ba721720 -r 11a93cc59159 xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h      Wed Nov 15 09:44:12 2006 +0000
+++ b/xen/include/asm-x86/shadow.h      Wed Nov 15 14:36:10 2006 +0000
@@ -663,12 +663,40 @@ struct shadow_walk_cache {
 
 
 /**************************************************************************/
-/* Guest physmap (p2m) support */
+/* Guest physmap (p2m) support
+ *
+ * The phys_to_machine_mapping is the reversed mapping of MPT for full
+ * virtualization.  It is only used by shadow_mode_translate()==true
+ * guests, so we steal the address space that would have normally
+ * been used by the read-only MPT map.
+ */
+
+#define phys_to_machine_mapping ((l1_pgentry_t *)RO_MPT_VIRT_START)
+
+/* Read the current domain's P2M table. */
+static inline mfn_t sh_gfn_to_mfn_current(unsigned long gfn)
+{
+    l1_pgentry_t l1e = l1e_empty();
+    int ret;
+
+    if ( gfn > current->domain->arch.max_mapped_pfn )
+        return _mfn(INVALID_MFN);
+
+    /* Don't read off the end of the p2m table */
+    ASSERT(gfn < (RO_MPT_VIRT_END - RO_MPT_VIRT_START) / sizeof(l1_pgentry_t));
+
+    ret = __copy_from_user(&l1e,
+                           &phys_to_machine_mapping[gfn],
+                           sizeof(l1e));
+
+    if ( (ret == 0) && (l1e_get_flags(l1e) & _PAGE_PRESENT) )
+        return _mfn(l1e_get_pfn(l1e));
+
+    return _mfn(INVALID_MFN);
+}
 
 /* Walk another domain's P2M table, mapping pages as we go */
-extern mfn_t
-sh_gfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
-
+extern mfn_t sh_gfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
 
 /* General conversion function from gfn to mfn */
 static inline mfn_t
@@ -676,12 +704,19 @@ sh_gfn_to_mfn(struct domain *d, unsigned
 {
     if ( !shadow_mode_translate(d) )
         return _mfn(gfn);
-    else if ( likely(current->domain == d) )
-        return _mfn(get_mfn_from_gpfn(gfn));
-    else
+    if ( likely(current->domain == d) )
+        return sh_gfn_to_mfn_current(gfn);
+    else
         return sh_gfn_to_mfn_foreign(d, gfn);
 }
 
+/* Compatibility function for HVM code */
+static inline unsigned long get_mfn_from_gpfn(unsigned long pfn)
+{
+    return mfn_x(sh_gfn_to_mfn_current(pfn));
+}
+
+/* General conversion function from mfn to gfn */
 static inline unsigned long
 sh_mfn_to_gfn(struct domain *d, mfn_t mfn)
 {
@@ -689,6 +724,14 @@ sh_mfn_to_gfn(struct domain *d, mfn_t mf
         return get_gpfn_from_mfn(mfn_x(mfn));
     else
         return mfn_x(mfn);
+}
+
+/* Is this guest address an mmio one? (i.e. not defined in p2m map) */
+static inline int
+mmio_space(paddr_t gpa)
+{
+    unsigned long gfn = gpa >> PAGE_SHIFT;
+    return !VALID_MFN(mfn_x(sh_gfn_to_mfn_current(gfn)));
 }
 
 static inline l1_pgentry_t
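For readers skimming the diff: the heart of the change is the new max_mapped_pfn field, which records the highest gfn that has ever held a valid p2m mapping. shadow_set_p2m_entry() raises it whenever a valid mapping is installed, and the lookup paths (sh_gfn_to_mfn_current(), sh_gfn_to_mfn_foreign(), the p2m audit) test against it before touching the p2m table, so lookups above the mark return INVALID_MFN without issuing a __copy_from_user that could fault on an unmapped part of the map. Below is a minimal standalone sketch of that pattern, not Xen code; the toy_domain structure and the set_p2m_entry/gfn_to_mfn helper names are illustrative stand-ins.

#include <stdio.h>

#define INVALID_MFN     (~0UL)
#define P2M_TABLE_SIZE  1024

/* Toy stand-in for the per-domain p2m state (not the real struct arch_domain). */
struct toy_domain {
    unsigned long p2m[P2M_TABLE_SIZE];  /* gfn -> mfn; 0 means "not present" */
    unsigned long max_mapped_pfn;       /* high-water mark of valid mappings */
};

/* Install a mapping and keep the high-water mark up to date. */
static void set_p2m_entry(struct toy_domain *d, unsigned long gfn,
                          unsigned long mfn)
{
    if ( gfn >= P2M_TABLE_SIZE )
        return;
    d->p2m[gfn] = mfn;
    if ( mfn != 0 && gfn > d->max_mapped_pfn )
        d->max_mapped_pfn = gfn;
}

/* Look up a gfn, bailing out early for anything above the high-water mark,
 * so that part of the table is never read at all. */
static unsigned long gfn_to_mfn(const struct toy_domain *d, unsigned long gfn)
{
    if ( gfn > d->max_mapped_pfn )
        return INVALID_MFN;
    return d->p2m[gfn] ? d->p2m[gfn] : INVALID_MFN;
}

int main(void)
{
    static struct toy_domain d;   /* zero-initialised: empty p2m, mark == 0 */

    set_p2m_entry(&d, 5, 0x1000);
    set_p2m_entry(&d, 42, 0x2000);

    printf("gfn 42  -> mfn %#lx\n", gfn_to_mfn(&d, 42));   /* mapped */
    printf("gfn 7   -> mfn %#lx\n", gfn_to_mfn(&d, 7));    /* below mark, absent */
    printf("gfn 900 -> mfn %#lx\n", gfn_to_mfn(&d, 900));  /* above mark: early out */
    return 0;
}

As in the patch itself, the mark only ever grows: entries later invalidated below the mark still take the normal lookup path, which is harmless because the table read simply finds a non-present entry.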