[Xen-changelog] [xen-3.4-testing] x86: Clean up get_page_from_l1e() to correctly distinguish between owner-of-pte and owner-of-data-page in all cases
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1244109170 -3600
# Node ID ab1718dc8024e3b14d79f867cef7066593270d8e
# Parent 2e0834d5e01d6231753bd39849bf578670fb84de
x86: Clean up get_page_from_l1e() to correctly distinguish between
owner-of-pte and owner-of-data-page in all cases.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
xen-unstable changeset: 19708:c4b048ce6a4b
xen-unstable date:      Wed Jun 03 14:40:34 2009 +0100
---
 xen/arch/x86/mm.c              |  81 ++++++++++++++++++++---------------
 xen/arch/x86/mm/shadow/multi.c |   4 +-
 xen/include/asm-x86/mm.h       |   5 +-
 3 files changed, 46 insertions(+), 44 deletions(-)

diff -r 2e0834d5e01d -r ab1718dc8024 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c	Thu Jun 04 10:52:06 2009 +0100
+++ b/xen/arch/x86/mm.c	Thu Jun 04 10:52:50 2009 +0100
@@ -702,66 +702,69 @@ int is_iomem_page(unsigned long mfn)
     return (page_get_owner(page) == dom_io);
 }
 
-
 int get_page_from_l1e(
-    l1_pgentry_t l1e, struct domain *d)
+    l1_pgentry_t l1e, struct domain *l1e_owner, struct domain *pg_owner)
 {
     unsigned long mfn = l1e_get_pfn(l1e);
     struct page_info *page = mfn_to_page(mfn);
     uint32_t l1f = l1e_get_flags(l1e);
     struct vcpu *curr = current;
-    struct domain *owner;
+    struct domain *real_pg_owner;
 
     if ( !(l1f & _PAGE_PRESENT) )
         return 1;
 
-    if ( unlikely(l1f & l1_disallow_mask(d)) )
-    {
-        MEM_LOG("Bad L1 flags %x", l1f & l1_disallow_mask(d));
+    if ( unlikely(l1f & l1_disallow_mask(l1e_owner)) )
+    {
+        MEM_LOG("Bad L1 flags %x", l1f & l1_disallow_mask(l1e_owner));
         return 0;
     }
 
     if ( !mfn_valid(mfn) ||
-         (owner = page_get_owner_and_reference(page)) == dom_io )
+         (real_pg_owner = page_get_owner_and_reference(page)) == dom_io )
     {
         /* Only needed the reference to confirm dom_io ownership. */
         if ( mfn_valid(mfn) )
             put_page(page);
 
         /* DOMID_IO reverts to caller for privilege checks. */
-        if ( d == dom_io )
-            d = curr->domain;
-
-        if ( !iomem_access_permitted(d, mfn, mfn) )
+        if ( pg_owner == dom_io )
+            pg_owner = curr->domain;
+
+        if ( !iomem_access_permitted(pg_owner, mfn, mfn) )
         {
             if ( mfn != (PADDR_MASK >> PAGE_SHIFT) ) /* INVALID_MFN? */
                 MEM_LOG("Non-privileged (%u) attempt to map I/O space %08lx",
-                        d->domain_id, mfn);
+                        pg_owner->domain_id, mfn);
             return 0;
         }
 
         return 1;
     }
 
-    if ( owner == NULL )
+    if ( real_pg_owner == NULL )
        goto could_not_pin;
 
-    /*
-     * Let privileged domains transfer the right to map their target
-     * domain's pages. This is used to allow stub-domain pvfb export to dom0,
-     * until pvfb supports granted mappings. At that time this minor hack
-     * can go away.
-     */
-    if ( unlikely(d != owner) && (d != curr->domain) && IS_PRIV_FOR(d, owner) )
-        d = owner;
+    if ( unlikely(real_pg_owner != pg_owner) )
+    {
+        /*
+         * Let privileged domains transfer the right to map their target
+         * domain's pages. This is used to allow stub-domain pvfb export to
+         * dom0, until pvfb supports granted mappings. At that time this
+         * minor hack can go away.
+         */
+        if ( (pg_owner == l1e_owner) || !IS_PRIV_FOR(pg_owner, real_pg_owner) )
+            goto could_not_pin;
+        pg_owner = real_pg_owner;
+    }
 
     /* Foreign mappings into guests in shadow external mode don't
      * contribute to writeable mapping refcounts. (This allows the
      * qemu-dm helper process in dom0 to map the domain's memory without
      * messing up the count of "real" writable mappings.) */
     if ( (l1f & _PAGE_RW) &&
-         !(paging_mode_external(d) && (d != curr->domain)) &&
+         ((l1e_owner == pg_owner) || !paging_mode_external(pg_owner)) &&
          !get_page_type(page, PGT_writable_page) )
         goto could_not_pin;
 
@@ -774,8 +777,7 @@ get_page_from_l1e(
         if ( is_xen_heap_page(page) )
         {
             if ( (l1f & _PAGE_RW) &&
-                 !(unlikely(paging_mode_external(d) &&
-                            (d != curr->domain))) )
+                 ((l1e_owner == pg_owner) || !paging_mode_external(pg_owner)) )
                 put_page_type(page);
             put_page(page);
             MEM_LOG("Attempt to change cache attributes of Xen heap page");
@@ -799,10 +801,10 @@ get_page_from_l1e(
 
  could_not_pin:
     MEM_LOG("Error getting mfn %lx (pfn %lx) from L1 entry %" PRIpte
-            " for dom%d",
+            " for l1e_owner=%d, pg_owner=%d",
             mfn, get_gpfn_from_mfn(mfn),
-            l1e_get_intpte(l1e), d->domain_id);
-    if ( owner != NULL )
+            l1e_get_intpte(l1e), l1e_owner->domain_id, pg_owner->domain_id);
+    if ( real_pg_owner != NULL )
         put_page(page);
     return 0;
 }
@@ -981,19 +983,18 @@ get_page_from_l4e(
 #define unadjust_guest_l3e(_p, _d) ((void)(_d))
 #endif
 
-void put_page_from_l1e(l1_pgentry_t l1e, struct domain *d)
+void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner)
 {
     unsigned long pfn = l1e_get_pfn(l1e);
     struct page_info *page;
-    struct domain *e;
+    struct domain *pg_owner;
     struct vcpu *v;
 
     if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) || is_iomem_page(pfn) )
         return;
 
     page = mfn_to_page(pfn);
-
-    e = page_get_owner(page);
+    pg_owner = page_get_owner(page);
 
     /*
      * Check if this is a mapping that was established via a grant reference.
@@ -1009,17 +1010,17 @@ void put_page_from_l1e(l1_pgentry_t l1e,
      * Xen. All active grants can safely be cleaned up when the domain dies.)
      */
     if ( (l1e_get_flags(l1e) & _PAGE_GNTTAB) &&
-         !d->is_shutting_down && !d->is_dying )
+         !l1e_owner->is_shutting_down && !l1e_owner->is_dying )
     {
         MEM_LOG("Attempt to implicitly unmap a granted PTE %" PRIpte,
                 l1e_get_intpte(l1e));
-        domain_crash(d);
+        domain_crash(l1e_owner);
     }
 
     /* Remember we didn't take a type-count of foreign writable mappings
      * to paging-external domains */
     if ( (l1e_get_flags(l1e) & _PAGE_RW) &&
-         !(unlikely((e != d) && paging_mode_external(e))) )
+         ((l1e_owner == pg_owner) || !paging_mode_external(pg_owner)) )
     {
         put_page_and_type(page);
     }
@@ -1029,9 +1030,9 @@ void put_page_from_l1e(l1_pgentry_t l1e,
         if ( unlikely(((page->u.inuse.type_info & PGT_type_mask) ==
                        PGT_seg_desc_page)) &&
             unlikely(((page->u.inuse.type_info & PGT_count_mask) != 0)) &&
-             (d == e) )
-        {
-            for_each_vcpu ( d, v )
+             (l1e_owner == pg_owner) )
+        {
+            for_each_vcpu ( pg_owner, v )
                 invalidate_shadow_ldt(v, 1);
         }
         put_page(page);
@@ -1122,7 +1123,7 @@ static int alloc_l1_table(struct page_in
     for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
     {
         if ( is_guest_l1_slot(i) &&
-             unlikely(!get_page_from_l1e(pl1e[i], d)) )
+             unlikely(!get_page_from_l1e(pl1e[i], d, d)) )
             goto fail;
 
         adjust_guest_l1e(pl1e[i], d);
@@ -1701,7 +1702,7 @@ static int mod_l1_entry(l1_pgentry_t *pl
             return rc;
         }
 
-        if ( unlikely(!get_page_from_l1e(nl1e, FOREIGNDOM)) )
+        if ( unlikely(!get_page_from_l1e(nl1e, d, FOREIGNDOM)) )
            return 0;
 
         adjust_guest_l1e(nl1e, d);
@@ -4175,7 +4176,7 @@ static int ptwr_emulated_update(
 
     /* Check the new PTE. */
     nl1e = l1e_from_intpte(val);
-    if ( unlikely(!get_page_from_l1e(nl1e, d)) )
+    if ( unlikely(!get_page_from_l1e(nl1e, d, d)) )
     {
         if ( is_pv_32bit_domain(d) && (bytes == 4) && (unaligned_addr & 4) &&
              !do_cmpxchg && (l1e_get_flags(nl1e) & _PAGE_PRESENT) )
diff -r 2e0834d5e01d -r ab1718dc8024 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c	Thu Jun 04 10:52:06 2009 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c	Thu Jun 04 10:52:50 2009 +0100
@@ -816,7 +816,7 @@ shadow_get_page_from_l1e(shadow_l1e_t sl
     if ( !shadow_mode_refcounts(d) )
         return 1;
 
-    res = get_page_from_l1e(sl1e, d);
+    res = get_page_from_l1e(sl1e, d, d);
 
     // If a privileged domain is attempting to install a map of a page it does
     // not own, we let it succeed anyway.
@@ -828,7 +828,7 @@ shadow_get_page_from_l1e(shadow_l1e_t sl
          (d != owner) &&
          IS_PRIV_FOR(d, owner))
     {
-        res = get_page_from_l1e(sl1e, owner);
+        res = get_page_from_l1e(sl1e, d, owner);
         SHADOW_PRINTK("privileged domain %d installs map of mfn %05lx "
                       "which is owned by domain %d: %s\n",
                       d->domain_id, mfn_x(mfn), owner->domain_id,
diff -r 2e0834d5e01d -r ab1718dc8024 xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h	Thu Jun 04 10:52:06 2009 +0100
+++ b/xen/include/asm-x86/mm.h	Thu Jun 04 10:52:50 2009 +0100
@@ -285,8 +285,9 @@ int get_page_type(struct page_info *pag
 int get_page_type(struct page_info *page, unsigned long type);
 int put_page_type_preemptible(struct page_info *page);
 int get_page_type_preemptible(struct page_info *page, unsigned long type);
-int get_page_from_l1e(l1_pgentry_t l1e, struct domain *d);
-void put_page_from_l1e(l1_pgentry_t l1e, struct domain *d);
+int get_page_from_l1e(
+    l1_pgentry_t l1e, struct domain *l1e_owner, struct domain *pg_owner);
+void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner);
 
 static inline void put_page_and_type(struct page_info *page)
 {
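Editorial note, not part of the changeset: the patch threads two domain pointers through get_page_from_l1e(). l1e_owner is the domain whose page table holds the PTE, pg_owner is the domain the caller claims owns the data page, and real_pg_owner is whoever actually owns it. The C program below is a simplified, standalone sketch of the two rules the hunks above rewrite (when a mapping whose claimed and real page owners differ is refused, and when a writable mapping skips the writable type count). The names may_map, takes_writable_ref, is_privileged and paging_external are stand-ins invented for this sketch, loosely corresponding to IS_PRIV_FOR() and paging_mode_external() in the real code; none of them are Xen APIs.

/*
 * Simplified, standalone model of the owner-of-pte vs. owner-of-data-page
 * distinction described above.  Types and helpers are illustrative
 * stand-ins, not the real Xen definitions.
 */
#include <stdbool.h>
#include <stdio.h>

struct domain {
    int  domain_id;
    bool is_privileged;    /* coarse stand-in for IS_PRIV_FOR() */
    bool paging_external;  /* stand-in for paging_mode_external() */
};

/* Decide whether the mapping is allowed and whether a writable type
 * reference would be taken, mirroring the two rules changed by the patch. */
static bool may_map(const struct domain *l1e_owner,
                    const struct domain *pg_owner,
                    const struct domain *real_pg_owner,
                    bool writable, bool *takes_writable_ref)
{
    if ( real_pg_owner != pg_owner )
    {
        /* The caller must have explicitly named a foreign page owner
         * (pg_owner != l1e_owner), and that named owner must be privileged
         * over the real owner; otherwise the mapping is refused. */
        if ( pg_owner == l1e_owner || !pg_owner->is_privileged )
            return false;
        pg_owner = real_pg_owner;
    }

    /* Foreign writable mappings into paging-external guests do not take a
     * writable type count; all other writable mappings do. */
    *takes_writable_ref =
        writable && (l1e_owner == pg_owner || !pg_owner->paging_external);
    return true;
}

int main(void)
{
    struct domain dom0 = { .domain_id = 0, .is_privileged = true };
    struct domain domU = { .domain_id = 7, .paging_external = true };
    bool ref;

    /* qemu-dm style case: the PTE lives in dom0's tables, the page belongs
     * to domU, and the caller named domU as pg_owner (FOREIGNDOM).  The map
     * is allowed but no writable type reference is taken. */
    if ( may_map(&dom0, &domU, &domU, true, &ref) )
        printf("dom0 maps domU page: allowed, writable ref taken: %s\n",
               ref ? "yes" : "no");

    /* domU maps a dom0-owned page while claiming to own it: refused. */
    if ( !may_map(&domU, &domU, &dom0, true, &ref) )
        printf("domU maps dom0 page: refused\n");

    return 0;
}

Passing the two owners separately is what lets call sites such as mod_l1_entry() hand in (d, FOREIGNDOM) while alloc_l1_table() and ptwr_emulated_update() simply pass (d, d), instead of the old code inferring the page owner from a single domain argument and the current vcpu.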