[Xen-changelog] [xen-unstable] xsm: Revert "Fix xsm_mmu_* and xsm_update_va_mapping hooks"
# HG changeset patch
# User Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
# Date 1322862356 28800
# Node ID 2bff84f3334136f0deb2951281297491e3aa158a
# Parent 841c330b23a64461aa246ed024e57082f1cd051b
xsm: Revert "Fix xsm_mmu_* and xsm_update_va_mapping hooks"

This reverts 23220:56a3b9c7367f, which removes all validation of the
target pages in the mapping. This crash was solved by properly marking
pages without known SIDs in 22207:20f139010445.

Signed-off-by: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
Committed-by: Keir Fraser <keir@xxxxxxx>
---

diff -r 841c330b23a6 -r 2bff84f33341 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c	Fri Dec 02 13:45:15 2011 -0800
+++ b/xen/arch/x86/mm.c	Fri Dec 02 13:45:56 2011 -0800
@@ -3517,6 +3517,9 @@
         {
             p2m_type_t p2mt;
 
+            rc = xsm_mmu_normal_update(d, pg_owner, req.val);
+            if ( rc )
+                break;
             rc = -EINVAL;
 
             req.ptr -= cmd;
@@ -3545,14 +3548,6 @@
                           (unsigned long)(req.ptr & ~PAGE_MASK));
             page = mfn_to_page(mfn);
 
-            rc = xsm_mmu_normal_update(d, req.val, page);
-            if ( rc ) {
-                unmap_domain_page_with_cache(va, &mapcache);
-                put_page(page);
-                put_gfn(pt_owner, gmfn);
-                break;
-            }
-
             if ( page_lock(page) )
             {
                 switch ( page->u.inuse.type_info & PGT_type_mask )
@@ -3740,6 +3735,10 @@
             mfn = req.ptr >> PAGE_SHIFT;
             gpfn = req.val;
 
+            rc = xsm_mmu_machphys_update(d, mfn);
+            if ( rc )
+                break;
+
             if ( unlikely(!get_page_from_pagenr(mfn, pg_owner)) )
             {
                 MEM_LOG("Could not get page for mach->phys update");
@@ -3754,10 +3753,6 @@
                 break;
             }
 
-            rc = xsm_mmu_machphys_update(d, mfn_to_page(mfn));
-            if ( rc )
-                break;
-
             set_gpfn_from_mfn(mfn, gpfn);
 
             paging_mark_dirty(pg_owner, mfn);
@@ -4384,6 +4379,10 @@
 
     perfc_incr(calls_to_update_va);
 
+    rc = xsm_update_va_mapping(d, pg_owner, val);
+    if ( rc )
+        return rc;
+
     rc = -EINVAL;
     pl1e = guest_map_l1e(v, va, &gl1mfn);
     if ( unlikely(!pl1e || !get_page_from_pagenr(gl1mfn, d)) )
@@ -4403,13 +4402,6 @@
         goto out;
     }
 
-    rc = xsm_update_va_mapping(d, val, gl1pg);
-    if ( rc ) {
-        page_unlock(gl1pg);
-        put_page(gl1pg);
-        goto out;
-    }
-
     rc = mod_l1_entry(pl1e, val, gl1mfn, 0, v, pg_owner);
 
     page_unlock(gl1pg);

diff -r 841c330b23a6 -r 2bff84f33341 xen/include/xsm/xsm.h
--- a/xen/include/xsm/xsm.h	Fri Dec 02 13:45:15 2011 -0800
+++ b/xen/include/xsm/xsm.h	Fri Dec 02 13:45:56 2011 -0800
@@ -141,12 +141,11 @@
     int (*getidletime) (void);
     int (*machine_memory_map) (void);
     int (*domain_memory_map) (struct domain *d);
-    int (*mmu_normal_update) (struct domain *d,
-                              intpte_t fpte, struct page_info *page);
-    int (*mmu_machphys_update) (struct domain *d, struct page_info *page);
-    int (*update_va_mapping) (struct domain *d,
-                              l1_pgentry_t pte,
-                              struct page_info *page);
+    int (*mmu_normal_update) (struct domain *d, struct domain *f,
+                              intpte_t fpte);
+    int (*mmu_machphys_update) (struct domain *d, unsigned long mfn);
+    int (*update_va_mapping) (struct domain *d, struct domain *f,
+                              l1_pgentry_t pte);
     int (*add_to_physmap) (struct domain *d1, struct domain *d2);
     int (*sendtrigger) (struct domain *d);
     int (*bind_pt_irq) (struct domain *d, struct xen_domctl_bind_pt_irq *bind);
@@ -595,22 +594,21 @@
     return xsm_call(domain_memory_map(d));
 }
 
-static inline int xsm_mmu_normal_update (struct domain *d,
-                                         intpte_t fpte, struct page_info *page)
+static inline int xsm_mmu_normal_update (struct domain *d, struct domain *f,
+                                         intpte_t fpte)
 {
-    return xsm_call(mmu_normal_update(d, fpte, page));
+    return xsm_call(mmu_normal_update(d, f, fpte));
 }
 
-static inline int xsm_mmu_machphys_update (struct domain *d, struct page_info *page)
+static inline int xsm_mmu_machphys_update (struct domain *d, unsigned long mfn)
 {
-    return xsm_call(mmu_machphys_update(d, page));
+    return xsm_call(mmu_machphys_update(d, mfn));
 }
 
-static inline int xsm_update_va_mapping(struct domain *d,
-                                        l1_pgentry_t pte,
-                                        struct page_info *page)
+static inline int xsm_update_va_mapping(struct domain *d, struct domain *f,
+                                        l1_pgentry_t pte)
 {
-    return xsm_call(update_va_mapping(d, pte, page));
+    return xsm_call(update_va_mapping(d, f, pte));
 }
 
 static inline int xsm_add_to_physmap(struct domain *d1, struct domain *d2)

diff -r 841c330b23a6 -r 2bff84f33341 xen/xsm/dummy.c
--- a/xen/xsm/dummy.c	Fri Dec 02 13:45:15 2011 -0800
+++ b/xen/xsm/dummy.c	Fri Dec 02 13:45:56 2011 -0800
@@ -400,20 +400,19 @@
     return 0;
 }
 
-static int dummy_mmu_normal_update (struct domain *d,
-                                    intpte_t fpte, struct page_info *page)
+static int dummy_mmu_normal_update (struct domain *d, struct domain *f,
+                                    intpte_t fpte)
 {
     return 0;
 }
 
-static int dummy_mmu_machphys_update (struct domain *d, struct page_info *page)
+static int dummy_mmu_machphys_update (struct domain *d, unsigned long mfn)
 {
     return 0;
 }
 
-static int dummy_update_va_mapping (struct domain *d,
-                                    l1_pgentry_t pte,
-                                    struct page_info *page)
+static int dummy_update_va_mapping (struct domain *d, struct domain *f,
+                                    l1_pgentry_t pte)
 {
     return 0;
 }

diff -r 841c330b23a6 -r 2bff84f33341 xen/xsm/flask/hooks.c
--- a/xen/xsm/flask/hooks.c	Fri Dec 02 13:45:15 2011 -0800
+++ b/xen/xsm/flask/hooks.c	Fri Dec 02 13:45:56 2011 -0800
@@ -348,6 +348,26 @@
     return rc;
 }
+static int get_mfn_sid(unsigned long mfn, u32 *sid)
+{
+    int rc = 0;
+    struct page_info *page;
+
+    if ( mfn_valid(mfn) )
+    {
+        /*mfn is valid if this is a page that Xen is tracking!*/
+        page = mfn_to_page(mfn);
+        rc = get_page_sid(page, sid);
+    }
+    else
+    {
+        /*Possibly an untracked IO page?*/
+        rc = security_iomem_sid(mfn, sid);
+    }
+
+    return rc;
+}
+
 static int flask_memory_adjust_reservation(struct domain *d1, struct domain *d2)
 {
     return domain_has_perm(d1, d2, SECCLASS_MMU, MMU__ADJUST);
 }
@@ -987,11 +1007,12 @@
     return domain_has_perm(current->domain, d, SECCLASS_MMU, MMU__MEMORYMAP);
 }
 
-static int flask_mmu_normal_update(struct domain *d,
-                                   intpte_t fpte, struct page_info *page)
+static int flask_mmu_normal_update(struct domain *d, struct domain *f,
+                                   intpte_t fpte)
 {
     int rc = 0;
     u32 map_perms = MMU__MAP_READ;
+    unsigned long fmfn;
     struct domain_security_struct *dsec;
     u32 fsid;
 
@@ -1000,38 +1021,42 @@
     if ( l1e_get_flags(l1e_from_intpte(fpte)) & _PAGE_RW )
         map_perms |= MMU__MAP_WRITE;
 
-    rc = get_page_sid(page, &fsid);
+    fmfn = get_gfn_untyped(f, l1e_get_pfn(l1e_from_intpte(fpte)));
+
+    rc = get_mfn_sid(fmfn, &fsid);
     if ( rc )
         return rc;
 
     return avc_has_perm(dsec->sid, fsid, SECCLASS_MMU, map_perms, NULL);
 }
 
-static int flask_mmu_machphys_update(struct domain *d, struct page_info *page)
+static int flask_mmu_machphys_update(struct domain *d, unsigned long mfn)
 {
     int rc = 0;
     u32 psid;
     struct domain_security_struct *dsec;
 
    dsec = d->ssid;
 
-    rc = get_page_sid(page, &psid);
+    rc = get_mfn_sid(mfn, &psid);
     if ( rc )
         return rc;
 
     return avc_has_perm(dsec->sid, psid, SECCLASS_MMU, MMU__UPDATEMP, NULL);
 }
 
-static int flask_update_va_mapping(struct domain *d,
-                                   l1_pgentry_t pte, struct page_info *page)
+static int flask_update_va_mapping(struct domain *d, struct domain *f,
+                                   l1_pgentry_t pte)
 {
     int rc = 0;
     u32 psid;
     u32 map_perms = MMU__MAP_READ;
+    unsigned long mfn;
     struct domain_security_struct *dsec;
 
     dsec = d->ssid;
-    rc = get_page_sid(page, &psid);
+    mfn = get_gfn_untyped(f, l1e_get_pfn(pte));
+    rc = get_mfn_sid(mfn, &psid);
     if ( rc )
         return rc;
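For readers tracing the hunks above: the revert moves each XSM check back to the
start of the hypercall path, before Xen looks up and locks the target page, so
the FLASK hook itself has to turn the caller-supplied PTE into a frame number
and then into a SID, falling back to an I/O-memory label for frames Xen does
not track (see get_mfn_sid() in the hooks.c hunk). The fragment below is a
minimal, self-contained C sketch of that decision flow only; the demo_* names,
the fake SID values and the simplified PTE layout are assumptions made for
illustration and are not Xen's real API.

/*
 * Illustrative sketch only -- not Xen code.  Models the restored ordering:
 * the security hook runs before any page lookup, so it must derive the
 * target frame from the caller-supplied PTE and classify it itself.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

typedef uint64_t demo_pte_t;        /* stand-in for intpte_t             */
typedef unsigned int demo_sid_t;    /* stand-in for a FLASK security ID  */

#define DEMO_PAGE_SHIFT 12
#define DEMO_PTE_RW     0x2         /* stand-in for _PAGE_RW             */

/* Stand-in for mfn_valid(): pretend Xen only tracks frames below 0x1000. */
static bool demo_mfn_valid(unsigned long mfn)
{
    return mfn < 0x1000;
}

/* Stand-in for get_mfn_sid(): tracked frames get a per-page label,
 * anything else is treated like untracked I/O memory.                   */
static int demo_get_mfn_sid(unsigned long mfn, demo_sid_t *sid)
{
    if ( demo_mfn_valid(mfn) )
        *sid = 0x100u + (demo_sid_t)(mfn & 0xff);   /* "page" label      */
    else
        *sid = 0x200u;                              /* "iomem" label     */
    return 0;
}

/* Stand-in for flask_mmu_normal_update(): classify the mapping request
 * using nothing but the caller's SID and the PTE it proposed.           */
static int demo_mmu_normal_update(demo_sid_t caller_sid, demo_pte_t fpte)
{
    unsigned long mfn = (unsigned long)(fpte >> DEMO_PAGE_SHIFT);
    demo_sid_t target_sid;
    int rc = demo_get_mfn_sid(mfn, &target_sid);

    if ( rc )
        return rc;

    /* Real code would now call avc_has_perm(caller_sid, target_sid, ...). */
    printf("caller %#x -> target %#x (%s)\n", caller_sid, target_sid,
           (fpte & DEMO_PTE_RW) ? "map_read|map_write" : "map_read");
    return 0;
}

int main(void)
{
    /* A writable mapping of a tracked frame, then read-only I/O memory. */
    demo_mmu_normal_update(0x42, (0x0abcULL << DEMO_PAGE_SHIFT) | DEMO_PTE_RW);
    demo_mmu_normal_update(0x42, (0xfeedULL << DEMO_PAGE_SHIFT) | 0x1);
    return 0;
}

Performing the check up front also matches what the mm.c hunks show: the
removed in-line calls had to unwind state (unmap_domain_page_with_cache(),
put_page(), put_gfn()) whenever the check failed, whereas the restored early
checks can simply break out or return before any page reference is taken.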