diff -r 8ab9285a8354 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c	Thu Apr 22 09:47:44 2010 +0100
+++ b/xen/arch/x86/mm.c	Mon Apr 26 11:07:49 2010 +0100
@@ -167,7 +167,8 @@
     ((d != dom_io) &&                                       \
      (rangeset_is_empty((d)->iomem_caps) &&                 \
       rangeset_is_empty((d)->arch.ioport_caps) &&           \
-      !has_arch_pdevs(d)) ?                                 \
+      !has_arch_pdevs(d) &&                                 \
+      !is_hvm_domain(d)) ?                                  \
      L1_DISALLOW_MASK : (L1_DISALLOW_MASK & ~PAGE_CACHE_ATTRS))
 
 #ifdef CONFIG_COMPAT
diff -r 8ab9285a8354 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c	Thu Apr 22 09:47:44 2010 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c	Mon Apr 26 11:07:49 2010 +0100
@@ -562,7 +562,7 @@
      * For HVM domains with direct access to MMIO areas, set the correct
      * caching attributes in the shadows to match what was asked for.
      */
-    if ( (level == 1) && is_hvm_domain(d) && has_arch_pdevs(d) &&
+    if ( (level == 1) && is_hvm_domain(d) &&
          !is_xen_heap_mfn(mfn_x(target_mfn)) )
     {
         unsigned int type;
@@ -578,20 +578,24 @@
             sflags |= pat_type_2_pte_flags(type);
         else if ( d->arch.hvm_domain.is_in_uc_mode )
             sflags |= pat_type_2_pte_flags(PAT_TYPE_UNCACHABLE);
-        else if ( p2mt == p2m_mmio_direct )
-            sflags |= get_pat_flags(v,
-                                    gflags,
-                                    gfn_to_paddr(target_gfn),
-                                    ((paddr_t)mfn_x(target_mfn)) << PAGE_SHIFT,
-                                    MTRR_TYPE_UNCACHABLE);
-        else if ( iommu_snoop )
-            sflags |= pat_type_2_pte_flags(PAT_TYPE_WRBACK);
-        else
-            sflags |= get_pat_flags(v,
-                                    gflags,
-                                    gfn_to_paddr(target_gfn),
-                                    ((paddr_t)mfn_x(target_mfn)) << PAGE_SHIFT,
-                                    NO_HARDCODE_MEM_TYPE);
+        else
+        if ( has_arch_pdevs(d) )
+        {
+            if ( p2mt == p2m_mmio_direct )
+                sflags |= get_pat_flags(v,
+                                        gflags,
+                                        gfn_to_paddr(target_gfn),
+                                        ((paddr_t)mfn_x(target_mfn)) << PAGE_SHIFT,
+                                        MTRR_TYPE_UNCACHABLE);
+            else if ( iommu_snoop )
+                sflags |= pat_type_2_pte_flags(PAT_TYPE_WRBACK);
+            else
+                sflags |= get_pat_flags(v,
+                                        gflags,
+                                        gfn_to_paddr(target_gfn),
+                                        ((paddr_t)mfn_x(target_mfn)) << PAGE_SHIFT,
+                                        NO_HARDCODE_MEM_TYPE);
+        }
     }
 
     // Set the A&D bits for higher level shadows.
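
As a reading aid, the restructured else-chain can be modelled by the small
standalone C program below. Every name in it (mem_type, pick_type() and its
boolean parameters) is an illustrative stub, not a Xen API; only the branch
order mirrors the patched sh_propagate() code. The effect of the patch is
that has_arch_pdevs(d) no longer gates the whole caching-attribute block
(so pinned cache attributes and UC mode now reach every HVM domain), but
still guards the MTRR/PAT-derived fallbacks; the mm.c hunk relaxes
l1_disallow_mask() in the same spirit, keeping PAGE_CACHE_ATTRS usable for
HVM domains.

/* Standalone sketch -- all names below are illustrative stubs, not Xen APIs. */
#include <stdbool.h>
#include <stdio.h>

enum mem_type {
    MT_NO_OVERRIDE,   /* no hard-coded type: earlier cases / defaults apply */
    MT_MMIO_UC,       /* direct MMIO: force uncachable via PAT              */
    MT_SNOOP_WB,      /* IOMMU snooping: write-back is safe                 */
    MT_PAT_DEFAULT    /* fall back to get_pat_flags() resolution            */
};

/* Mirrors the patched ordering: the has_arch_pdevs() test now guards only
 * the three fallback cases instead of gating the whole attribute block. */
static enum mem_type pick_type(bool has_arch_pdevs, bool mmio_direct,
                               bool iommu_snoop)
{
    if ( !has_arch_pdevs )
        return MT_NO_OVERRIDE;
    if ( mmio_direct )
        return MT_MMIO_UC;
    if ( iommu_snoop )
        return MT_SNOOP_WB;
    return MT_PAT_DEFAULT;
}

int main(void)
{
    /* Before the patch an HVM domain without passed-through devices never
     * entered the attribute block at all; now it enters it, and only the
     * hard-coded fallbacks are skipped. */
    printf("pdev-less HVM domain: %d\n", pick_type(false, false, false));
    printf("passed-through MMIO:  %d\n", pick_type(true, true, false));
    return 0;
}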