diff -r 8f81bdd57afe xen/drivers/passthrough/amd/iommu_map.c
--- a/xen/drivers/passthrough/amd/iommu_map.c	Thu Sep 03 09:51:37 2009 +0100
+++ b/xen/drivers/passthrough/amd/iommu_map.c	Fri Sep 04 14:25:49 2009 +0200
@@ -459,9 +459,6 @@ int amd_iommu_map_page(struct domain *d,
 
     spin_lock(&hd->mapping_lock);
 
-    if ( is_hvm_domain(d) && !hd->p2m_synchronized )
-        goto out;
-
     iommu_l2e = iommu_l2e_from_pfn(hd->root_table, hd->paging_mode, gfn);
     if ( iommu_l2e == 0 )
     {
@@ -472,7 +469,6 @@ int amd_iommu_map_page(struct domain *d,
     }
 
     set_iommu_l1e_present(iommu_l2e, gfn, (u64)mfn << PAGE_SHIFT, iw, ir);
-out:
     spin_unlock(&hd->mapping_lock);
     return 0;
 }
@@ -487,12 +483,6 @@ int amd_iommu_unmap_page(struct domain *
     BUG_ON( !hd->root_table );
 
     spin_lock(&hd->mapping_lock);
-
-    if ( is_hvm_domain(d) && !hd->p2m_synchronized )
-    {
-        spin_unlock(&hd->mapping_lock);
-        return 0;
-    }
 
     iommu_l2e = iommu_l2e_from_pfn(hd->root_table, hd->paging_mode, gfn);
 
diff -r 8f81bdd57afe xen/drivers/passthrough/iommu.c
--- a/xen/drivers/passthrough/iommu.c	Thu Sep 03 09:51:37 2009 +0100
+++ b/xen/drivers/passthrough/iommu.c	Fri Sep 04 14:25:49 2009 +0200
@@ -158,7 +158,8 @@ static int iommu_populate_page_table(str
 
     page_list_for_each ( page, &d->page_list )
     {
-        if ( (page->u.inuse.type_info & PGT_type_mask) == PGT_writable_page )
+        if ( is_hvm_domain(d) ||
+             (page->u.inuse.type_info & PGT_type_mask) == PGT_writable_page )
         {
             rc = hd->platform_ops->map_page(
                 d, mfn_to_gmfn(d, page_to_mfn(page)), page_to_mfn(page));
diff -r 8f81bdd57afe xen/include/xen/hvm/iommu.h
--- a/xen/include/xen/hvm/iommu.h	Thu Sep 03 09:51:37 2009 +0100
+++ b/xen/include/xen/hvm/iommu.h	Fri Sep 04 14:25:49 2009 +0200
@@ -41,7 +41,6 @@ struct hvm_iommu {
     int domain_id;
     int paging_mode;
     struct page_info *root_table;
-    bool_t p2m_synchronized;
 
     /* iommu_ops */
     struct iommu_ops *platform_ops;