[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen-unstable] Handle dynamic IOMMU map/unmap for guests
# HG changeset patch # User Keir Fraser <keir.fraser@xxxxxxxxxx> # Date 1211618522 -3600 # Node ID c684cf331f94573ff5829d9701141cf17414b2b8 # Parent 62f1c837057f33e1e58f90bbe90f33335a89558f Handle dynamic IOMMU map/unmap for guests Perform IOMMU map/unmap when (a) frame type changes, (b) memory reservation changes, and (c) a grant reference is newly mapped or completely unmapped from a domain. Signed-off-by: Espen Skoglund <espen.skoglund@xxxxxxxxxxxxx> --- xen/arch/x86/mm.c | 14 +++++++++++++ xen/arch/x86/mm/p2m.c | 21 +++++++++++++++++-- xen/common/grant_table.c | 50 ++++++++++++++++++++++++++++++++++++++++------- xen/common/memory.c | 17 +++++---------- 4 files changed, 82 insertions(+), 20 deletions(-) diff -r 62f1c837057f -r c684cf331f94 xen/arch/x86/mm.c --- a/xen/arch/x86/mm.c Sat May 24 09:37:35 2008 +0100 +++ b/xen/arch/x86/mm.c Sat May 24 09:42:02 2008 +0100 @@ -1939,6 +1939,20 @@ int get_page_type(struct page_info *page } while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) ); + if ( unlikely((x & PGT_type_mask) != type) ) + { + /* Special pages should not be accessible from devices. */ + struct domain *d = page_get_owner(page); + if ( d && unlikely(need_iommu(d)) ) + { + if ( (x & PGT_type_mask) == PGT_writable_page ) + iommu_unmap_page(d, mfn_to_gmfn(d, page_to_mfn(page))); + else if ( type == PGT_writable_page ) + iommu_map_page(d, mfn_to_gmfn(d, page_to_mfn(page)), + page_to_mfn(page)); + } + } + if ( unlikely(!(nx & PGT_validated)) ) { /* Try to validate page type; drop the new reference on failure. 
*/ diff -r 62f1c837057f -r c684cf331f94 xen/arch/x86/mm/p2m.c --- a/xen/arch/x86/mm/p2m.c Sat May 24 09:37:35 2008 +0100 +++ b/xen/arch/x86/mm/p2m.c Sat May 24 09:42:02 2008 +0100 @@ -325,7 +325,7 @@ p2m_set_entry(struct domain *d, unsigned if ( mfn_valid(mfn) && (gfn > d->arch.p2m->max_mapped_pfn) ) d->arch.p2m->max_mapped_pfn = gfn; - if ( iommu_enabled && is_hvm_domain(d) ) + if ( iommu_enabled && (is_hvm_domain(d) || need_iommu(d)) ) { if ( p2mt == p2m_ram_rw ) for ( i = 0; i < (1UL << page_order); i++ ) @@ -868,7 +868,12 @@ p2m_remove_page(struct domain *d, unsign unsigned long i; if ( !paging_mode_translate(d) ) + { + if ( need_iommu(d) ) + for ( i = 0; i < (1 << page_order); i++ ) + iommu_unmap_page(d, mfn + i); return; + } P2M_DEBUG("removing gfn=%#lx mfn=%#lx\n", gfn, mfn); @@ -899,7 +904,19 @@ guest_physmap_add_entry(struct domain *d int rc = 0; if ( !paging_mode_translate(d) ) - return -EINVAL; + { + if ( need_iommu(d) && t == p2m_ram_rw ) + { + for ( i = 0; i < (1 << page_order); i++ ) + if ( (rc = iommu_map_page(d, mfn + i, mfn + i)) != 0 ) + { + while ( i-- > 0 ) + iommu_unmap_page(d, mfn + i); + return rc; + } + } + return 0; + } #if CONFIG_PAGING_LEVELS == 3 /* diff -r 62f1c837057f -r c684cf331f94 xen/common/grant_table.c --- a/xen/common/grant_table.c Sat May 24 09:37:35 2008 +0100 +++ b/xen/common/grant_table.c Sat May 24 09:42:02 2008 +0100 @@ -196,8 +196,9 @@ __gnttab_map_grant_ref( struct domain *ld, *rd; struct vcpu *led; int handle; - unsigned long frame = 0; + unsigned long frame = 0, nr_gets = 0; int rc = GNTST_okay; + u32 old_pin; unsigned int cache_flags; struct active_grant_entry *act; struct grant_mapping *mt; @@ -318,6 +319,7 @@ __gnttab_map_grant_ref( } } + old_pin = act->pin; if ( op->flags & GNTMAP_device_map ) act->pin += (op->flags & GNTMAP_readonly) ? 
GNTPIN_devr_inc : GNTPIN_devw_inc; @@ -361,20 +363,17 @@ __gnttab_map_grant_ref( rc = GNTST_general_error; goto undo_out; } - + + nr_gets++; if ( op->flags & GNTMAP_host_map ) { rc = create_grant_host_mapping(op->host_addr, frame, op->flags, 0); if ( rc != GNTST_okay ) - { - if ( gnttab_host_mapping_get_page_type(op, ld, rd) ) - put_page_type(mfn_to_page(frame)); - put_page(mfn_to_page(frame)); goto undo_out; - } if ( op->flags & GNTMAP_device_map ) { + nr_gets++; (void)get_page(mfn_to_page(frame), rd); if ( !(op->flags & GNTMAP_readonly) ) get_page_type(mfn_to_page(frame), PGT_writable_page); @@ -382,6 +381,17 @@ __gnttab_map_grant_ref( } } + if ( need_iommu(ld) && + !(old_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) && + (act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) ) + { + if ( iommu_map_page(ld, mfn_to_gmfn(ld, frame), frame) ) + { + rc = GNTST_general_error; + goto undo_out; + } + } + TRACE_1D(TRC_MEM_PAGE_GRANT_MAP, op->dom); mt = &maptrack_entry(ld->grant_table, handle); @@ -397,6 +407,19 @@ __gnttab_map_grant_ref( return; undo_out: + if ( nr_gets > 1 ) + { + if ( !(op->flags & GNTMAP_readonly) ) + put_page_type(mfn_to_page(frame)); + put_page(mfn_to_page(frame)); + } + if ( nr_gets > 0 ) + { + if ( gnttab_host_mapping_get_page_type(op, ld, rd) ) + put_page_type(mfn_to_page(frame)); + put_page(mfn_to_page(frame)); + } + spin_lock(&rd->grant_table->lock); act = &active_entry(rd->grant_table, op->ref); @@ -451,6 +474,7 @@ __gnttab_unmap_common( struct active_grant_entry *act; grant_entry_t *sha; s16 rc = 0; + u32 old_pin; ld = current->domain; @@ -497,6 +521,7 @@ __gnttab_unmap_common( act = &active_entry(rd->grant_table, op->map->ref); sha = &shared_entry(rd->grant_table, op->map->ref); + old_pin = act->pin; if ( op->frame == 0 ) { @@ -532,6 +557,17 @@ __gnttab_unmap_common( act->pin -= GNTPIN_hstr_inc; else act->pin -= GNTPIN_hstw_inc; + } + + if ( need_iommu(ld) && + (old_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) && + !(act->pin & 
(GNTPIN_hstw_mask|GNTPIN_devw_mask)) ) + { + if ( iommu_unmap_page(ld, mfn_to_gmfn(ld, op->frame)) ) + { + rc = GNTST_general_error; + goto unmap_out; + } } /* If just unmapped a writable mapping, mark as dirtied */ diff -r 62f1c837057f -r c684cf331f94 xen/common/memory.c --- a/xen/common/memory.c Sat May 24 09:37:35 2008 +0100 +++ b/xen/common/memory.c Sat May 24 09:42:02 2008 +0100 @@ -124,12 +124,9 @@ static void populate_physmap(struct memo } mfn = page_to_mfn(page); - - if ( unlikely(paging_mode_translate(d)) ) - { - guest_physmap_add_page(d, gpfn, mfn, a->extent_order); - } - else + guest_physmap_add_page(d, gpfn, mfn, a->extent_order); + + if ( !paging_mode_translate(d) ) { for ( j = 0; j < (1 << a->extent_order); j++ ) set_gpfn_from_mfn(mfn + j, gpfn + j); @@ -436,11 +433,9 @@ static long memory_exchange(XEN_GUEST_HA &gpfn, exch.out.extent_start, (i<<out_chunk_order)+j, 1); mfn = page_to_mfn(page); - if ( unlikely(paging_mode_translate(d)) ) - { - guest_physmap_add_page(d, gpfn, mfn, exch.out.extent_order); - } - else + guest_physmap_add_page(d, gpfn, mfn, exch.out.extent_order); + + if ( !paging_mode_translate(d) ) { for ( k = 0; k < (1UL << exch.out.extent_order); k++ ) set_gpfn_from_mfn(mfn + k, gpfn + k); _______________________________________________ Xen-changelog mailing list Xen-changelog@xxxxxxxxxxxxxxxxxxx http://lists.xensource.com/xen-changelog
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.