
[Xen-changelog] [xen-unstable] AMD IOMMU: Defer IO pagetable construction until device assignment



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1207313645 -3600
# Node ID 85d8d3f5c65110e8000ef48f185e12c02754b101
# Parent  7d617282f18e41afd83b5ce9d40133f12ed75e77
AMD IOMMU: Defer IO pagetable construction until device assignment

During HVM domain creation, the I/O page tables are filled by copying
entries from the p2m table, which is a useless step for non-passthrough
domains. This patch defers I/O page table construction until the moment
of device assignment. If PCI devices are never assigned or hot-plugged,
the unnecessary duplication is avoided.

Signed-off-by: Wei Wang <wei.wang2@xxxxxxx>
---
 xen/drivers/passthrough/amd/iommu_map.c       |   76 ++++++++++++++++++++++----
 xen/drivers/passthrough/amd/pci_amd_iommu.c   |    5 +
 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h |    1 
 xen/include/xen/hvm/iommu.h                   |    3 -
 4 files changed, 72 insertions(+), 13 deletions(-)
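
In outline, the change turns the IO page tables into a lazily built
structure guarded by a "synchronized" flag: map/unmap requests for an
HVM domain are skipped until the first device assignment, at which point
amd_iommu_sync_p2m() walks the domain's page list once and copies every
valid gfn->mfn mapping into the IO page tables. A minimal standalone
sketch of that pattern follows (illustrative only, not the Xen code:
fake_domain, iommu_map_page, sync_p2m and the fixed-size arrays are made
up for this example; only the p2m_synchronized flag mirrors the patch):

#include <stdio.h>
#include <stdbool.h>

#define NR_GFNS 8

struct fake_domain {
    bool p2m_synchronized;           /* set on first device assignment  */
    unsigned long p2m[NR_GFNS];      /* guest p2m: gfn -> mfn           */
    unsigned long io_table[NR_GFNS]; /* stand-in for the IO page tables */
};

/* Before any device is assigned, a mapping request only updates the p2m;
 * the IO page table is left untouched (the early return added by this
 * patch).  Afterwards both are kept in sync. */
static void iommu_map_page(struct fake_domain *d, unsigned long gfn,
                           unsigned long mfn)
{
    d->p2m[gfn] = mfn;
    if ( !d->p2m_synchronized )
        return;                      /* deferred: no passthrough device yet */
    d->io_table[gfn] = mfn;
}

/* First device assignment: copy the whole p2m into the IO table once. */
static void sync_p2m(struct fake_domain *d)
{
    unsigned long gfn;

    if ( d->p2m_synchronized )
        return;
    for ( gfn = 0; gfn < NR_GFNS; gfn++ )
        d->io_table[gfn] = d->p2m[gfn];
    d->p2m_synchronized = true;
}

int main(void)
{
    struct fake_domain dom = { 0 };

    iommu_map_page(&dom, 1, 0x100);  /* ignored by the IO table ...    */
    sync_p2m(&dom);                  /* ... until a device is assigned */
    iommu_map_page(&dom, 2, 0x200);  /* now mirrored immediately       */
    printf("io_table[1]=%#lx io_table[2]=%#lx\n",
           dom.io_table[1], dom.io_table[2]);
    return 0;
}

The real code additionally takes hd->mapping_lock around both paths, so a
mapping update racing with the first assignment cannot be lost.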

diff -r 7d617282f18e -r 85d8d3f5c651 xen/drivers/passthrough/amd/iommu_map.c
--- a/xen/drivers/passthrough/amd/iommu_map.c   Fri Apr 04 13:10:34 2008 +0100
+++ b/xen/drivers/passthrough/amd/iommu_map.c   Fri Apr 04 13:54:05 2008 +0100
@@ -388,17 +388,17 @@ int amd_iommu_map_page(struct domain *d,
     unsigned long flags;
     u64 maddr;
     struct hvm_iommu *hd = domain_hvm_iommu(d);
-    int iw, ir;
+    int iw = IOMMU_IO_WRITE_ENABLED;
+    int ir = IOMMU_IO_READ_ENABLED;
 
     BUG_ON( !hd->root_table );
 
+    spin_lock_irqsave(&hd->mapping_lock, flags);
+
+    if ( is_hvm_domain(d) && !hd->p2m_synchronized )
+        goto out;
+
     maddr = (u64)mfn << PAGE_SHIFT;
-
-    iw = IOMMU_IO_WRITE_ENABLED;
-    ir = IOMMU_IO_READ_ENABLED;
-
-    spin_lock_irqsave(&hd->mapping_lock, flags);
-
     pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
     if ( pte == NULL )
     {
@@ -409,7 +409,7 @@ int amd_iommu_map_page(struct domain *d,
     }
 
     set_page_table_entry_present((u32 *)pte, maddr, iw, ir);
-
+out:
     spin_unlock_irqrestore(&hd->mapping_lock, flags);
     return 0;
 }
@@ -425,10 +425,16 @@ int amd_iommu_unmap_page(struct domain *
 
     BUG_ON( !hd->root_table );
 
+    spin_lock_irqsave(&hd->mapping_lock, flags);
+
+    if ( is_hvm_domain(d) && !hd->p2m_synchronized )
+    {
+        spin_unlock_irqrestore(&hd->mapping_lock, flags);
+        return 0;
+    }
+
     requestor_id = hd->domain_id;
     io_addr = (u64)gfn << PAGE_SHIFT;
-
-    spin_lock_irqsave(&hd->mapping_lock, flags);
 
     pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
     if ( pte == NULL )
@@ -486,3 +492,53 @@ int amd_iommu_reserve_domain_unity_map(
     spin_unlock_irqrestore(&hd->mapping_lock, flags);
     return 0;
 }
+
+int amd_iommu_sync_p2m(struct domain *d)
+{
+    unsigned long mfn, gfn, flags;
+    void *pte;
+    u64 maddr;
+    struct list_head *entry;
+    struct page_info *page;
+    struct hvm_iommu *hd;
+    int iw = IOMMU_IO_WRITE_ENABLED;
+    int ir = IOMMU_IO_READ_ENABLED;
+
+    if ( !is_hvm_domain(d) )
+        return 0;
+
+    hd = domain_hvm_iommu(d);
+
+    spin_lock_irqsave(&hd->mapping_lock, flags);
+
+    if ( hd->p2m_synchronized )
+        goto out;
+
+    for ( entry = d->page_list.next; entry != &d->page_list;
+            entry = entry->next )
+    {
+        page = list_entry(entry, struct page_info, list);
+        mfn = page_to_mfn(page);
+        gfn = get_gpfn_from_mfn(mfn);
+
+        if ( gfn == INVALID_M2P_ENTRY )
+            continue;
+
+        maddr = (u64)mfn << PAGE_SHIFT;
+        pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
+        if ( pte == NULL )
+        {
+            dprintk(XENLOG_ERR,
+                    "AMD IOMMU: Invalid IO pagetable entry gfn = %lx\n", gfn);
+            spin_unlock_irqrestore(&hd->mapping_lock, flags);
+            return -EFAULT;
+        }
+        set_page_table_entry_present((u32 *)pte, maddr, iw, ir);
+    }
+
+    hd->p2m_synchronized = 1;
+
+out:
+    spin_unlock_irqrestore(&hd->mapping_lock, flags);
+    return 0;
+}
diff -r 7d617282f18e -r 85d8d3f5c651 xen/drivers/passthrough/amd/pci_amd_iommu.c
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c       Fri Apr 04 13:10:34 2008 +0100
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c       Fri Apr 04 13:54:05 2008 +0100
@@ -553,8 +553,9 @@ int amd_iommu_assign_device(struct domai
 int amd_iommu_assign_device(struct domain *d, u8 bus, u8 devfn)
 {
     int bdf = (bus << 8) | devfn;
-    int req_id;
-    req_id = ivrs_mappings[bdf].dte_requestor_id;
+    int req_id = ivrs_mappings[bdf].dte_requestor_id;
+
+    amd_iommu_sync_p2m(d);
 
     if ( ivrs_mappings[req_id].unity_map_enable )
     {
diff -r 7d617282f18e -r 85d8d3f5c651 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h     Fri Apr 04 13:10:34 2008 +0100
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h     Fri Apr 04 13:54:05 2008 +0100
@@ -57,6 +57,7 @@ void *amd_iommu_get_vptr_from_page_table
 void *amd_iommu_get_vptr_from_page_table_entry(u32 *entry);
 int amd_iommu_reserve_domain_unity_map(struct domain *domain,
         unsigned long phys_addr, unsigned long size, int iw, int ir);
+int amd_iommu_sync_p2m(struct domain *d);
 
 /* device table functions */
 void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr,
diff -r 7d617282f18e -r 85d8d3f5c651 xen/include/xen/hvm/iommu.h
--- a/xen/include/xen/hvm/iommu.h       Fri Apr 04 13:10:34 2008 +0100
+++ b/xen/include/xen/hvm/iommu.h       Fri Apr 04 13:54:05 2008 +0100
@@ -48,9 +48,10 @@ struct hvm_iommu {
     int domain_id;
     int paging_mode;
     void *root_table;
+    bool_t p2m_synchronized;
 
     /* iommu_ops */
     struct iommu_ops *platform_ops;
 };
 
-#endif // __ASM_X86_HVM_IOMMU_H__
+#endif /* __ASM_X86_HVM_IOMMU_H__ */
