
[Xen-changelog] [xen-unstable] [IOMMU] dynamic VTd page table for HVM guest



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1251959369 -3600
# Node ID cfcfc0856b2f3fd57c3b5ff3e4e27106b2c72888
# Parent  52fd5baad59330b28e07b43de18d28206a9e63aa
[IOMMU] dynamic VTd page table for HVM guest

This patch makes the HVM guest's VT-d page table dynamic, just as it is
for PV guests, avoiding the overhead of maintaining the page table until
a PCI device is actually assigned to the HVM guest.

Signed-Off-By: Zhai, Edwin <edwin.zhai@xxxxxxxxx>
---
 xen/arch/ia64/xen/domain.c                    |    2 -
 xen/arch/ia64/xen/mm.c                        |    4 +-
 xen/arch/x86/mm/hap/p2m-ept.c                 |    2 -
 xen/arch/x86/mm/p2m.c                         |    2 -
 xen/drivers/passthrough/amd/iommu_map.c       |   52 --------------------------
 xen/drivers/passthrough/amd/pci_amd_iommu.c   |    2 -
 xen/drivers/passthrough/iommu.c               |    4 +-
 xen/drivers/passthrough/pci.c                 |    2 -
 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h |    1 
 xen/include/xen/sched.h                       |    2 -
 10 files changed, 9 insertions(+), 64 deletions(-)
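
In code terms, the per-page VT-d updates in the P2M paths below are now
gated on need_iommu(d) alone, and that flag is only raised when the first
PCI device is assigned, at which point the IO page table is populated in a
single pass. The following is a rough sketch of that assignment-time step,
not part of the patch itself; the function name is invented for
illustration, and the helpers are the ones visible in the iommu.c hunk
below, with details simplified:

    /* Sketch only -- not part of this patch; illustrates the lazy scheme
     * that replaces eager HVM VT-d page table maintenance. */
    static int sketch_assign_first_device(struct domain *d)
    {
        int rc = 0;

        /* While no device is assigned, need_iommu(d) is clear, so the
         * per-page guards in p2m_set_entry()/ept_set_entry() skip all
         * VT-d mapping work for an HVM guest. */
        if ( has_arch_pdevs(d) && !need_iommu(d) )
        {
            d->need_iommu = 1;
            /* Build the IO page table from the current P2M in one pass
             * (cf. assign_device() in xen/drivers/passthrough/iommu.c). */
            rc = iommu_populate_page_table(d);
        }

        return rc;
    }

Teardown is symmetric: iommu_domain_destroy() and pci_clean_dpci_irqs()
now return early unless need_iommu(d) is set.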

diff -r 52fd5baad593 -r cfcfc0856b2f xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Wed Sep 02 16:15:05 2009 +0100
+++ b/xen/arch/ia64/xen/domain.c        Thu Sep 03 07:29:29 2009 +0100
@@ -669,7 +669,7 @@ void arch_domain_destroy(struct domain *
                free_xenheap_pages(d->shared_info,
                                   get_order_from_shift(XSI_SHIFT));
 
-       if ( iommu_enabled && (is_hvm_domain(d) || need_iommu(d)) )     {
+       if ( iommu_enabled && need_iommu(d) )   {
                pci_release_devices(d);
                iommu_domain_destroy(d);
        }
diff -r 52fd5baad593 -r cfcfc0856b2f xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c    Wed Sep 02 16:15:05 2009 +0100
+++ b/xen/arch/ia64/xen/mm.c    Thu Sep 03 07:29:29 2009 +0100
@@ -1479,7 +1479,7 @@ zap_domain_page_one(struct domain *d, un
     if(!mfn_valid(mfn))
         return;
 
-    if ( iommu_enabled && (is_hvm_domain(d) || need_iommu(d)) ){
+    if ( iommu_enabled && need_iommu(d) ){
         int i, j;
         j = 1 << (PAGE_SHIFT-PAGE_SHIFT_4K);
         for(i = 0 ; i < j; i++)
@@ -2885,7 +2885,7 @@ __guest_physmap_add_page(struct domain *
     smp_mb();
     assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn,
                                ASSIGN_writable | ASSIGN_pgc_allocated);
-    if ( iommu_enabled && (is_hvm_domain(d) || need_iommu(d)) ){
+    if ( iommu_enabled && need_iommu(d) ){
         int i, j;
         j = 1 << (PAGE_SHIFT-PAGE_SHIFT_4K);
         for(i = 0 ; i < j; i++)
diff -r 52fd5baad593 -r cfcfc0856b2f xen/arch/x86/mm/hap/p2m-ept.c
--- a/xen/arch/x86/mm/hap/p2m-ept.c     Wed Sep 02 16:15:05 2009 +0100
+++ b/xen/arch/x86/mm/hap/p2m-ept.c     Thu Sep 03 07:29:29 2009 +0100
@@ -282,7 +282,7 @@ out:
     ept_sync_domain(d);
 
     /* Now the p2m table is not shared with vt-d page table */
-    if ( iommu_enabled && is_hvm_domain(d) && need_modify_vtd_table )
+    if ( iommu_enabled && need_iommu(d) && need_modify_vtd_table )
     {
         if ( p2mt == p2m_ram_rw )
         {
diff -r 52fd5baad593 -r cfcfc0856b2f xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Wed Sep 02 16:15:05 2009 +0100
+++ b/xen/arch/x86/mm/p2m.c     Thu Sep 03 07:29:29 2009 +0100
@@ -1199,7 +1199,7 @@ p2m_set_entry(struct domain *d, unsigned
          && (gfn + (1UL << page_order) - 1 > d->arch.p2m->max_mapped_pfn) )
         d->arch.p2m->max_mapped_pfn = gfn + (1UL << page_order) - 1;
 
-    if ( iommu_enabled && (is_hvm_domain(d) || need_iommu(d)) )
+    if ( iommu_enabled && need_iommu(d) )
     {
         if ( p2mt == p2m_ram_rw )
             for ( i = 0; i < (1UL << page_order); i++ )
diff -r 52fd5baad593 -r cfcfc0856b2f xen/drivers/passthrough/amd/iommu_map.c
--- a/xen/drivers/passthrough/amd/iommu_map.c   Wed Sep 02 16:15:05 2009 +0100
+++ b/xen/drivers/passthrough/amd/iommu_map.c   Thu Sep 03 07:29:29 2009 +0100
@@ -555,58 +555,6 @@ int amd_iommu_reserve_domain_unity_map(
     return 0;
 }
 
-int amd_iommu_sync_p2m(struct domain *d)
-{
-    unsigned long mfn, gfn;
-    u64 iommu_l2e;
-    struct page_info *page;
-    struct hvm_iommu *hd;
-    int iw = IOMMU_IO_WRITE_ENABLED;
-    int ir = IOMMU_IO_READ_ENABLED;
-
-    if ( !is_hvm_domain(d) )
-        return 0;
-
-    hd = domain_hvm_iommu(d);
-
-    spin_lock(&hd->mapping_lock);
-
-    if ( hd->p2m_synchronized )
-        goto out;
-
-    spin_lock(&d->page_alloc_lock);
-
-    page_list_for_each ( page, &d->page_list )
-    {
-        mfn = page_to_mfn(page);
-        gfn = get_gpfn_from_mfn(mfn);
-
-        if ( gfn == INVALID_M2P_ENTRY )
-            continue;
-
-        iommu_l2e = iommu_l2e_from_pfn(hd->root_table, hd->paging_mode, gfn);
-
-        if ( iommu_l2e == 0 )
-        {
-            spin_unlock(&d->page_alloc_lock);
-            spin_unlock(&hd->mapping_lock);
-            amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
-            domain_crash(d);
-            return -EFAULT;
-        }
-
-        set_iommu_l1e_present(iommu_l2e, gfn, (u64)mfn << PAGE_SHIFT, iw, ir);
-    }
-
-    spin_unlock(&d->page_alloc_lock);
-
-    hd->p2m_synchronized = 1;
-
-out:
-    spin_unlock(&hd->mapping_lock);
-    return 0;
-}
-
 void invalidate_all_iommu_pages(struct domain *d)
 {
     u32 cmd[4], entry;
diff -r 52fd5baad593 -r cfcfc0856b2f xen/drivers/passthrough/amd/pci_amd_iommu.c
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c       Wed Sep 02 16:15:05 2009 +0100
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c       Thu Sep 03 07:29:29 2009 +0100
@@ -311,8 +311,6 @@ static int amd_iommu_assign_device(struc
     int bdf = (bus << 8) | devfn;
     int req_id = ivrs_mappings[bdf].dte_requestor_id;
 
-    amd_iommu_sync_p2m(d);
-
     if ( ivrs_mappings[req_id].unity_map_enable )
     {
         amd_iommu_reserve_domain_unity_map(
diff -r 52fd5baad593 -r cfcfc0856b2f xen/drivers/passthrough/iommu.c
--- a/xen/drivers/passthrough/iommu.c   Wed Sep 02 16:15:05 2009 +0100
+++ b/xen/drivers/passthrough/iommu.c   Thu Sep 03 07:29:29 2009 +0100
@@ -137,7 +137,7 @@ int assign_device(struct domain *d, u8 b
     if ( (rc = hd->platform_ops->assign_device(d, bus, devfn)) )
         goto done;
 
-    if ( has_arch_pdevs(d) && !is_hvm_domain(d) && !need_iommu(d) )
+    if ( has_arch_pdevs(d) && !need_iommu(d) )
     {
         d->need_iommu = 1;
         rc = iommu_populate_page_table(d);
@@ -184,7 +184,7 @@ void iommu_domain_destroy(struct domain 
     if ( !iommu_enabled || !hd->platform_ops )
         return;
 
-    if ( !is_hvm_domain(d) && !need_iommu(d)  )
+    if ( !need_iommu(d)  )
         return;
 
     if ( need_iommu(d) )
diff -r 52fd5baad593 -r cfcfc0856b2f xen/drivers/passthrough/pci.c
--- a/xen/drivers/passthrough/pci.c     Wed Sep 02 16:15:05 2009 +0100
+++ b/xen/drivers/passthrough/pci.c     Thu Sep 03 07:29:29 2009 +0100
@@ -202,7 +202,7 @@ static void pci_clean_dpci_irqs(struct d
     if ( !iommu_enabled )
         return;
 
-    if ( !is_hvm_domain(d) && !need_iommu(d) )
+    if ( !need_iommu(d) )
         return;
 
     spin_lock(&d->event_lock);
diff -r 52fd5baad593 -r cfcfc0856b2f xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h     Wed Sep 02 16:15:05 2009 +0100
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h     Thu Sep 03 07:29:29 2009 +0100
@@ -63,7 +63,6 @@ u64 amd_iommu_get_next_table_from_pte(u3
 u64 amd_iommu_get_next_table_from_pte(u32 *entry);
 int amd_iommu_reserve_domain_unity_map(struct domain *domain,
         unsigned long phys_addr, unsigned long size, int iw, int ir);
-int amd_iommu_sync_p2m(struct domain *d);
 void invalidate_all_iommu_pages(struct domain *d);
 
 /* device table functions */
diff -r 52fd5baad593 -r cfcfc0856b2f xen/include/xen/sched.h
--- a/xen/include/xen/sched.h   Wed Sep 02 16:15:05 2009 +0100
+++ b/xen/include/xen/sched.h   Thu Sep 03 07:29:29 2009 +0100
@@ -557,7 +557,7 @@ uint64_t get_cpu_idle_time(unsigned int 
 
 #define is_hvm_domain(d) ((d)->is_hvm)
 #define is_hvm_vcpu(v)   (is_hvm_domain(v->domain))
-#define need_iommu(d)    ((d)->need_iommu && !(d)->is_hvm)
+#define need_iommu(d)    ((d)->need_iommu)
 
 void set_vcpu_migration_delay(unsigned int delay);
 unsigned int get_vcpu_migration_delay(void);
