
[Xen-changelog] [xen-unstable] AMD IOMMU: Allocate I/O pagetable from domheap instead of xenheap



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1231516677 0
# Node ID 1dfc48a8c361051d5bce8156a7942ffaaa42c89d
# Parent  6d040d138e8fb7958bbf7bfd0fee507addb4b99c
AMD IOMMU: Allocate I/O pagetable from domheap instead of xenheap

Signed-off-by: Wei Wang <wei.wang2@xxxxxxx>
---
 xen/drivers/passthrough/amd/iommu_init.c      |   28 ++--
 xen/drivers/passthrough/amd/iommu_intr.c      |   20 +--
 xen/drivers/passthrough/amd/iommu_map.c       |  158 ++++++++++++++------------
 xen/drivers/passthrough/amd/pci_amd_iommu.c   |  127 ++++++++------------
 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h |   38 +++++-
 xen/include/xen/hvm/iommu.h                   |    2 
 6 files changed, 203 insertions(+), 170 deletions(-)
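
Background for readers of this change: xenheap pages are permanently mapped
in Xen's virtual address space, while domheap pages need not be (notably on
x86-32 builds). Moving the I/O page tables to the domheap therefore means
callers keep struct page_info pointers or machine addresses rather than
virtual pointers, and bracket every access with
map_domain_page()/unmap_domain_page(). A minimal sketch of the pattern the
patch adopts, using only Xen-internal calls that appear in the diff below
(sketch_alloc_table() itself is just an illustrative name):

    /* Allocate one domheap page for a table and zero it through a
     * transient mapping; the caller keeps only the page_info pointer. */
    static struct page_info *sketch_alloc_table(void)
    {
        struct page_info *pg = alloc_domheap_page(NULL, 0);
        void *va;

        if ( pg == NULL )
            return NULL;
        va = map_domain_page(page_to_mfn(pg)); /* short-lived mapping */
        memset(va, 0, PAGE_SIZE);
        unmap_domain_page(va);                 /* never cache this vaddr */
        return pg;                             /* refer to it by maddr */
    }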

diff -r 6d040d138e8f -r 1dfc48a8c361 xen/drivers/passthrough/amd/iommu_init.c
--- a/xen/drivers/passthrough/amd/iommu_init.c  Fri Jan 09 13:00:10 2009 +0000
+++ b/xen/drivers/passthrough/amd/iommu_init.c  Fri Jan 09 15:57:57 2009 +0000
@@ -535,10 +535,11 @@ static void __init deallocate_iommu_tabl
 static void __init deallocate_iommu_table_struct(
     struct table_struct *table)
 {
+    int order = 0;
     if ( table->buffer )
     {
-        free_xenheap_pages(table->buffer,
-            get_order_from_bytes(table->alloc_size));
+        order = get_order_from_bytes(table->alloc_size);
+        __free_amd_iommu_tables(table->buffer, order);
         table->buffer = NULL;
     }
 }
@@ -552,16 +553,19 @@ static int __init allocate_iommu_table_s
 static int __init allocate_iommu_table_struct(struct table_struct *table,
                                               const char *name)
 {
-    table->buffer = (void *) alloc_xenheap_pages(
-        get_order_from_bytes(table->alloc_size));
-
-    if ( !table->buffer )
-    {
-        amd_iov_error("Error allocating %s\n", name);
-        return -ENOMEM;
-    }
-
-    memset(table->buffer, 0, table->alloc_size);
+    int order = 0;
+    if ( table->buffer == NULL )
+    {
+        order = get_order_from_bytes(table->alloc_size);
+        table->buffer = __alloc_amd_iommu_tables(order);
+
+        if ( table->buffer == NULL )
+        {
+            amd_iov_error("Error allocating %s\n", name);
+            return -ENOMEM;
+        }
+        memset(table->buffer, 0, PAGE_SIZE * (1UL << order));
+    }
     return 0;
 }
 
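These two wrappers keep the per-IOMMU shared tables (such as the device
table and command buffer) on the xenheap, but route them through the new
__alloc_amd_iommu_tables()/__free_amd_iommu_tables() helpers, and the
memset now clears the whole power-of-two allocation rather than just
alloc_size. A sketch of the order arithmetic involved (order_from_bytes()
mirrors Xen's get_order_from_bytes(); the name here is illustrative):

    /* Smallest order whose page count covers 'bytes'; the zeroing above
     * then clears PAGE_SIZE * (1UL << order), i.e. the full block. */
    static unsigned int order_from_bytes(unsigned long bytes)
    {
        unsigned int order = 0;

        while ( (PAGE_SIZE << order) < bytes )
            order++;
        return order;
    }
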
diff -r 6d040d138e8f -r 1dfc48a8c361 xen/drivers/passthrough/amd/iommu_intr.c
--- a/xen/drivers/passthrough/amd/iommu_intr.c  Fri Jan 09 13:00:10 2009 +0000
+++ b/xen/drivers/passthrough/amd/iommu_intr.c  Fri Jan 09 15:57:57 2009 +0000
@@ -22,6 +22,7 @@
 #include <asm/amd-iommu.h>
 #include <asm/hvm/svm/amd-iommu-proto.h>
 
+#define INTREMAP_TABLE_ORDER    1
 DEFINE_SPINLOCK(int_remap_table_lock);
 void *int_remap_table = NULL;
 
@@ -112,14 +113,17 @@ int __init amd_iommu_setup_intremap_tabl
     unsigned long flags;
 
     spin_lock_irqsave(&int_remap_table_lock, flags);
+
     if ( int_remap_table == NULL )
-        int_remap_table = (void *)alloc_xenheap_pages(1);
-    if ( !int_remap_table )
-    {
-        spin_unlock_irqrestore(&int_remap_table_lock, flags);
-        return -ENOMEM;
-    }
-    memset((u8*)int_remap_table, 0, PAGE_SIZE*2);
+    {
+        int_remap_table = __alloc_amd_iommu_tables(INTREMAP_TABLE_ORDER);
+        if ( int_remap_table == NULL )
+        {
+            spin_unlock_irqrestore(&int_remap_table_lock, flags);
+            return -ENOMEM;
+        }
+        memset(int_remap_table, 0, PAGE_SIZE * (1UL << INTREMAP_TABLE_ORDER));
+    }
     spin_unlock_irqrestore(&int_remap_table_lock, flags);
 
     return 0;
@@ -211,7 +215,7 @@ int __init deallocate_intremap_table(voi
     spin_lock_irqsave(&int_remap_table_lock, flags);
     if ( int_remap_table )
     {
-        free_xenheap_pages(int_remap_table, 1);
+        __free_amd_iommu_tables(int_remap_table, INTREMAP_TABLE_ORDER);
         int_remap_table = NULL;
     }
     spin_unlock_irqrestore(&int_remap_table_lock, flags);
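Two latent problems disappear with this hunk: the old code ran memset() on
every call, re-zeroing a live remap table if the function was entered a
second time, and the literal PAGE_SIZE*2 silently depended on the
alloc_xenheap_pages(1) order above it. With INTREMAP_TABLE_ORDER the size
is derived rather than duplicated (INTREMAP_TABLE_BYTES below is a
hypothetical macro, not part of the patch):

    /* Order 1 on 4 KiB pages = two pages = 8 KiB, matching the old
     * literals alloc_xenheap_pages(1) / memset(.., PAGE_SIZE*2). */
    #define INTREMAP_TABLE_BYTES  (PAGE_SIZE * (1UL << INTREMAP_TABLE_ORDER))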
diff -r 6d040d138e8f -r 1dfc48a8c361 xen/drivers/passthrough/amd/iommu_map.c
--- a/xen/drivers/passthrough/amd/iommu_map.c   Fri Jan 09 13:00:10 2009 +0000
+++ b/xen/drivers/passthrough/amd/iommu_map.c   Fri Jan 09 15:57:57 2009 +0000
@@ -159,21 +159,39 @@ void flush_command_buffer(struct amd_iom
     }
 }
 
-static void clear_page_table_entry_present(u32 *pte)
-{
-    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, pte[0],
-                         IOMMU_PTE_PRESENT_MASK,
-                         IOMMU_PTE_PRESENT_SHIFT, &pte[0]);
-}
-
-static void set_page_table_entry_present(u32 *pte, u64 page_addr,
-                                         int iw, int ir)
+static void clear_iommu_l1e_present(u64 l2e, unsigned long gfn)
+{
+    u32 *l1e;
+    int offset;
+    void *l1_table;
+
+    l1_table = map_domain_page(l2e >> PAGE_SHIFT);
+
+    offset = gfn & (~PTE_PER_TABLE_MASK);
+    l1e = (u32*)(l1_table + (offset * IOMMU_PAGE_TABLE_ENTRY_SIZE));
+
+    /* clear l1 entry */
+    l1e[0] = l1e[1] = 0;
+
+    unmap_domain_page(l1_table);
+}
+
+static void set_iommu_l1e_present(u64 l2e, unsigned long gfn,
+                                 u64 maddr, int iw, int ir)
 {
     u64 addr_lo, addr_hi;
     u32 entry;
-
-    addr_lo = page_addr & DMA_32BIT_MASK;
-    addr_hi = page_addr >> 32;
+    void *l1_table;
+    int offset;
+    u32 *l1e;
+
+    l1_table = map_domain_page(l2e >> PAGE_SHIFT);
+
+    offset = gfn & (~PTE_PER_TABLE_MASK);
+    l1e = (u32*)((u8*)l1_table + (offset * IOMMU_PAGE_TABLE_ENTRY_SIZE));
+
+    addr_lo = maddr & DMA_32BIT_MASK;
+    addr_hi = maddr >> 32;
 
     set_field_in_reg_u32((u32)addr_hi, 0,
                          IOMMU_PTE_ADDR_HIGH_MASK,
@@ -186,7 +204,7 @@ static void set_page_table_entry_present
                          IOMMU_CONTROL_DISABLED, entry,
                          IOMMU_PTE_IO_READ_PERMISSION_MASK,
                          IOMMU_PTE_IO_READ_PERMISSION_SHIFT, &entry);
-    pte[1] = entry;
+    l1e[1] = entry;
 
     set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
                          IOMMU_PTE_ADDR_LOW_MASK,
@@ -197,9 +215,10 @@ static void set_page_table_entry_present
     set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
                          IOMMU_PTE_PRESENT_MASK,
                          IOMMU_PTE_PRESENT_SHIFT, &entry);
-    pte[0] = entry;
-}
-
+    l1e[0] = entry;
+
+    unmap_domain_page(l1_table);
+}
 
 static void amd_iommu_set_page_directory_entry(u32 *pde, 
                                                u64 next_ptr, u8 next_level)
@@ -327,7 +346,7 @@ void amd_iommu_set_dev_table_entry(u32 *
     dte[0] = entry;
 }
 
-void *amd_iommu_get_vptr_from_page_table_entry(u32 *entry)
+u64 amd_iommu_get_next_table_from_pte(u32 *entry)
 {
     u64 addr_lo, addr_hi, ptr;
 
@@ -342,7 +361,7 @@ void *amd_iommu_get_vptr_from_page_table
         IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_SHIFT);
 
     ptr = (addr_hi << 32) | (addr_lo << PAGE_SHIFT);
-    return ptr ? maddr_to_virt((unsigned long)ptr) : NULL;
+    return ptr;
 }
 
 static int amd_iommu_is_pte_present(u32 *entry)
@@ -381,54 +400,53 @@ int amd_iommu_is_dte_page_translation_va
                                    IOMMU_DEV_TABLE_TRANSLATION_VALID_SHIFT));
 }
 
-static void *get_pte_from_page_tables(void *table, int level,
-                                      unsigned long io_pfn)
+static u64 iommu_l2e_from_pfn(struct page_info *table, int level,
+                              unsigned long io_pfn)
 {
     unsigned long offset;
     void *pde = NULL;
-
-    BUG_ON(table == NULL);
-
-    while ( level > 0 )
+    void *table_vaddr;
+    u64 next_table_maddr = 0;
+
+    BUG_ON( table == NULL || level == 0 );
+
+    while ( level > 1 )
     {
         offset = io_pfn >> ((PTE_PER_TABLE_SHIFT *
                              (level - IOMMU_PAGING_MODE_LEVEL_1)));
         offset &= ~PTE_PER_TABLE_MASK;
-        pde = table + (offset * IOMMU_PAGE_TABLE_ENTRY_SIZE);
-
-        if ( level == 1 )
-            break;
-        if ( !pde )
-            return NULL;
+
+        table_vaddr = map_domain_page(page_to_mfn(table));
+        pde = table_vaddr + (offset * IOMMU_PAGE_TABLE_ENTRY_SIZE);
+        next_table_maddr = amd_iommu_get_next_table_from_pte(pde);
+
         if ( !amd_iommu_is_pte_present(pde) )
         {
-            void *next_table = alloc_xenheap_page();
-            if ( next_table == NULL )
-                return NULL;
-            memset(next_table, 0, PAGE_SIZE);
-            if ( *(u64 *)pde == 0 )
+            if ( next_table_maddr == 0 )
             {
-                unsigned long next_ptr = (u64)virt_to_maddr(next_table);
+                table = alloc_amd_iommu_pgtable();
+                if ( table == NULL )
+                    return 0;
+                next_table_maddr = page_to_maddr(table);
                 amd_iommu_set_page_directory_entry(
-                    (u32 *)pde, next_ptr, level - 1);
+                    (u32 *)pde, next_table_maddr, level - 1);
             }
-            else
-            {
-                free_xenheap_page(next_table);
-            }
+            else /* should never reach here */
+                return 0;
         }
-        table = amd_iommu_get_vptr_from_page_table_entry(pde);
+
+        unmap_domain_page(table_vaddr);
+        table = maddr_to_page(next_table_maddr);
         level--;
     }
 
-    return pde;
+    return next_table_maddr;
 }
 
 int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
 {
-    void *pte;
+    u64 iommu_l2e;
     unsigned long flags;
-    u64 maddr;
     struct hvm_iommu *hd = domain_hvm_iommu(d);
     int iw = IOMMU_IO_WRITE_ENABLED;
     int ir = IOMMU_IO_READ_ENABLED;
@@ -440,16 +458,15 @@ int amd_iommu_map_page(struct domain *d,
     if ( is_hvm_domain(d) && !hd->p2m_synchronized )
         goto out;
 
-    maddr = (u64)mfn << PAGE_SHIFT;
-    pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
-    if ( pte == NULL )
+    iommu_l2e = iommu_l2e_from_pfn(hd->root_table, hd->paging_mode, gfn);
+    if ( iommu_l2e == 0 )
     {
         amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
         spin_unlock_irqrestore(&hd->mapping_lock, flags);
         return -EFAULT;
     }
-
-    set_page_table_entry_present((u32 *)pte, maddr, iw, ir);
+    set_iommu_l1e_present(iommu_l2e, gfn, (u64)mfn << PAGE_SHIFT, iw, ir);
+
 out:
     spin_unlock_irqrestore(&hd->mapping_lock, flags);
     return 0;
@@ -457,10 +474,8 @@ out:
 
 int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
 {
-    void *pte;
+    u64 iommu_l2e;
     unsigned long flags;
-    u64 io_addr = gfn;
-    int requestor_id;
     struct amd_iommu *iommu;
     struct hvm_iommu *hd = domain_hvm_iommu(d);
 
@@ -474,11 +489,9 @@ int amd_iommu_unmap_page(struct domain *
         return 0;
     }
 
-    requestor_id = hd->domain_id;
-    io_addr = (u64)gfn << PAGE_SHIFT;
-
-    pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
-    if ( pte == NULL )
+    iommu_l2e = iommu_l2e_from_pfn(hd->root_table, hd->paging_mode, gfn);
+
+    if ( iommu_l2e == 0 )
     {
         amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
         spin_unlock_irqrestore(&hd->mapping_lock, flags);
@@ -486,14 +499,14 @@ int amd_iommu_unmap_page(struct domain *
     }
 
     /* mark PTE as 'page not present' */
-    clear_page_table_entry_present((u32 *)pte);
+    clear_iommu_l1e_present(iommu_l2e, gfn);
     spin_unlock_irqrestore(&hd->mapping_lock, flags);
 
     /* send INVALIDATE_IOMMU_PAGES command */
     for_each_amd_iommu ( iommu )
     {
         spin_lock_irqsave(&iommu->lock, flags);
-        invalidate_iommu_page(iommu, io_addr, requestor_id);
+        invalidate_iommu_page(iommu, (u64)gfn << PAGE_SHIFT, hd->domain_id);
         flush_command_buffer(iommu);
         spin_unlock_irqrestore(&iommu->lock, flags);
     }
@@ -506,8 +519,8 @@ int amd_iommu_reserve_domain_unity_map(
     unsigned long phys_addr,
     unsigned long size, int iw, int ir)
 {
+    u64 iommu_l2e;
     unsigned long flags, npages, i;
-    void *pte;
     struct hvm_iommu *hd = domain_hvm_iommu(domain);
 
     npages = region_to_pages(phys_addr, size);
@@ -515,17 +528,20 @@ int amd_iommu_reserve_domain_unity_map(
     spin_lock_irqsave(&hd->mapping_lock, flags);
     for ( i = 0; i < npages; ++i )
     {
-        pte = get_pte_from_page_tables(
+        iommu_l2e = iommu_l2e_from_pfn(
             hd->root_table, hd->paging_mode, phys_addr >> PAGE_SHIFT);
-        if ( pte == NULL )
+
+        if ( iommu_l2e == 0 )
         {
             amd_iov_error(
             "Invalid IO pagetable entry phys_addr = %lx\n", phys_addr);
             spin_unlock_irqrestore(&hd->mapping_lock, flags);
             return -EFAULT;
         }
-        set_page_table_entry_present((u32 *)pte,
-                                     phys_addr, iw, ir);
+
+        set_iommu_l1e_present(iommu_l2e,
+            (phys_addr >> PAGE_SHIFT), phys_addr, iw, ir);
+
         phys_addr += PAGE_SIZE;
     }
     spin_unlock_irqrestore(&hd->mapping_lock, flags);
@@ -535,8 +551,7 @@ int amd_iommu_sync_p2m(struct domain *d)
 int amd_iommu_sync_p2m(struct domain *d)
 {
     unsigned long mfn, gfn, flags;
-    void *pte;
-    u64 maddr;
+    u64 iommu_l2e;
     struct list_head *entry;
     struct page_info *page;
     struct hvm_iommu *hd;
@@ -563,15 +578,16 @@ int amd_iommu_sync_p2m(struct domain *d)
         if ( gfn == INVALID_M2P_ENTRY )
             continue;
 
-        maddr = (u64)mfn << PAGE_SHIFT;
-        pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
-        if ( pte == NULL )
+        iommu_l2e = iommu_l2e_from_pfn(hd->root_table, hd->paging_mode, gfn);
+
+        if ( iommu_l2e == 0 )
         {
             amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
             spin_unlock_irqrestore(&hd->mapping_lock, flags);
             return -EFAULT;
         }
-        set_page_table_entry_present((u32 *)pte, maddr, iw, ir);
+
+        set_iommu_l1e_present(iommu_l2e, gfn, (u64)mfn << PAGE_SHIFT, iw, ir);
     }
 
     hd->p2m_synchronized = 1;
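The walk in iommu_l2e_from_pfn() above is the heart of the conversion: each
level maps the current table page transiently, pulls the next level's
machine address out of the PDE, unmaps, and descends, so no table ever
needs a permanent virtual mapping. Stripped of the bit-field helpers and
the allocate-on-miss path, the shape is roughly as follows (pde_offset()
and read_next_maddr() are illustrative stand-ins for the offset arithmetic
and amd_iommu_get_next_table_from_pte()):

    /* Walk a table tree kept as machine addresses, mapping each level
     * only long enough to read one PDE. Returns the maddr of the
     * level-1 table holding the PTE for 'gfn', or 0 on a miss. */
    static u64 sketch_walk(struct page_info *table, int level,
                           unsigned long gfn)
    {
        u64 next_maddr = 0;

        while ( level > 1 )
        {
            void *va = map_domain_page(page_to_mfn(table));

            next_maddr = read_next_maddr(va + pde_offset(gfn, level));
            unmap_domain_page(va);
            if ( next_maddr == 0 )
                return 0;          /* the real code allocates a table here */
            table = maddr_to_page(next_maddr);
            level--;
        }
        return next_maddr;
    }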
diff -r 6d040d138e8f -r 1dfc48a8c361 xen/drivers/passthrough/amd/pci_amd_iommu.c
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c       Fri Jan 09 13:00:10 2009 +0000
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c       Fri Jan 09 15:57:57 2009 +0000
@@ -29,17 +29,6 @@ extern struct ivrs_mappings *ivrs_mappin
 extern struct ivrs_mappings *ivrs_mappings;
 extern void *int_remap_table;
 
-static void deallocate_domain_page_tables(struct hvm_iommu *hd)
-{
-    if ( hd->root_table )
-        free_xenheap_page(hd->root_table);
-}
-
-static void deallocate_domain_resources(struct hvm_iommu *hd)
-{
-    deallocate_domain_page_tables(hd);
-}
-
 int __init amd_iommu_init(void)
 {
     struct amd_iommu *iommu;
@@ -79,8 +68,6 @@ static void amd_iommu_setup_domain_devic
     struct domain *domain, struct amd_iommu *iommu, int bdf)
 {
     void *dte;
-    u64 root_ptr;
-    u64 intremap_ptr;
     unsigned long flags;
     int req_id;
     u8 sys_mgt, dev_ex;
@@ -88,22 +75,21 @@ static void amd_iommu_setup_domain_devic
 
     BUG_ON( !hd->root_table || !hd->paging_mode || !int_remap_table );
 
-    root_ptr = (u64)virt_to_maddr(hd->root_table);
     /* get device-table entry */
     req_id = ivrs_mappings[bdf].dte_requestor_id;
-    dte = iommu->dev_table.buffer +
-        (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
-
-    intremap_ptr = (u64)virt_to_maddr(int_remap_table);
+    dte = iommu->dev_table.buffer + (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
+
+    spin_lock_irqsave(&iommu->lock, flags);
 
     if ( !amd_iommu_is_dte_page_translation_valid((u32 *)dte) )
     {
-        spin_lock_irqsave(&iommu->lock, flags); 
-
         /* bind DTE to domain page-tables */
         sys_mgt = ivrs_mappings[req_id].dte_sys_mgt_enable;
         dev_ex = ivrs_mappings[req_id].dte_allow_exclusion;
-        amd_iommu_set_dev_table_entry((u32 *)dte, root_ptr, intremap_ptr,
+
+        amd_iommu_set_dev_table_entry((u32 *)dte,
+                                      page_to_maddr(hd->root_table),
+                                      virt_to_maddr(int_remap_table),
                                       hd->domain_id, sys_mgt, dev_ex,
                                       hd->paging_mode);
 
@@ -111,11 +97,15 @@ static void amd_iommu_setup_domain_devic
         invalidate_interrupt_table(iommu, req_id);
         flush_command_buffer(iommu);
         amd_iov_info("Enable DTE:0x%x, "
-                "root_ptr:%"PRIx64", domain_id:%d, paging_mode:%d\n",
-                req_id, root_ptr, hd->domain_id, hd->paging_mode);
-
-        spin_unlock_irqrestore(&iommu->lock, flags);
-    }
+                "root_table:%"PRIx64", interrupt_table:%"PRIx64", "
+                "domain_id:%d, paging_mode:%d\n",
+                req_id, (u64)page_to_maddr(hd->root_table),
+                (u64)virt_to_maddr(int_remap_table), hd->domain_id,
+                hd->paging_mode);
+    }
+
+    spin_unlock_irqrestore(&iommu->lock, flags);
+
 }
 
 static void amd_iommu_setup_dom0_devices(struct domain *d)
@@ -188,10 +178,9 @@ static int allocate_domain_resources(str
     spin_lock_irqsave(&hd->mapping_lock, flags);
     if ( !hd->root_table )
     {
-        hd->root_table = (void *)alloc_xenheap_page();
+        hd->root_table = alloc_amd_iommu_pgtable();
         if ( !hd->root_table )
             goto error_out;
-        memset((u8*)hd->root_table, 0, PAGE_SIZE);
     }
     spin_unlock_irqrestore(&hd->mapping_lock, flags);
 
@@ -228,7 +217,8 @@ static int amd_iommu_domain_init(struct 
     /* allocate page directory */
     if ( allocate_domain_resources(hd) != 0 )
     {
-        deallocate_domain_resources(hd);
+        if ( hd->root_table )
+            free_domheap_page(hd->root_table);
         return -ENOMEM;
     }
 
@@ -258,12 +248,11 @@ static void amd_iommu_disable_domain_dev
     int req_id;
 
     req_id = ivrs_mappings[bdf].dte_requestor_id;
-    dte = iommu->dev_table.buffer +
-        (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
-
+    dte = iommu->dev_table.buffer + (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
+
+    spin_lock_irqsave(&iommu->lock, flags); 
     if ( amd_iommu_is_dte_page_translation_valid((u32 *)dte) )
     {
-        spin_lock_irqsave(&iommu->lock, flags); 
         memset (dte, 0, IOMMU_DEV_TABLE_ENTRY_SIZE);
         invalidate_dev_table_entry(iommu, req_id);
         flush_command_buffer(iommu);
@@ -271,8 +260,8 @@ static void amd_iommu_disable_domain_dev
                 " domain_id:%d, paging_mode:%d\n",
                 req_id,  domain_hvm_iommu(domain)->domain_id,
                 domain_hvm_iommu(domain)->paging_mode);
-        spin_unlock_irqrestore(&iommu->lock, flags);
-    }
+    }
+    spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
 static int reassign_device( struct domain *source, struct domain *target,
@@ -338,55 +327,43 @@ static int amd_iommu_assign_device(struc
     return reassign_device(dom0, d, bus, devfn);
 }
 
-static void deallocate_next_page_table(void *table, unsigned long index,
-                                       int level)
-{
-    unsigned long next_index;
-    void *next_table, *pde;
-    int next_level;
-
-    pde = table + (index * IOMMU_PAGE_TABLE_ENTRY_SIZE);
-    next_table = amd_iommu_get_vptr_from_page_table_entry((u32 *)pde);
-
-    if ( next_table )
-    {
-        next_level = level - 1;
-        if ( next_level > 1 )
+static void deallocate_next_page_table(struct page_info* pg, int level)
+{
+    void *table_vaddr, *pde;
+    u64 next_table_maddr;
+    int index;
+
+    table_vaddr = map_domain_page(page_to_mfn(pg));
+
+    if ( level > 1 )
+    {
+        for ( index = 0; index < PTE_PER_TABLE_SIZE; index++ )
         {
-            next_index = 0;
-            do
+            pde = table_vaddr + (index * IOMMU_PAGE_TABLE_ENTRY_SIZE);
+            next_table_maddr = amd_iommu_get_next_table_from_pte(pde);
+            if ( next_table_maddr != 0 )
             {
-                deallocate_next_page_table(next_table,
-                                           next_index, next_level);
-                next_index++;
-            } while (next_index < PTE_PER_TABLE_SIZE);
+                deallocate_next_page_table(
+                    maddr_to_page(next_table_maddr), level - 1);
+            }
         }
-
-        free_xenheap_page(next_table);
-    }
+    }
+
+    unmap_domain_page(table_vaddr);
+    free_amd_iommu_pgtable(pg);
 }
 
 static void deallocate_iommu_page_tables(struct domain *d)
 {
-    unsigned long index;
     struct hvm_iommu *hd  = domain_hvm_iommu(d);
 
-    if ( hd ->root_table )
-    {
-        index = 0;
-
-        do
-        {
-            deallocate_next_page_table(hd->root_table,
-                                       index, hd->paging_mode);
-            index++;
-        } while ( index < PTE_PER_TABLE_SIZE );
-
-        free_xenheap_page(hd ->root_table);
-    }
-
-    hd ->root_table = NULL;
-}
+    if ( hd->root_table )
+    {
+        deallocate_next_page_table(hd->root_table, hd->paging_mode);
+        hd->root_table = NULL;
+    }
+}
+
 
 static void amd_iommu_domain_destroy(struct domain *d)
 {
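Beyond the allocator switch, note the locking change in both
amd_iommu_setup_domain_device() and amd_iommu_disable_domain_device():
iommu->lock is now taken before the DTE validity test instead of after it,
so the check and the device-table update form one critical section. The
old test-then-lock ordering left a window in which two CPUs could both
observe the same DTE state and both act on it. A reduced sketch of the
safe shape (sketch_enable_dte() is an illustrative name):

    /* Atomic test-and-update of a device table entry. */
    static void sketch_enable_dte(struct amd_iommu *iommu, u32 *dte)
    {
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);   /* lock BEFORE the test */
        if ( !amd_iommu_is_dte_page_translation_valid(dte) )
        {
            /* ... write the DTE and flush, as in the hunk above ... */
        }
        spin_unlock_irqrestore(&iommu->lock, flags);
    }
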
diff -r 6d040d138e8f -r 1dfc48a8c361 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h     Fri Jan 09 13:00:10 2009 +0000
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h     Fri Jan 09 15:57:57 2009 +0000
@@ -23,6 +23,7 @@
 
 #include <xen/sched.h>
 #include <asm/amd-iommu.h>
+#include <xen/domain_page.h>
 
 #define for_each_amd_iommu(amd_iommu) \
     list_for_each_entry(amd_iommu, \
@@ -59,7 +60,7 @@ int __init amd_iommu_setup_shared_tables
 /* mapping functions */
 int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn);
 int amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
-void *amd_iommu_get_vptr_from_page_table_entry(u32 *entry);
+u64 amd_iommu_get_next_table_from_pte(u32 *entry);
 int amd_iommu_reserve_domain_unity_map(struct domain *domain,
         unsigned long phys_addr, unsigned long size, int iw, int ir);
 int amd_iommu_sync_p2m(struct domain *d);
@@ -69,8 +70,7 @@ void amd_iommu_set_dev_table_entry(u32 *
 void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr, u64 intremap_ptr,
         u16 domain_id, u8 sys_mgt, u8 dev_ex, u8 paging_mode);
 int amd_iommu_is_dte_page_translation_valid(u32 *entry);
-void invalidate_dev_table_entry(struct amd_iommu *iommu,
-            u16 devic_id);
+void invalidate_dev_table_entry(struct amd_iommu *iommu, u16 devic_id);
 
 /* send cmd to iommu */
 int send_iommu_command(struct amd_iommu *iommu, u32 cmd[]);
@@ -117,4 +117,36 @@ static inline unsigned long region_to_pa
     return (PAGE_ALIGN(addr + size) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
 }
 
+static inline struct page_info* alloc_amd_iommu_pgtable(void)
+{
+    struct page_info *pg;
+    void *vaddr;
+
+    pg = alloc_domheap_page(NULL, 0);
+    if ( pg == NULL )
+        return NULL;
+    vaddr = map_domain_page(page_to_mfn(pg));
+    memset(vaddr, 0, PAGE_SIZE);
+    unmap_domain_page(vaddr);
+    return pg;
+}
+
+static inline void free_amd_iommu_pgtable(struct page_info *pg)
+{
+    if ( pg != 0 )
+        free_domheap_page(pg);
+}
+
+static inline void* __alloc_amd_iommu_tables(int order)
+{
+    void *buf;
+    buf = alloc_xenheap_pages(order);
+    return buf;
+}
+
+static inline void __free_amd_iommu_tables(void *table, int order)
+{
+    free_xenheap_pages(table, order);
+}
+
 #endif /* _ASM_X86_64_AMD_IOMMU_PROTO_H */
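The four inline helpers make the heap split explicit:
alloc_amd_iommu_pgtable()/free_amd_iommu_pgtable() hand out domheap pages
for the per-domain I/O page tables, which are referenced by machine
address, while the double-underscore pair stays on the xenheap for tables
that need a permanently valid virtual address. A hypothetical caller
exercising both flavors (sketch_both() and the order value 1 are
illustrative only):

    static int sketch_both(void)
    {
        struct page_info *pt = alloc_amd_iommu_pgtable(); /* domheap page */
        void *buf = __alloc_amd_iommu_tables(1);          /* xenheap, 2 pages */

        if ( pt == NULL || buf == NULL )
        {
            free_amd_iommu_pgtable(pt);                   /* NULL-safe */
            if ( buf )
                __free_amd_iommu_tables(buf, 1);
            return -ENOMEM;
        }

        /* Page tables go into DTEs/PDEs by machine address, e.g.
         * page_to_maddr(pt); xenheap buffers are used via 'buf' directly. */

        free_amd_iommu_pgtable(pt);
        __free_amd_iommu_tables(buf, 1);
        return 0;
    }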
diff -r 6d040d138e8f -r 1dfc48a8c361 xen/include/xen/hvm/iommu.h
--- a/xen/include/xen/hvm/iommu.h       Fri Jan 09 13:00:10 2009 +0000
+++ b/xen/include/xen/hvm/iommu.h       Fri Jan 09 15:57:57 2009 +0000
@@ -40,7 +40,7 @@ struct hvm_iommu {
     /* amd iommu support */
     int domain_id;
     int paging_mode;
-    void *root_table;
+    struct page_info *root_table;
     bool_t p2m_synchronized;
 
     /* iommu_ops */
