
[Xen-changelog] [xen-unstable] x86 iommu: Define vendor-neutral interface for access to IOMMU.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1202987657 0
# Node ID 72f52dd2dba8e0a7ac616c75520e5b168ec1eb9e
# Parent  c9d9bbf1204c66fe4a067b4a61b551d5999a9315
x86 iommu: Define vendor-neutral interface for access to IOMMU.
Signed-off-by: Wei Wang <wei.wang2@xxxxxxx>
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/domctl.c                             |    8 
 xen/arch/x86/hvm/Makefile                         |    1 
 xen/arch/x86/hvm/iommu.c                          |  135 ++++++++
 xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-detect.c |    2 
 xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-map.c    |  371 +++++++++++-----------
 xen/arch/x86/hvm/svm/amd_iommu/pci-amd-iommu.c    |  346 +++++++++++++++-----
 xen/arch/x86/hvm/svm/intr.c                       |   41 ++
 xen/arch/x86/hvm/vioapic.c                        |    2 
 xen/arch/x86/hvm/vmx/vtd/intel-iommu.c            |   38 +-
 xen/arch/x86/hvm/vmx/vtd/io.c                     |   53 ---
 xen/arch/x86/mm/p2m.c                             |   17 -
 xen/include/asm-x86/hvm/iommu.h                   |    3 
 xen/include/asm-x86/hvm/svm/amd-iommu-defs.h      |    5 
 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h     |   18 -
 xen/include/asm-x86/hvm/vmx/intel-iommu.h         |    8 
 xen/include/asm-x86/iommu.h                       |   16 
 16 files changed, 725 insertions(+), 339 deletions(-)
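
The core of the changeset is a per-domain operations table: each vendor driver fills in a struct iommu_ops, iommu_domain_init() picks a table by CPU vendor, and the generic wrappers in the new xen/arch/x86/hvm/iommu.c dispatch through hd->platform_ops. A stand-alone sketch of that dispatch pattern follows (the ops struct is trimmed to two callbacks here, and the stub vendor and main() are invented for illustration; only the shape of iommu_ops comes from the patch):

#include <stdio.h>

struct domain;

/* Trimmed-down version of the patch's struct iommu_ops. */
struct iommu_ops {
    int (*init)(struct domain *d);
    int (*map_page)(struct domain *d, unsigned long gfn, unsigned long mfn);
};

struct domain {
    struct iommu_ops *platform_ops;      /* hd->platform_ops in the patch */
};

/* Invented stub standing in for intel_iommu_ops / amd_iommu_ops. */
static int stub_init(struct domain *d) { (void)d; puts("vendor init"); return 0; }
static int stub_map(struct domain *d, unsigned long gfn, unsigned long mfn)
{
    (void)d;
    printf("map gfn %#lx -> mfn %#lx\n", gfn, mfn);
    return 0;
}
static struct iommu_ops stub_ops = { stub_init, stub_map };

/* Vendor-neutral wrapper, mirroring iommu_map_page() in hvm/iommu.c:
 * with no IOMMU driver bound, the call is a successful no-op. */
static int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
{
    if ( d->platform_ops == NULL )
        return 0;
    return d->platform_ops->map_page(d, gfn, mfn);
}

int main(void)
{
    struct domain d = { .platform_ops = &stub_ops };
    d.platform_ops->init(&d);
    return iommu_map_page(&d, 0x1000, 0x2000);
}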

diff -r c9d9bbf1204c -r 72f52dd2dba8 xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c     Thu Feb 14 10:36:47 2008 +0000
+++ b/xen/arch/x86/domctl.c     Thu Feb 14 11:14:17 2008 +0000
@@ -530,7 +530,7 @@ long arch_do_domctl(
         u8 bus, devfn;
 
         ret = -EINVAL;
-        if ( !vtd_enabled )
+        if ( !iommu_enabled )
             break;
 
         bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff;
@@ -553,7 +553,7 @@ long arch_do_domctl(
         u8 bus, devfn;
 
         ret = -EINVAL;
-        if ( !vtd_enabled )
+        if ( !iommu_enabled )
             break;
 
         if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) )
@@ -589,9 +589,9 @@ long arch_do_domctl(
         if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
             break;
         bind = &(domctl->u.bind_pt_irq);
-        if (vtd_enabled)
+        if ( iommu_enabled )
             ret = pt_irq_create_bind_vtd(d, bind);
-        if (ret < 0)
+        if ( ret < 0 )
             gdprintk(XENLOG_ERR, "pt_irq_create_bind failed!\n");
         rcu_unlock_domain(d);
     }
diff -r c9d9bbf1204c -r 72f52dd2dba8 xen/arch/x86/hvm/Makefile
--- a/xen/arch/x86/hvm/Makefile Thu Feb 14 10:36:47 2008 +0000
+++ b/xen/arch/x86/hvm/Makefile Thu Feb 14 11:14:17 2008 +0000
@@ -6,6 +6,7 @@ obj-y += instrlen.o
 obj-y += instrlen.o
 obj-y += intercept.o
 obj-y += io.o
+obj-y += iommu.o
 obj-y += irq.o
 obj-y += mtrr.o
 obj-y += platform.o
diff -r c9d9bbf1204c -r 72f52dd2dba8 xen/arch/x86/hvm/iommu.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/x86/hvm/iommu.c  Thu Feb 14 11:14:17 2008 +0000
@@ -0,0 +1,135 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <xen/init.h>
+#include <xen/irq.h>
+#include <xen/spinlock.h>
+#include <xen/sched.h>
+#include <xen/xmalloc.h>
+#include <xen/domain_page.h>
+#include <asm/delay.h>
+#include <asm/string.h>
+#include <asm/mm.h>
+#include <asm/iommu.h>
+#include <asm/hvm/vmx/intel-iommu.h>
+
+extern struct iommu_ops intel_iommu_ops;
+extern struct iommu_ops amd_iommu_ops;
+
+int iommu_domain_init(struct domain *domain)
+{
+    struct hvm_iommu *hd = domain_hvm_iommu(domain);
+
+    spin_lock_init(&hd->mapping_lock);
+    spin_lock_init(&hd->iommu_list_lock);
+    INIT_LIST_HEAD(&hd->pdev_list);
+    INIT_LIST_HEAD(&hd->g2m_ioport_list);
+
+    if ( !iommu_enabled )
+        return 0;
+
+    switch ( boot_cpu_data.x86_vendor )
+    {
+    case X86_VENDOR_INTEL:
+        hd->platform_ops = &intel_iommu_ops;
+        break;
+    case X86_VENDOR_AMD:
+        hd->platform_ops = &amd_iommu_ops;
+        break;
+    default:
+        BUG();
+    }
+
+    return hd->platform_ops->init(domain);
+}
+
+int assign_device(struct domain *d, u8 bus, u8 devfn)
+{
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
+
+    if ( !iommu_enabled || !hd->platform_ops)
+        return 0;
+
+    return hd->platform_ops->assign_device(d, bus, devfn);
+}
+
+void iommu_domain_destroy(struct domain *d)
+{
+    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
+    uint32_t i;
+    struct hvm_iommu *hd  = domain_hvm_iommu(d);
+    struct list_head *ioport_list, *digl_list, *tmp;
+    struct g2m_ioport *ioport;
+    struct dev_intx_gsi_link *digl;
+
+    if ( !iommu_enabled || !hd->platform_ops)
+        return;
+
+    if ( hvm_irq_dpci != NULL )
+    {
+        for ( i = 0; i < NR_IRQS; i++ )
+        {
+            if ( !hvm_irq_dpci->mirq[i].valid )
+                continue;
+
+            pirq_guest_unbind(d, i);
+            kill_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(i)]);
+
+            list_for_each_safe ( digl_list, tmp,
+                                 &hvm_irq_dpci->mirq[i].digl_list )
+            {
+                digl = list_entry(digl_list,
+                                  struct dev_intx_gsi_link, list);
+                list_del(&digl->list);
+                xfree(digl);
+            }
+        }
+
+        d->arch.hvm_domain.irq.dpci = NULL;
+        xfree(hvm_irq_dpci);
+    }
+
+    if ( hd )
+    {
+        list_for_each_safe ( ioport_list, tmp, &hd->g2m_ioport_list )
+        {
+            ioport = list_entry(ioport_list, struct g2m_ioport, list);
+            list_del(&ioport->list);
+            xfree(ioport);
+        }
+    }
+
+    return hd->platform_ops->teardown(d);
+}
+
+int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
+{
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
+
+    if ( !iommu_enabled || !hd->platform_ops)
+        return 0;
+
+    return hd->platform_ops->map_page(d, gfn, mfn);
+}
+
+int iommu_unmap_page(struct domain *d, unsigned long gfn)
+{
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
+
+    if ( !iommu_enabled || !hd->platform_ops)
+        return 0;
+
+    return hd->platform_ops->unmap_page(d, gfn);
+}
diff -r c9d9bbf1204c -r 72f52dd2dba8 xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-detect.c
--- a/xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-detect.c Thu Feb 14 10:36:47 2008 +0000
+++ b/xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-detect.c Thu Feb 14 11:14:17 2008 +0000
@@ -89,12 +89,14 @@ int __init get_iommu_capabilities(u8 bus
     u32 cap_header, cap_range;
     u64 mmio_bar;
 
+#if HACK_BIOS_SETTINGS
     /* remove it when BIOS available */
     write_pci_config(bus, dev, func,
         cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET, 0x00000000);
     write_pci_config(bus, dev, func,
         cap_ptr + PCI_CAP_MMIO_BAR_LOW_OFFSET, 0x40000001);
     /* remove it when BIOS available */
+#endif
 
     mmio_bar = (u64)read_pci_config(bus, dev, func,
              cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET) << 32;
diff -r c9d9bbf1204c -r 72f52dd2dba8 xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-map.c
--- a/xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-map.c    Thu Feb 14 10:36:47 2008 +0000
+++ b/xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-map.c    Thu Feb 14 11:14:17 2008 +0000
@@ -30,22 +30,20 @@ static int queue_iommu_command(struct am
     u32 tail, head, *cmd_buffer;
     int i;
 
-    BUG_ON( !iommu || !cmd );
-
     tail = iommu->cmd_buffer_tail;
-    if ( ++tail == iommu->cmd_buffer.entries ) {
+    if ( ++tail == iommu->cmd_buffer.entries )
         tail = 0;
-    }
     head = get_field_from_reg_u32(
-            readl(iommu->mmio_base+IOMMU_CMD_BUFFER_HEAD_OFFSET),
-            IOMMU_CMD_BUFFER_HEAD_MASK,
-            IOMMU_CMD_BUFFER_HEAD_SHIFT);
-    if ( head != tail ) {
+        readl(iommu->mmio_base+IOMMU_CMD_BUFFER_HEAD_OFFSET),
+        IOMMU_CMD_BUFFER_HEAD_MASK,
+        IOMMU_CMD_BUFFER_HEAD_SHIFT);
+    if ( head != tail )
+    {
         cmd_buffer = (u32 *)(iommu->cmd_buffer.buffer +
-            (iommu->cmd_buffer_tail * IOMMU_CMD_BUFFER_ENTRY_SIZE));
-        for ( i = 0; i < IOMMU_CMD_BUFFER_U32_PER_ENTRY; ++i ) {
+                             (iommu->cmd_buffer_tail *
+                              IOMMU_CMD_BUFFER_ENTRY_SIZE));
+        for ( i = 0; i < IOMMU_CMD_BUFFER_U32_PER_ENTRY; i++ )
             cmd_buffer[i] = cmd[i];
-        }
 
         iommu->cmd_buffer_tail = tail;
         return 1;
@@ -58,27 +56,25 @@ static void commit_iommu_command_buffer(
 {
     u32 tail;
 
-    BUG_ON( !iommu );
-
     set_field_in_reg_u32(iommu->cmd_buffer_tail, 0,
-        IOMMU_CMD_BUFFER_TAIL_MASK,
-        IOMMU_CMD_BUFFER_TAIL_SHIFT, &tail);
+                         IOMMU_CMD_BUFFER_TAIL_MASK,
+                         IOMMU_CMD_BUFFER_TAIL_SHIFT, &tail);
     writel(tail, iommu->mmio_base+IOMMU_CMD_BUFFER_TAIL_OFFSET);
 }
 
 int send_iommu_command(struct amd_iommu *iommu, u32 cmd[])
 {
-    BUG_ON( !iommu || !cmd );
-
-    if ( queue_iommu_command(iommu, cmd) ) {
+    if ( queue_iommu_command(iommu, cmd) )
+    {
         commit_iommu_command_buffer(iommu);
         return 1;
     }
+
     return 0;
 }
 
 static void invalidate_iommu_page(struct amd_iommu *iommu,
-            u64 io_addr, u16 domain_id)
+                                  u64 io_addr, u16 domain_id)
 {
     u64 addr_lo, addr_hi;
     u32 cmd[4], entry;
@@ -87,51 +83,52 @@ static void invalidate_iommu_page(struct
     addr_hi = io_addr >> 32;
 
     set_field_in_reg_u32(domain_id, 0,
-        IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_MASK,
-        IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_SHIFT, &entry);
+                         IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_MASK,
+                         IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_SHIFT, &entry);
     set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_IOMMU_PAGES, entry,
-        IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT, &entry);
+                         IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
+                         &entry);
     cmd[1] = entry;
 
     set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, 0,
-        IOMMU_INV_IOMMU_PAGES_S_FLAG_MASK,
-        IOMMU_INV_IOMMU_PAGES_S_FLAG_SHIFT, &entry);
+                         IOMMU_INV_IOMMU_PAGES_S_FLAG_MASK,
+                         IOMMU_INV_IOMMU_PAGES_S_FLAG_SHIFT, &entry);
     set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, entry,
-        IOMMU_INV_IOMMU_PAGES_PDE_FLAG_MASK,
-        IOMMU_INV_IOMMU_PAGES_PDE_FLAG_SHIFT, &entry);
+                         IOMMU_INV_IOMMU_PAGES_PDE_FLAG_MASK,
+                         IOMMU_INV_IOMMU_PAGES_PDE_FLAG_SHIFT, &entry);
     set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, entry,
-        IOMMU_INV_IOMMU_PAGES_ADDR_LOW_MASK,
-        IOMMU_INV_IOMMU_PAGES_ADDR_LOW_SHIFT, &entry);
+                         IOMMU_INV_IOMMU_PAGES_ADDR_LOW_MASK,
+                         IOMMU_INV_IOMMU_PAGES_ADDR_LOW_SHIFT, &entry);
     cmd[2] = entry;
 
     set_field_in_reg_u32((u32)addr_hi, 0,
-        IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_MASK,
-        IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_SHIFT, &entry);
+                         IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_MASK,
+                         IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_SHIFT, &entry);
     cmd[3] = entry;
 
     cmd[0] = 0;
     send_iommu_command(iommu, cmd);
 }
 
-static void flush_command_buffer(struct amd_iommu *iommu)
+void flush_command_buffer(struct amd_iommu *iommu)
 {
     u32 cmd[4], status;
     int loop_count, comp_wait;
 
     /* clear 'ComWaitInt' in status register (WIC) */
     set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0,
-        IOMMU_STATUS_COMP_WAIT_INT_MASK,
-        IOMMU_STATUS_COMP_WAIT_INT_SHIFT, &status);
+                         IOMMU_STATUS_COMP_WAIT_INT_MASK,
+                         IOMMU_STATUS_COMP_WAIT_INT_SHIFT, &status);
     writel(status, iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
 
     /* send an empty COMPLETION_WAIT command to flush command buffer */
     cmd[3] = cmd[2] = 0;
     set_field_in_reg_u32(IOMMU_CMD_COMPLETION_WAIT, 0,
-        IOMMU_CMD_OPCODE_MASK,
-        IOMMU_CMD_OPCODE_SHIFT, &cmd[1]);
+                         IOMMU_CMD_OPCODE_MASK,
+                         IOMMU_CMD_OPCODE_SHIFT, &cmd[1]);
     set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0,
-        IOMMU_COMP_WAIT_I_FLAG_MASK,
-        IOMMU_COMP_WAIT_I_FLAG_SHIFT, &cmd[0]);
+                         IOMMU_COMP_WAIT_I_FLAG_MASK,
+                         IOMMU_COMP_WAIT_I_FLAG_SHIFT, &cmd[0]);
     send_iommu_command(iommu, cmd);
 
     /* wait for 'ComWaitInt' to signal completion */
@@ -139,34 +136,36 @@ static void flush_command_buffer(struct 
         loop_count = amd_iommu_poll_comp_wait;
         do {
             status = readl(iommu->mmio_base +
-                    IOMMU_STATUS_MMIO_OFFSET);
-            comp_wait = get_field_from_reg_u32(status,
-                    IOMMU_STATUS_COMP_WAIT_INT_MASK,
-                    IOMMU_STATUS_COMP_WAIT_INT_SHIFT);
+                           IOMMU_STATUS_MMIO_OFFSET);
+            comp_wait = get_field_from_reg_u32(
+                status,
+                IOMMU_STATUS_COMP_WAIT_INT_MASK,
+                IOMMU_STATUS_COMP_WAIT_INT_SHIFT);
             --loop_count;
         } while ( loop_count && !comp_wait );
 
-        if ( comp_wait ) {
+        if ( comp_wait )
+        {
             /* clear 'ComWaitInt' in status register (WIC) */
             status &= IOMMU_STATUS_COMP_WAIT_INT_MASK;
             writel(status, iommu->mmio_base +
-                IOMMU_STATUS_MMIO_OFFSET);
-        } else
-            dprintk(XENLOG_WARNING, "AMD IOMMU: %s(): Warning:"
-                " ComWaitInt bit did not assert!\n",
-                 __FUNCTION__);
+                   IOMMU_STATUS_MMIO_OFFSET);
+        }
+        else
+            dprintk(XENLOG_WARNING, "AMD IOMMU: Warning:"
+                    " ComWaitInt bit did not assert!\n");
     }
 }
 
 static void clear_page_table_entry_present(u32 *pte)
 {
     set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, pte[0],
-        IOMMU_PTE_PRESENT_MASK,
-        IOMMU_PTE_PRESENT_SHIFT, &pte[0]);
+                         IOMMU_PTE_PRESENT_MASK,
+                         IOMMU_PTE_PRESENT_SHIFT, &pte[0]);
 }
 
 static void set_page_table_entry_present(u32 *pte, u64 page_addr,
-                int iw, int ir)
+                                         int iw, int ir)
 {
     u64 addr_lo, addr_hi;
     u32 entry;
@@ -175,33 +174,33 @@ static void set_page_table_entry_present
     addr_hi = page_addr >> 32;
 
     set_field_in_reg_u32((u32)addr_hi, 0,
-        IOMMU_PTE_ADDR_HIGH_MASK,
-        IOMMU_PTE_ADDR_HIGH_SHIFT, &entry);
+                         IOMMU_PTE_ADDR_HIGH_MASK,
+                         IOMMU_PTE_ADDR_HIGH_SHIFT, &entry);
     set_field_in_reg_u32(iw ? IOMMU_CONTROL_ENABLED :
-        IOMMU_CONTROL_DISABLED, entry,
-        IOMMU_PTE_IO_WRITE_PERMISSION_MASK,
-        IOMMU_PTE_IO_WRITE_PERMISSION_SHIFT, &entry);
+                         IOMMU_CONTROL_DISABLED, entry,
+                         IOMMU_PTE_IO_WRITE_PERMISSION_MASK,
+                         IOMMU_PTE_IO_WRITE_PERMISSION_SHIFT, &entry);
     set_field_in_reg_u32(ir ? IOMMU_CONTROL_ENABLED :
-        IOMMU_CONTROL_DISABLED, entry,
-        IOMMU_PTE_IO_READ_PERMISSION_MASK,
-        IOMMU_PTE_IO_READ_PERMISSION_SHIFT, &entry);
+                         IOMMU_CONTROL_DISABLED, entry,
+                         IOMMU_PTE_IO_READ_PERMISSION_MASK,
+                         IOMMU_PTE_IO_READ_PERMISSION_SHIFT, &entry);
     pte[1] = entry;
 
     set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
-        IOMMU_PTE_ADDR_LOW_MASK,
-        IOMMU_PTE_ADDR_LOW_SHIFT, &entry);
+                         IOMMU_PTE_ADDR_LOW_MASK,
+                         IOMMU_PTE_ADDR_LOW_SHIFT, &entry);
     set_field_in_reg_u32(IOMMU_PAGING_MODE_LEVEL_0, entry,
-        IOMMU_PTE_NEXT_LEVEL_MASK,
-        IOMMU_PTE_NEXT_LEVEL_SHIFT, &entry);
-    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
-        IOMMU_PTE_PRESENT_MASK,
-        IOMMU_PTE_PRESENT_SHIFT, &entry);
+                         IOMMU_PTE_NEXT_LEVEL_MASK,
+                         IOMMU_PTE_NEXT_LEVEL_SHIFT, &entry);
+    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
+                         IOMMU_PTE_PRESENT_MASK,
+                         IOMMU_PTE_PRESENT_SHIFT, &entry);
     pte[0] = entry;
 }
 
 
 static void amd_iommu_set_page_directory_entry(u32 *pde, 
-            u64 next_ptr, u8 next_level)
+                                               u64 next_ptr, u8 next_level)
 {
     u64 addr_lo, addr_hi;
     u32 entry;
@@ -211,29 +210,31 @@ static void amd_iommu_set_page_directory
 
     /* enable read/write permissions, which will be enforced at the PTE */
     set_field_in_reg_u32((u32)addr_hi, 0,
-        IOMMU_PDE_ADDR_HIGH_MASK, IOMMU_PDE_ADDR_HIGH_SHIFT, &entry);
-    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
-        IOMMU_PDE_IO_WRITE_PERMISSION_MASK,
-        IOMMU_PDE_IO_WRITE_PERMISSION_SHIFT, &entry);
-    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
-        IOMMU_PDE_IO_READ_PERMISSION_MASK,
-        IOMMU_PDE_IO_READ_PERMISSION_SHIFT, &entry);
+                         IOMMU_PDE_ADDR_HIGH_MASK,
+                         IOMMU_PDE_ADDR_HIGH_SHIFT, &entry);
+    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
+                         IOMMU_PDE_IO_WRITE_PERMISSION_MASK,
+                         IOMMU_PDE_IO_WRITE_PERMISSION_SHIFT, &entry);
+    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
+                         IOMMU_PDE_IO_READ_PERMISSION_MASK,
+                         IOMMU_PDE_IO_READ_PERMISSION_SHIFT, &entry);
     pde[1] = entry;
 
     /* mark next level as 'present' */
     set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
-        IOMMU_PDE_ADDR_LOW_MASK, IOMMU_PDE_ADDR_LOW_SHIFT, &entry);
+                         IOMMU_PDE_ADDR_LOW_MASK,
+                         IOMMU_PDE_ADDR_LOW_SHIFT, &entry);
     set_field_in_reg_u32(next_level, entry,
-        IOMMU_PDE_NEXT_LEVEL_MASK,
-        IOMMU_PDE_NEXT_LEVEL_SHIFT, &entry);
-    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
-        IOMMU_PDE_PRESENT_MASK,
-        IOMMU_PDE_PRESENT_SHIFT, &entry);
+                         IOMMU_PDE_NEXT_LEVEL_MASK,
+                         IOMMU_PDE_NEXT_LEVEL_SHIFT, &entry);
+    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
+                         IOMMU_PDE_PRESENT_MASK,
+                         IOMMU_PDE_PRESENT_SHIFT, &entry);
     pde[0] = entry;
 }
 
 void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr, u16 domain_id,
-                u8 paging_mode)
+                                   u8 paging_mode)
 {
     u64 addr_hi, addr_lo;
     u32 entry;
@@ -241,54 +242,56 @@ void amd_iommu_set_dev_table_entry(u32 *
     dte[6] = dte[5] = dte[4] = 0;
 
     set_field_in_reg_u32(IOMMU_DEV_TABLE_SYS_MGT_MSG_FORWARDED, 0,
-        IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_MASK,
-        IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_SHIFT, &entry);
+                         IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_MASK,
+                         IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_SHIFT, &entry);
     dte[3] = entry;
 
     set_field_in_reg_u32(domain_id, 0,
-        IOMMU_DEV_TABLE_DOMAIN_ID_MASK,
-        IOMMU_DEV_TABLE_DOMAIN_ID_SHIFT, &entry);
+                         IOMMU_DEV_TABLE_DOMAIN_ID_MASK,
+                         IOMMU_DEV_TABLE_DOMAIN_ID_SHIFT, &entry);
     dte[2] = entry;
 
     addr_lo = root_ptr & DMA_32BIT_MASK;
     addr_hi = root_ptr >> 32;
     set_field_in_reg_u32((u32)addr_hi, 0,
+                         IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_MASK,
+                         IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_SHIFT, &entry);
+    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
+                         IOMMU_DEV_TABLE_IO_WRITE_PERMISSION_MASK,
+                         IOMMU_DEV_TABLE_IO_WRITE_PERMISSION_SHIFT, &entry);
+    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
+                         IOMMU_DEV_TABLE_IO_READ_PERMISSION_MASK,
+                         IOMMU_DEV_TABLE_IO_READ_PERMISSION_SHIFT, &entry);
+    dte[1] = entry;
+
+    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
+                         IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_MASK,
+                         IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_SHIFT, &entry);
+    set_field_in_reg_u32(paging_mode, entry,
+                         IOMMU_DEV_TABLE_PAGING_MODE_MASK,
+                         IOMMU_DEV_TABLE_PAGING_MODE_SHIFT, &entry);
+    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
+                         IOMMU_DEV_TABLE_TRANSLATION_VALID_MASK,
+                         IOMMU_DEV_TABLE_TRANSLATION_VALID_SHIFT, &entry);
+    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
+                         IOMMU_DEV_TABLE_VALID_MASK,
+                         IOMMU_DEV_TABLE_VALID_SHIFT, &entry);
+    dte[0] = entry;
+}
+
+void *amd_iommu_get_vptr_from_page_table_entry(u32 *entry)
+{
+    u64 addr_lo, addr_hi, ptr;
+
+    addr_lo = get_field_from_reg_u32(
+        entry[0],
+        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_MASK,
+        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_SHIFT);
+
+    addr_hi = get_field_from_reg_u32(
+        entry[1],
         IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_MASK,
-        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_SHIFT, &entry);
-    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
-        IOMMU_DEV_TABLE_IO_WRITE_PERMISSION_MASK,
-        IOMMU_DEV_TABLE_IO_WRITE_PERMISSION_SHIFT, &entry);
-    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
-        IOMMU_DEV_TABLE_IO_READ_PERMISSION_MASK,
-        IOMMU_DEV_TABLE_IO_READ_PERMISSION_SHIFT, &entry);
-    dte[1] = entry;
-
-    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
-        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_MASK,
-        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_SHIFT, &entry);
-    set_field_in_reg_u32(paging_mode, entry,
-        IOMMU_DEV_TABLE_PAGING_MODE_MASK,
-        IOMMU_DEV_TABLE_PAGING_MODE_SHIFT, &entry);
-    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
-        IOMMU_DEV_TABLE_TRANSLATION_VALID_MASK,
-        IOMMU_DEV_TABLE_TRANSLATION_VALID_SHIFT, &entry);
-    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
-        IOMMU_DEV_TABLE_VALID_MASK,
-        IOMMU_DEV_TABLE_VALID_SHIFT, &entry);
-    dte[0] = entry;
-}
-
-static void *amd_iommu_get_vptr_from_page_table_entry(u32 *entry)
-{
-    u64 addr_lo, addr_hi, ptr;
-
-    addr_lo = get_field_from_reg_u32(entry[0],
-            IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_MASK,
-            IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_SHIFT);
-
-    addr_hi = get_field_from_reg_u32(entry[1],
-            IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_MASK,
-            IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_SHIFT);
+        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_SHIFT);
 
     ptr = (addr_hi << 32) | (addr_lo << PAGE_SHIFT);
     return ptr ? maddr_to_virt((unsigned long)ptr) : NULL;
@@ -297,42 +300,74 @@ static int amd_iommu_is_pte_present(u32 
 static int amd_iommu_is_pte_present(u32 *entry)
 {
     return (get_field_from_reg_u32(entry[0],
-            IOMMU_PDE_PRESENT_MASK,
-            IOMMU_PDE_PRESENT_SHIFT));
+                                   IOMMU_PDE_PRESENT_MASK,
+                                   IOMMU_PDE_PRESENT_SHIFT));
+}
+
+void invalidate_dev_table_entry(struct amd_iommu *iommu,
+                                u16 device_id)
+{
+    u32 cmd[4], entry;
+
+    cmd[3] = cmd[2] = 0;
+    set_field_in_reg_u32(device_id, 0,
+                         IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_MASK,
+                         IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_SHIFT, &entry);
+    cmd[0] = entry;
+
+    set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_DEVTAB_ENTRY, 0,
+                         IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
+                         &entry);
+    cmd[1] = entry;
+
+    send_iommu_command(iommu, cmd);
+}
+
+int amd_iommu_is_dte_page_translation_valid(u32 *entry)
+{
+    return (get_field_from_reg_u32(entry[0],
+                                   IOMMU_DEV_TABLE_VALID_MASK,
+                                   IOMMU_DEV_TABLE_VALID_SHIFT) &&
+            get_field_from_reg_u32(entry[0],
+                                   IOMMU_DEV_TABLE_TRANSLATION_VALID_MASK,
+                                   IOMMU_DEV_TABLE_TRANSLATION_VALID_SHIFT));
 }
 
 static void *get_pte_from_page_tables(void *table, int level,
-        unsigned long io_pfn)
+                                      unsigned long io_pfn)
 {
     unsigned long offset;
-    void *pde = 0;
-
-    BUG_ON( !table );
+    void *pde = NULL;
+
+    BUG_ON(table == NULL);
 
     while ( level > 0 )
     {
-        void *next_table = 0;
-        unsigned long next_ptr;
         offset = io_pfn >> ((PTE_PER_TABLE_SHIFT *
-            (level - IOMMU_PAGING_MODE_LEVEL_1)));
+                             (level - IOMMU_PAGING_MODE_LEVEL_1)));
         offset &= ~PTE_PER_TABLE_MASK;
         pde = table + (offset * IOMMU_PAGE_TABLE_ENTRY_SIZE);
 
         if ( level == 1 )
             break;
         if ( !pde )
-           return NULL;
-        if ( !amd_iommu_is_pte_present(pde) ) {
-            next_table = alloc_xenheap_page();
+            return NULL;
+        if ( !amd_iommu_is_pte_present(pde) )
+        {
+            void *next_table = alloc_xenheap_page();
             if ( next_table == NULL )
                 return NULL;
             memset(next_table, 0, PAGE_SIZE);
-            if ( *(u64*)(pde) == 0 ) {
-                next_ptr = (u64)virt_to_maddr(next_table);
-                amd_iommu_set_page_directory_entry((u32 *)pde,
-                    next_ptr, level - 1);
-            } else
+            if ( *(u64 *)pde == 0 )
+            {
+                unsigned long next_ptr = (u64)virt_to_maddr(next_table);
+                amd_iommu_set_page_directory_entry(
+                    (u32 *)pde, next_ptr, level - 1);
+            }
+            else
+            {
                 free_xenheap_page(next_table);
+            }
         }
         table = amd_iommu_get_vptr_from_page_table_entry(pde);
         level--;
@@ -341,8 +376,7 @@ static void *get_pte_from_page_tables(vo
     return pde;
 }
 
-int amd_iommu_map_page(struct domain *d, unsigned long gfn,
-        unsigned long mfn)
+int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
 {
     void *pte;
     unsigned long flags;
@@ -352,7 +386,7 @@ int amd_iommu_map_page(struct domain *d,
 
     BUG_ON( !hd->root_table );
 
-    maddr = (u64)(mfn << PAGE_SHIFT);
+    maddr = (u64)mfn << PAGE_SHIFT;
 
     iw = IOMMU_IO_WRITE_ENABLED;
     ir = IOMMU_IO_READ_ENABLED;
@@ -360,18 +394,18 @@ int amd_iommu_map_page(struct domain *d,
     spin_lock_irqsave(&hd->mapping_lock, flags);
 
     pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
-
-    if ( pte != 0 ) {
-        set_page_table_entry_present((u32 *)pte, maddr, iw, ir);
-        spin_unlock_irqrestore(&hd->mapping_lock, flags);
-        return 0;
-    } else {
+    if ( pte == 0 )
+    {
         dprintk(XENLOG_ERR,
-            "%s() AMD IOMMU: Invalid IO pagetable entry gfn = %lx\n",
-            __FUNCTION__, gfn);
+                "AMD IOMMU: Invalid IO pagetable entry gfn = %lx\n", gfn);
         spin_unlock_irqrestore(&hd->mapping_lock, flags);
         return -EIO;
     }
+
+    set_page_table_entry_present((u32 *)pte, maddr, iw, ir);
+
+    spin_unlock_irqrestore(&hd->mapping_lock, flags);
+    return 0;
 }
 
 int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
@@ -386,34 +420,31 @@ int amd_iommu_unmap_page(struct domain *
     BUG_ON( !hd->root_table );
 
     requestor_id = hd->domain_id;
-    io_addr = (u64)(gfn << PAGE_SHIFT);
+    io_addr = (u64)gfn << PAGE_SHIFT;
 
     spin_lock_irqsave(&hd->mapping_lock, flags);
 
     pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
-
-    if ( pte != 0 ) {
-        /* mark PTE as 'page not present' */
-        clear_page_table_entry_present((u32 *)pte);
-        spin_unlock_irqrestore(&hd->mapping_lock, flags);
-
-        /* send INVALIDATE_IOMMU_PAGES command */
-        for_each_amd_iommu(iommu) {
-
-            spin_lock_irqsave(&iommu->lock, flags);
-
-            invalidate_iommu_page(iommu, io_addr, requestor_id);
-            flush_command_buffer(iommu);
-
-            spin_unlock_irqrestore(&iommu->lock, flags);
-        }
-
-        return 0;
-    } else {
+    if ( pte == 0 )
+    {
         dprintk(XENLOG_ERR,
-            "%s() AMD IOMMU: Invalid IO pagetable entry gfn = %lx\n", 
-            __FUNCTION__, gfn);
+                "AMD IOMMU: Invalid IO pagetable entry gfn = %lx\n", gfn);
         spin_unlock_irqrestore(&hd->mapping_lock, flags);
         return -EIO;
     }
-}
+
+    /* mark PTE as 'page not present' */
+    clear_page_table_entry_present((u32 *)pte);
+    spin_unlock_irqrestore(&hd->mapping_lock, flags);
+
+    /* send INVALIDATE_IOMMU_PAGES command */
+    for_each_amd_iommu(iommu)
+    {
+        spin_lock_irqsave(&iommu->lock, flags);
+        invalidate_iommu_page(iommu, io_addr, requestor_id);
+        flush_command_buffer(iommu);
+        spin_unlock_irqrestore(&iommu->lock, flags);
+    }
+
+    return 0;
+}
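
get_pte_from_page_tables() above walks, and lazily allocates, the multi-level IO page table; each level consumes PTE_PER_TABLE_SHIFT bits of the IO pfn. Below is a minimal stand-alone model of the index arithmetic, assuming the defs header's shift of 9 (512 entries per table) and a PTE_PER_TABLE_MASK defined as the complement of (entries - 1), with level 1 as the leaf:

#include <stdio.h>

#define PTE_PER_TABLE_SHIFT 9                       /* assumed: 512 entries */
#define PTE_PER_TABLE_MASK  (~((1UL << PTE_PER_TABLE_SHIFT) - 1))

/* Index into the table at 'level' for a given IO pfn, as computed in the
 * loop of get_pte_from_page_tables(). */
static unsigned long table_offset(unsigned long io_pfn, int level)
{
    unsigned long offset = io_pfn >> (PTE_PER_TABLE_SHIFT * (level - 1));
    return offset & ~PTE_PER_TABLE_MASK;            /* keep the low 9 bits */
}

int main(void)
{
    unsigned long io_pfn = 0x12345;
    for ( int level = 3; level >= 1; level-- )
        printf("level %d: entry %lu\n", level, table_offset(io_pfn, level));
    return 0;
}
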
diff -r c9d9bbf1204c -r 72f52dd2dba8 xen/arch/x86/hvm/svm/amd_iommu/pci-amd-iommu.c
--- a/xen/arch/x86/hvm/svm/amd_iommu/pci-amd-iommu.c    Thu Feb 14 10:36:47 2008 +0000
+++ b/xen/arch/x86/hvm/svm/amd_iommu/pci-amd-iommu.c    Thu Feb 14 11:14:17 2008 +0000
@@ -51,19 +51,17 @@ static void __init init_cleanup(void)
 {
     struct amd_iommu *iommu;
 
-    dprintk(XENLOG_ERR, "AMD IOMMU: %s()\n", __FUNCTION__);
-
-    for_each_amd_iommu(iommu) {
+    for_each_amd_iommu ( iommu )
         unmap_iommu_mmio_region(iommu);
-    }
 }
 
 static void __init deallocate_iommu_table_struct(
-            struct table_struct *table)
-{
-    if (table->buffer) {
+    struct table_struct *table)
+{
+    if ( table->buffer )
+    {
         free_xenheap_pages(table->buffer,
-            get_order_from_bytes(table->alloc_size));
+                           get_order_from_bytes(table->alloc_size));
         table->buffer = NULL;
     }
 }
@@ -76,11 +74,10 @@ static void __init deallocate_iommu_reso
 
 static void __init detect_cleanup(void)
 {
-    struct amd_iommu *iommu;
-
-    dprintk(XENLOG_ERR, "AMD IOMMU: %s()\n", __FUNCTION__);
-
-    for_each_amd_iommu(iommu) {
+    struct amd_iommu *iommu, *next;
+
+    list_for_each_entry_safe ( iommu, next, &amd_iommu_head, list )
+    {
         list_del(&iommu->list);
         deallocate_iommu_resources(iommu);
         xfree(iommu);
@@ -91,19 +88,21 @@ static int requestor_id_from_bdf(int bdf
 {
     /* HACK - HACK */
     /* account for possible 'aliasing' by parent device */
-   return bdf;
+    return bdf;
 }
 
 static int __init allocate_iommu_table_struct(struct table_struct *table,
-            const char *name)
+                                              const char *name)
 {
     table->buffer = (void *) alloc_xenheap_pages(
         get_order_from_bytes(table->alloc_size));
 
-    if ( !table->buffer ) {
+    if ( !table->buffer )
+    {
         dprintk(XENLOG_ERR, "AMD IOMMU: Error allocating %s\n", name);
         return -ENOMEM;
     }
+
     memset(table->buffer, 0, table->alloc_size);
 
     return 0;
@@ -114,32 +113,32 @@ static int __init allocate_iommu_resourc
     /* allocate 'device table' on a 4K boundary */
     iommu->dev_table.alloc_size =
         PAGE_ALIGN(((iommu->last_downstream_bus + 1) *
-        IOMMU_DEV_TABLE_ENTRIES_PER_BUS) *
-        IOMMU_DEV_TABLE_ENTRY_SIZE);
+                    IOMMU_DEV_TABLE_ENTRIES_PER_BUS) *
+                   IOMMU_DEV_TABLE_ENTRY_SIZE);
     iommu->dev_table.entries =
         iommu->dev_table.alloc_size / IOMMU_DEV_TABLE_ENTRY_SIZE;
 
-    if (allocate_iommu_table_struct(&iommu->dev_table,
-            "Device Table") != 0)
+    if ( allocate_iommu_table_struct(&iommu->dev_table,
+                                     "Device Table") != 0 )
         goto error_out;
 
     /* allocate 'command buffer' in power of 2 increments of 4K */
     iommu->cmd_buffer_tail = 0;
     iommu->cmd_buffer.alloc_size =
         PAGE_SIZE << get_order_from_bytes(
-        PAGE_ALIGN(amd_iommu_cmd_buffer_entries *
-        IOMMU_CMD_BUFFER_ENTRY_SIZE));
-
-   iommu->cmd_buffer.entries =
+            PAGE_ALIGN(amd_iommu_cmd_buffer_entries *
+                       IOMMU_CMD_BUFFER_ENTRY_SIZE));
+
+    iommu->cmd_buffer.entries =
         iommu->cmd_buffer.alloc_size / IOMMU_CMD_BUFFER_ENTRY_SIZE;
 
     if ( allocate_iommu_table_struct(&iommu->cmd_buffer,
-            "Command Buffer") != 0 )
-        goto error_out;
-
-    return 0;
-
-error_out:
+                                     "Command Buffer") != 0 )
+        goto error_out;
+
+    return 0;
+
+ error_out:
     deallocate_iommu_resources(iommu);
     return -ENOMEM;
 }
@@ -149,7 +148,8 @@ int iommu_detect_callback(u8 bus, u8 dev
     struct amd_iommu *iommu;
 
     iommu = (struct amd_iommu *) xmalloc(struct amd_iommu);
-    if ( !iommu ) {
+    if ( !iommu )
+    {
         dprintk(XENLOG_ERR, "AMD IOMMU: Error allocating amd_iommu\n");
         return -ENOMEM;
     }
@@ -170,7 +170,7 @@ int iommu_detect_callback(u8 bus, u8 dev
 
     return 0;
 
-error_out:
+ error_out:
     xfree(iommu);
     return -ENODEV;
 }
@@ -180,11 +180,12 @@ static int __init amd_iommu_init(void)
     struct amd_iommu *iommu;
     unsigned long flags;
 
-    for_each_amd_iommu(iommu) {
+    for_each_amd_iommu ( iommu )
+    {
         spin_lock_irqsave(&iommu->lock, flags);
 
         /* register IOMMU data structures in MMIO space */
-        if (map_iommu_mmio_region(iommu) != 0)
+        if ( map_iommu_mmio_region(iommu) != 0 )
             goto error_out;
         register_iommu_dev_table_in_mmio_space(iommu);
         register_iommu_cmd_buffer_in_mmio_space(iommu);
@@ -200,7 +201,7 @@ static int __init amd_iommu_init(void)
 
     return 0;
 
-error_out:
+ error_out:
     init_cleanup();
     return -ENODEV;
 }
@@ -209,13 +210,16 @@ struct amd_iommu *find_iommu_for_device(
 {
     struct amd_iommu *iommu;
 
-    for_each_amd_iommu(iommu) {
-        if ( bus == iommu->root_bus ) {
-            if ( devfn >= iommu->first_devfn &&
-                devfn <= iommu->last_devfn )
+    for_each_amd_iommu ( iommu )
+    {
+        if ( bus == iommu->root_bus )
+        {
+            if ( (devfn >= iommu->first_devfn) &&
+                 (devfn <= iommu->last_devfn) )
                 return iommu;
         }
-        else if ( bus <= iommu->last_downstream_bus ) {
+        else if ( bus <= iommu->last_downstream_bus )
+        {
             if ( iommu->downstream_bus_present[bus] )
                 return iommu;
         }
@@ -238,16 +242,21 @@ void amd_iommu_setup_domain_device(
     dte = iommu->dev_table.buffer +
         (requestor_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
 
-    spin_lock_irqsave(&iommu->lock, flags); 
-
-    amd_iommu_set_dev_table_entry((u32 *)dte,
-        root_ptr, hd->domain_id, hd->paging_mode);
-
-    dprintk(XENLOG_INFO, "AMD IOMMU: Set DTE req_id:%x, "
-            "root_ptr:%"PRIx64", domain_id:%d, paging_mode:%d\n",
-            requestor_id, root_ptr, hd->domain_id, hd->paging_mode);
-
-    spin_unlock_irqrestore(&iommu->lock, flags);
+    if ( !amd_iommu_is_dte_page_translation_valid((u32 *)dte) )
+    {
+        spin_lock_irqsave(&iommu->lock, flags); 
+
+        amd_iommu_set_dev_table_entry(
+            (u32 *)dte,
+            root_ptr, hd->domain_id, hd->paging_mode);
+        invalidate_dev_table_entry(iommu, requestor_id);
+        flush_command_buffer(iommu);
+        dprintk(XENLOG_INFO, "AMD IOMMU: Set DTE req_id:%x, "
+                "root_ptr:%"PRIx64", domain_id:%d, paging_mode:%d\n",
+                requestor_id, root_ptr, hd->domain_id, hd->paging_mode);
+
+        spin_unlock_irqrestore(&iommu->lock, flags);
+    }
 }
 
 void __init amd_iommu_setup_dom0_devices(void)
@@ -259,13 +268,16 @@ void __init amd_iommu_setup_dom0_devices
     u32 l;
     int req_id, bdf;
 
-    for ( bus = 0; bus < 256; bus++ ) {
-        for ( dev = 0; dev < 32; dev++ ) {
-            for ( func = 0; func < 8; func++ ) {
+    for ( bus = 0; bus < 256; bus++ )
+    {
+        for ( dev = 0; dev < 32; dev++ )
+        {
+            for ( func = 0; func < 8; func++ )
+            {
                 l = read_pci_config(bus, dev, func, PCI_VENDOR_ID);
                 /* some broken boards return 0 or ~0 if a slot is empty: */
                 if ( l == 0xffffffff || l == 0x00000000 ||
-                    l == 0x0000ffff || l == 0xffff0000 )
+                     l == 0x0000ffff || l == 0xffff0000 )
                     continue;
 
                 pdev = xmalloc(struct pci_dev);
@@ -288,29 +300,33 @@ int amd_iommu_detect(void)
 {
     unsigned long i;
 
-    if ( !enable_amd_iommu ) {
+    if ( !enable_amd_iommu )
+    {
         printk("AMD IOMMU: Disabled\n");
         return 0;
     }
 
     INIT_LIST_HEAD(&amd_iommu_head);
 
-    if ( scan_for_iommu(iommu_detect_callback) != 0 ) {
+    if ( scan_for_iommu(iommu_detect_callback) != 0 )
+    {
         dprintk(XENLOG_ERR, "AMD IOMMU: Error detection\n");
         goto error_out;
     }
 
-    if ( !iommu_found() ) {
+    if ( !iommu_found() )
+    {
         printk("AMD IOMMU: Not found!\n");
         return 0;
     }
 
-    if ( amd_iommu_init() != 0 ) {
+    if ( amd_iommu_init() != 0 )
+    {
         dprintk(XENLOG_ERR, "AMD IOMMU: Error initialization\n");
         goto error_out;
     }
 
-    if ( amd_iommu_domain_init(dom0) != 0 )
+    if ( iommu_domain_init(dom0) != 0 )
         goto error_out;
 
     /* setup 1:1 page table for dom0 */
@@ -320,21 +336,31 @@ int amd_iommu_detect(void)
     amd_iommu_setup_dom0_devices();
     return 0;
 
-error_out:
-     detect_cleanup();
-     return -ENODEV;
+ error_out:
+    detect_cleanup();
+    return -ENODEV;
 
 }
 
 static int allocate_domain_resources(struct hvm_iommu *hd)
 {
     /* allocate root table */
-    hd->root_table = (void *)alloc_xenheap_page();
+    unsigned long flags;
+
+    spin_lock_irqsave(&hd->mapping_lock, flags);
     if ( !hd->root_table )
-        return -ENOMEM;
-    memset((u8*)hd->root_table, 0, PAGE_SIZE);
-
-    return 0;
+    {
+        hd->root_table = (void *)alloc_xenheap_page();
+        if ( !hd->root_table )
+            goto error_out;
+        memset((u8*)hd->root_table, 0, PAGE_SIZE);
+    }
+    spin_unlock_irqrestore(&hd->mapping_lock, flags);
+
+    return 0;
+ error_out:
+    spin_unlock_irqrestore(&hd->mapping_lock, flags);
+    return -ENOMEM;
 }
 
 static int get_paging_mode(unsigned long entries)
@@ -346,7 +372,8 @@ static int get_paging_mode(unsigned long
     if ( entries > max_page )
         entries = max_page;
 
-    while ( entries > PTE_PER_TABLE_SIZE ) {
+    while ( entries > PTE_PER_TABLE_SIZE )
+    {
         entries = PTE_PER_TABLE_ALIGN(entries) >> PTE_PER_TABLE_SHIFT;
         ++level;
         if ( level > 6 )
@@ -362,14 +389,11 @@ int amd_iommu_domain_init(struct domain 
 {
     struct hvm_iommu *hd = domain_hvm_iommu(domain);
 
-    spin_lock_init(&hd->mapping_lock);
-    spin_lock_init(&hd->iommu_list_lock);
-    INIT_LIST_HEAD(&hd->pdev_list);
-
     /* allocate page directory */
-    if ( allocate_domain_resources(hd) != 0 ) {
-        dprintk(XENLOG_ERR, "AMD IOMMU: %s()\n", __FUNCTION__);
-        goto error_out;
+    if ( allocate_domain_resources(hd) != 0 )
+    {
+        deallocate_domain_resources(hd);
+        return -ENOMEM;
     }
 
     if ( is_hvm_domain(domain) )
@@ -380,10 +404,168 @@ int amd_iommu_domain_init(struct domain 
     hd->domain_id = domain->domain_id;
 
     return 0;
-
-error_out:
-    deallocate_domain_resources(hd);
-    return -ENOMEM;
-}
-
-
+}
+
+static void amd_iommu_disable_domain_device(
+    struct domain *domain, struct amd_iommu *iommu, u16 requestor_id)
+{
+    void *dte;
+    unsigned long flags;
+
+    dte = iommu->dev_table.buffer +
+        (requestor_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
+
+    if ( amd_iommu_is_dte_page_translation_valid((u32 *)dte) )
+    {
+        spin_lock_irqsave(&iommu->lock, flags); 
+        memset (dte, 0, IOMMU_DEV_TABLE_ENTRY_SIZE);
+        invalidate_dev_table_entry(iommu, requestor_id);
+        flush_command_buffer(iommu);
+        dprintk(XENLOG_INFO , "AMD IOMMU: disable DTE 0x%x,"
+                " domain_id:%d, paging_mode:%d\n",
+                requestor_id,  domain_hvm_iommu(domain)->domain_id,
+                domain_hvm_iommu(domain)->paging_mode);
+        spin_unlock_irqrestore(&iommu->lock, flags);
+    }
+}
+
+extern void pdev_flr(u8 bus, u8 devfn);
+
+static int reassign_device( struct domain *source, struct domain *target,
+                            u8 bus, u8 devfn)
+{
+    struct hvm_iommu *source_hd = domain_hvm_iommu(source);
+    struct hvm_iommu *target_hd = domain_hvm_iommu(target);
+    struct pci_dev *pdev;
+    struct amd_iommu *iommu;
+    int req_id, bdf;
+    unsigned long flags;
+
+    for_each_pdev( source, pdev )
+    {
+        if ( (pdev->bus != bus) || (pdev->devfn != devfn) )
+            continue;
+
+        pdev->bus = bus;
+        pdev->devfn = devfn;
+
+        bdf = (bus << 8) | devfn;
+        req_id = requestor_id_from_bdf(bdf);
+        iommu = find_iommu_for_device(bus, devfn);
+
+        if ( iommu )
+        {
+            amd_iommu_disable_domain_device(source, iommu, req_id);
+            /* Move pci device from the source domain to target domain. */
+            spin_lock_irqsave(&source_hd->iommu_list_lock, flags);
+            spin_lock_irqsave(&target_hd->iommu_list_lock, flags);
+            list_move(&pdev->list, &target_hd->pdev_list);
+            spin_unlock_irqrestore(&target_hd->iommu_list_lock, flags);
+            spin_unlock_irqrestore(&source_hd->iommu_list_lock, flags);
+
+            amd_iommu_setup_domain_device(target, iommu, req_id);
+            gdprintk(XENLOG_INFO ,
+                     "AMD IOMMU: reassign %x:%x.%x domain %d -> domain %d\n",
+                     bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
+                     source->domain_id, target->domain_id);
+        }
+        else
+        {
+            gdprintk(XENLOG_ERR , "AMD IOMMU: fail to find iommu."
+                     " %x:%x.%x cannot be assigned to domain %d\n", 
+                     bus, PCI_SLOT(devfn), PCI_FUNC(devfn), target->domain_id);
+            return -ENODEV;
+        }
+
+        break;
+    }
+    return 0;
+}
+
+int amd_iommu_assign_device(struct domain *d, u8 bus, u8 devfn)
+{
+    pdev_flr(bus, devfn);
+    return reassign_device(dom0, d, bus, devfn);
+}
+
+static void release_domain_devices(struct domain *d)
+{
+    struct hvm_iommu *hd  = domain_hvm_iommu(d);
+    struct pci_dev *pdev;
+
+    while ( !list_empty(&hd->pdev_list) )
+    {
+        pdev = list_entry(hd->pdev_list.next, typeof(*pdev), list);
+        pdev_flr(pdev->bus, pdev->devfn);
+        gdprintk(XENLOG_INFO ,
+                 "AMD IOMMU: release devices %x:%x.%x\n",
+                 pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+        reassign_device(d, dom0, pdev->bus, pdev->devfn);
+    }
+}
+
+static void deallocate_next_page_table(void *table, unsigned long index,
+                                       int level)
+{
+    unsigned long next_index;
+    void *next_table, *pde;
+    int next_level;
+
+    pde = table + (index * IOMMU_PAGE_TABLE_ENTRY_SIZE);
+    next_table = amd_iommu_get_vptr_from_page_table_entry((u32 *)pde);
+
+    if ( next_table )
+    {
+        next_level = level - 1;
+        if ( next_level > 1 )
+        {
+            next_index = 0;
+            do
+            {
+                deallocate_next_page_table(next_table,
+                                           next_index, next_level);
+                ++next_index;
+            } while (next_index < PTE_PER_TABLE_SIZE);
+        }
+
+        free_xenheap_page(next_table);
+    }
+}
+
+static void deallocate_iommu_page_tables(struct domain *d)
+{
+    unsigned long index;
+    struct hvm_iommu *hd  = domain_hvm_iommu(d);
+
+    if ( hd ->root_table )
+    {
+        index = 0;
+        do
+        {
+            deallocate_next_page_table(hd->root_table,
+                                       index, hd->paging_mode);
+            ++index;
+        } while ( index < PTE_PER_TABLE_SIZE );
+
+        free_xenheap_page(hd ->root_table);
+    }
+
+    hd ->root_table = NULL;
+}
+
+void amd_iommu_domain_destroy(struct domain *d)
+{
+    if ( !amd_iommu_enabled )
+        return;
+
+    deallocate_iommu_page_tables(d);
+    release_domain_devices(d);
+}
+
+struct iommu_ops amd_iommu_ops = {
+    .init = amd_iommu_domain_init,
+    .assign_device  = amd_iommu_assign_device,
+    .teardown = amd_iommu_domain_destroy,
+    .map_page = amd_iommu_map_page,
+    .unmap_page = amd_iommu_unmap_page,
+};
diff -r c9d9bbf1204c -r 72f52dd2dba8 xen/arch/x86/hvm/svm/intr.c
--- a/xen/arch/x86/hvm/svm/intr.c       Thu Feb 14 10:36:47 2008 +0000
+++ b/xen/arch/x86/hvm/svm/intr.c       Thu Feb 14 11:14:17 2008 +0000
@@ -94,6 +94,46 @@ static void enable_intr_window(struct vc
     vmcb->general1_intercepts |= GENERAL1_INTERCEPT_VINTR;
 }
 
+static void svm_dirq_assist(struct vcpu *v)
+{
+    unsigned int irq;
+    uint32_t device, intx;
+    struct domain *d = v->domain;
+    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
+    struct dev_intx_gsi_link *digl;
+
+    if ( !amd_iommu_enabled || (v->vcpu_id != 0) || (hvm_irq_dpci == NULL) )
+        return;
+
+    for ( irq = find_first_bit(hvm_irq_dpci->dirq_mask, NR_IRQS);
+          irq < NR_IRQS;
+          irq = find_next_bit(hvm_irq_dpci->dirq_mask, NR_IRQS, irq + 1) )
+    {
+        stop_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(irq)]);
+        clear_bit(irq, &hvm_irq_dpci->dirq_mask);
+
+        list_for_each_entry ( digl, &hvm_irq_dpci->mirq[irq].digl_list, list )
+        {
+            device = digl->device;
+            intx = digl->intx;
+            hvm_pci_intx_assert(d, device, intx);
+            spin_lock(&hvm_irq_dpci->dirq_lock);
+            hvm_irq_dpci->mirq[irq].pending++;
+            spin_unlock(&hvm_irq_dpci->dirq_lock);
+        }
+
+        /*
+         * Set a timer to see if the guest can finish the interrupt or not. For
+         * example, the guest OS may unmask the PIC during boot, before the
+         * guest driver is loaded. hvm_pci_intx_assert() may succeed, but the
+         * guest will never deal with the irq, then the physical interrupt line
+         * will never be deasserted.
+         */
+        set_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(irq)],
+                  NOW() + PT_IRQ_TIME_OUT);
+    }
+}
+
 asmlinkage void svm_intr_assist(void) 
 {
     struct vcpu *v = current;
@@ -102,6 +142,7 @@ asmlinkage void svm_intr_assist(void)
 
     /* Crank the handle on interrupt state. */
     pt_update_irq(v);
+    svm_dirq_assist(v);
 
     do {
         intack = hvm_vcpu_has_pending_irq(v);
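
svm_dirq_assist() above walks the dirq_mask bitmap bit by bit: each set bit is a pending passthrough irq, which is cleared, asserted to every bound device/INTx link, and guarded by the timeout timer described in its comment. A stand-alone model of that bitmap walk (the printf stands in for the assert-and-arm-timer work):

#include <stdio.h>

int main(void)
{
    /* One word of the dirq_mask bitmap; irqs 1, 4 and 6 pending. */
    unsigned long dirq_mask = (1UL << 1) | (1UL << 4) | (1UL << 6);

    for ( unsigned int irq = 0; irq < 8 * sizeof(dirq_mask); irq++ )
    {
        if ( !(dirq_mask & (1UL << irq)) )
            continue;                       /* find_next_bit() in the patch */
        dirq_mask &= ~(1UL << irq);         /* clear_bit() */
        printf("assist pending irq %u\n", irq);
    }
    return 0;
}
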
diff -r c9d9bbf1204c -r 72f52dd2dba8 xen/arch/x86/hvm/vioapic.c
--- a/xen/arch/x86/hvm/vioapic.c        Thu Feb 14 10:36:47 2008 +0000
+++ b/xen/arch/x86/hvm/vioapic.c        Thu Feb 14 11:14:17 2008 +0000
@@ -458,7 +458,7 @@ void vioapic_update_EOI(struct domain *d
 
     ent->fields.remote_irr = 0;
 
-    if ( vtd_enabled )
+    if ( iommu_enabled )
     {
         spin_unlock(&d->arch.hvm_domain.irq_lock);
         hvm_dpci_eoi(current->domain, gsi, ent);
diff -r c9d9bbf1204c -r 72f52dd2dba8 xen/arch/x86/hvm/vmx/vtd/intel-iommu.c
--- a/xen/arch/x86/hvm/vmx/vtd/intel-iommu.c    Thu Feb 14 10:36:47 2008 +0000
+++ b/xen/arch/x86/hvm/vmx/vtd/intel-iommu.c    Thu Feb 14 11:14:17 2008 +0000
@@ -1047,7 +1047,7 @@ static void free_iommu(struct iommu *iom
         agaw = 64;                              \
     agaw; })
 
-int iommu_domain_init(struct domain *domain)
+int intel_iommu_domain_init(struct domain *domain)
 {
     struct hvm_iommu *hd = domain_hvm_iommu(domain);
     struct iommu *iommu = NULL;
@@ -1055,11 +1055,6 @@ int iommu_domain_init(struct domain *dom
     int adjust_width, agaw;
     unsigned long sagaw;
     struct acpi_drhd_unit *drhd;
-
-    spin_lock_init(&hd->mapping_lock);
-    spin_lock_init(&hd->iommu_list_lock);
-    INIT_LIST_HEAD(&hd->pdev_list);
-    INIT_LIST_HEAD(&hd->g2m_ioport_list);
 
     if ( !vtd_enabled || list_empty(&acpi_drhd_units) )
         return 0;
@@ -1550,7 +1545,8 @@ static int domain_context_mapped(struct 
     return 0;
 }
 
-int iommu_map_page(struct domain *d, paddr_t gfn, paddr_t mfn)
+int intel_iommu_map_page(
+    struct domain *d, unsigned long gfn, unsigned long mfn)
 {
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu;
@@ -1566,12 +1562,12 @@ int iommu_map_page(struct domain *d, pad
         return 0;
 #endif
 
-    pg = addr_to_dma_page(d, gfn << PAGE_SHIFT_4K);
+    pg = addr_to_dma_page(d, (paddr_t)gfn << PAGE_SHIFT_4K);
     if ( !pg )
         return -ENOMEM;
     pte = (struct dma_pte *)map_domain_page(page_to_mfn(pg));
     pte += gfn & LEVEL_MASK;
-    dma_set_pte_addr(*pte, mfn << PAGE_SHIFT_4K);
+    dma_set_pte_addr(*pte, (paddr_t)mfn << PAGE_SHIFT_4K);
     dma_set_pte_prot(*pte, DMA_PTE_READ | DMA_PTE_WRITE);
     iommu_flush_cache_entry(iommu, pte);
     unmap_domain_page(pte);
@@ -1581,7 +1577,7 @@ int iommu_map_page(struct domain *d, pad
         iommu = drhd->iommu;
         if ( cap_caching_mode(iommu->cap) )
             iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
-                                  gfn << PAGE_SHIFT_4K, 1, 0);
+                                  (paddr_t)gfn << PAGE_SHIFT_4K, 1, 0);
         else if ( cap_rwbf(iommu->cap) )
             iommu_flush_write_buffer(iommu);
     }
@@ -1589,7 +1585,7 @@ int iommu_map_page(struct domain *d, pad
     return 0;
 }
 
-int iommu_unmap_page(struct domain *d, dma_addr_t gfn)
+int intel_iommu_unmap_page(struct domain *d, unsigned long gfn)
 {
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu;
@@ -1603,12 +1599,12 @@ int iommu_unmap_page(struct domain *d, d
         return 0;
 #endif
 
-    dma_pte_clear_one(d, gfn << PAGE_SHIFT_4K);
+    dma_pte_clear_one(d, (paddr_t)gfn << PAGE_SHIFT_4K);
 
     return 0;
 }
 
-int iommu_page_mapping(struct domain *domain, dma_addr_t iova,
+int iommu_page_mapping(struct domain *domain, paddr_t iova,
                        void *hpa, size_t size, int prot)
 {
     struct acpi_drhd_unit *drhd;
@@ -1655,14 +1651,14 @@ int iommu_page_mapping(struct domain *do
     return 0;
 }
 
-int iommu_page_unmapping(struct domain *domain, dma_addr_t addr, size_t size)
+int iommu_page_unmapping(struct domain *domain, paddr_t addr, size_t size)
 {
     dma_pte_clear_range(domain, addr, addr + size);
 
     return 0;
 }
 
-void iommu_flush(struct domain *d, dma_addr_t gfn, u64 *p2m_entry)
+void iommu_flush(struct domain *d, unsigned long gfn, u64 *p2m_entry)
 {
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu = NULL;
@@ -1673,7 +1669,7 @@ void iommu_flush(struct domain *d, dma_a
         iommu = drhd->iommu;
         if ( cap_caching_mode(iommu->cap) )
             iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
-                                  gfn << PAGE_SHIFT_4K, 1, 0);
+                                  (paddr_t)gfn << PAGE_SHIFT_4K, 1, 0);
         else if ( cap_rwbf(iommu->cap) )
             iommu_flush_write_buffer(iommu);
     }
@@ -1921,7 +1917,7 @@ int device_assigned(u8 bus, u8 devfn)
     return 1;
 }
 
-int assign_device(struct domain *d, u8 bus, u8 devfn)
+int intel_iommu_assign_device(struct domain *d, u8 bus, u8 devfn)
 {
     struct acpi_rmrr_unit *rmrr;
     struct pci_dev *pdev;
@@ -2151,6 +2147,14 @@ int iommu_resume(void)
     return 0;
 }
 
+struct iommu_ops intel_iommu_ops = {
+    .init = intel_iommu_domain_init,
+    .assign_device  = intel_iommu_assign_device,
+    .teardown = iommu_domain_teardown,
+    .map_page = intel_iommu_map_page,
+    .unmap_page = intel_iommu_unmap_page,
+};
+
 /*
  * Local variables:
  * mode: C
diff -r c9d9bbf1204c -r 72f52dd2dba8 xen/arch/x86/hvm/vmx/vtd/io.c
--- a/xen/arch/x86/hvm/vmx/vtd/io.c     Thu Feb 14 10:36:47 2008 +0000
+++ b/xen/arch/x86/hvm/vmx/vtd/io.c     Thu Feb 14 11:14:17 2008 +0000
@@ -141,7 +141,7 @@ int hvm_do_IRQ_dpci(struct domain *d, un
 {
     struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
 
-    if ( !vtd_enabled || (d == dom0) || (hvm_irq->dpci == NULL) ||
+    if ( !iommu_enabled || (d == dom0) || (hvm_irq->dpci == NULL) ||
          !hvm_irq->dpci->mirq[mirq].valid )
         return 0;
 
@@ -167,7 +167,7 @@ static void hvm_dpci_isairq_eoi(struct d
     int i;
 
     ASSERT(isairq < NR_ISAIRQS);
-    if ( !vtd_enabled || !dpci ||
+    if ( !iommu_enabled || !dpci ||
          !test_bit(isairq, dpci->isairq_map) )
         return;
 
@@ -205,7 +205,7 @@ void hvm_dpci_eoi(struct domain *d, unsi
     struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
     uint32_t device, intx, machine_gsi;
 
-    if ( !vtd_enabled || (hvm_irq_dpci == NULL) ||
+    if ( !iommu_enabled || (hvm_irq_dpci == NULL) ||
          (guest_gsi >= NR_ISAIRQS &&
           !hvm_irq_dpci->girq[guest_gsi].valid) )
         return;
@@ -235,50 +235,3 @@ void hvm_dpci_eoi(struct domain *d, unsi
     else
         spin_unlock(&hvm_irq_dpci->dirq_lock);
 }
-
-void iommu_domain_destroy(struct domain *d)
-{
-    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
-    uint32_t i;
-    struct hvm_iommu *hd  = domain_hvm_iommu(d);
-    struct list_head *ioport_list, *digl_list, *tmp;
-    struct g2m_ioport *ioport;
-    struct dev_intx_gsi_link *digl;
-
-    if ( !vtd_enabled )
-        return;
-
-    if ( hvm_irq_dpci != NULL )
-    {
-        for ( i = 0; i < NR_IRQS; i++ )
-            if ( hvm_irq_dpci->mirq[i].valid )
-            {
-                pirq_guest_unbind(d, i);
-                kill_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(i)]);
-
-                list_for_each_safe ( digl_list, tmp,
-                                     &hvm_irq_dpci->mirq[i].digl_list )
-                {
-                    digl = list_entry(digl_list,
-                                      struct dev_intx_gsi_link, list);
-                    list_del(&digl->list);
-                    xfree(digl);
-                }
-            }
-
-        d->arch.hvm_domain.irq.dpci = NULL;
-        xfree(hvm_irq_dpci);
-    }
-
-    if ( hd )
-    {
-        list_for_each_safe ( ioport_list, tmp, &hd->g2m_ioport_list )
-        {
-            ioport = list_entry(ioport_list, struct g2m_ioport, list);
-            list_del(&ioport->list);
-            xfree(ioport);
-        }
-    }
-
-    iommu_domain_teardown(d);
-}
diff -r c9d9bbf1204c -r 72f52dd2dba8 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Thu Feb 14 10:36:47 2008 +0000
+++ b/xen/arch/x86/mm/p2m.c     Thu Feb 14 11:14:17 2008 +0000
@@ -255,8 +255,21 @@ set_p2m_entry(struct domain *d, unsigned
     /* level 1 entry */
     paging_write_p2m_entry(d, gfn, p2m_entry, table_mfn, entry_content, 1);
 
-    if ( vtd_enabled && (p2mt == p2m_mmio_direct) && is_hvm_domain(d) )
-        iommu_flush(d, gfn, (u64*)p2m_entry);
+    if ( iommu_enabled && is_hvm_domain(d) )
+    {
+        if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+        {
+            if ( (p2mt == p2m_mmio_direct) )
+                iommu_flush(d, gfn, (u64*)p2m_entry);
+        }
+        else if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
+        {
+            if ( p2mt == p2m_ram_rw )
+                iommu_map_page(d, gfn, mfn_x(mfn));
+            else
+                iommu_unmap_page(d, gfn);
+        }
+    }
 
     /* Success */
     rv = 1;
diff -r c9d9bbf1204c -r 72f52dd2dba8 xen/include/asm-x86/hvm/iommu.h
--- a/xen/include/asm-x86/hvm/iommu.h   Thu Feb 14 10:36:47 2008 +0000
+++ b/xen/include/asm-x86/hvm/iommu.h   Thu Feb 14 11:14:17 2008 +0000
@@ -48,6 +48,9 @@ struct hvm_iommu {
     int domain_id;
     int paging_mode;
     void *root_table;
+
+    /* iommu_ops */
+    struct iommu_ops *platform_ops;
 };
 
 #endif // __ASM_X86_HVM_IOMMU_H__
diff -r c9d9bbf1204c -r 72f52dd2dba8 xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h      Thu Feb 14 10:36:47 2008 +0000
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h      Thu Feb 14 11:14:17 2008 +0000
@@ -262,6 +262,10 @@
 #define IOMMU_INV_IOMMU_PAGES_ADDR_LOW_SHIFT   12
 #define IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_MASK   0xFFFFFFFF
 #define IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_SHIFT  0
+
+/* INVALIDATE_DEVTAB_ENTRY command */
+#define IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_MASK   0x0000FFFF
+#define IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_SHIFT  0
 
 /* Event Log */
 #define IOMMU_EVENT_LOG_BASE_LOW_OFFSET                0x10
@@ -415,5 +419,6 @@
 #define IOMMU_PAGE_TABLE_LEVEL_4        4
 #define IOMMU_IO_WRITE_ENABLED          1
 #define IOMMU_IO_READ_ENABLED           1
+#define HACK_BIOS_SETTINGS                  0
 
 #endif /* _ASM_X86_64_AMD_IOMMU_DEFS_H */
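
All of the AMD driver's register and descriptor packing goes through set_field_in_reg_u32()/get_field_from_reg_u32() with MASK/SHIFT pairs like the new INVALIDATE_DEVTAB_ENTRY ones above. A stand-alone model of those helpers, assuming the usual clear-then-splice semantics the call sites imply (written here for illustration, not copied from the tree):

#include <stdio.h>
#include <stdint.h>

static uint32_t set_field_in_reg_u32(uint32_t field, uint32_t reg_value,
                                     uint32_t mask, uint32_t shift,
                                     uint32_t *reg)
{
    reg_value &= ~mask;                      /* clear the field */
    reg_value |= (field << shift) & mask;    /* splice in the new value */
    *reg = reg_value;
    return reg_value;
}

static uint32_t get_field_from_reg_u32(uint32_t reg_value, uint32_t mask,
                                       uint32_t shift)
{
    return (reg_value & mask) >> shift;
}

int main(void)
{
    uint32_t cmd0 = 0;
    /* Pack a device id into an INVALIDATE_DEVTAB_ENTRY command word. */
    set_field_in_reg_u32(0xbeef, cmd0, 0x0000FFFF /* ..DEVICE_ID_MASK */,
                         0 /* ..DEVICE_ID_SHIFT */, &cmd0);
    printf("cmd[0] = %#x, id back = %#x\n", cmd0,
           get_field_from_reg_u32(cmd0, 0x0000FFFF, 0));
    return 0;
}
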
diff -r c9d9bbf1204c -r 72f52dd2dba8 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h     Thu Feb 14 10:36:47 2008 +0000
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h     Thu Feb 14 11:14:17 2008 +0000
@@ -27,13 +27,15 @@
     list_for_each_entry(amd_iommu, \
         &amd_iommu_head, list)
 
+#define for_each_pdev(domain, pdev) \
+    list_for_each_entry(pdev, \
+         &(domain->arch.hvm_domain.hvm_iommu.pdev_list), list)
+
 #define DMA_32BIT_MASK  0x00000000ffffffffULL
 #define PAGE_ALIGN(addr)    (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
-#define PAGE_SHIFT_4K                   (12)
-#define PAGE_SIZE_4K                    (1UL << PAGE_SHIFT_4K)
-#define PAGE_MASK_4K                    (((u64)-1) << PAGE_SHIFT_4K)
 
-typedef int (*iommu_detect_callback_ptr_t)(u8 bus, u8 dev, u8 func, u8 cap_ptr);
+typedef int (*iommu_detect_callback_ptr_t)(
+    u8 bus, u8 dev, u8 func, u8 cap_ptr);
 
 /* amd-iommu-detect functions */
 int __init scan_for_iommu(iommu_detect_callback_ptr_t iommu_detect_callback);
@@ -49,16 +51,20 @@ void __init enable_iommu(struct amd_iomm
 void __init enable_iommu(struct amd_iommu *iommu);
 
 /* mapping functions */
-int amd_iommu_map_page(struct domain *d, unsigned long gfn,
-        unsigned long mfn);
+int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn);
 int amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
+void *amd_iommu_get_vptr_from_page_table_entry(u32 *entry);
 
 /* device table functions */
 void amd_iommu_set_dev_table_entry(u32 *dte,
         u64 root_ptr, u16 domain_id, u8 paging_mode);
+int amd_iommu_is_dte_page_translation_valid(u32 *entry);
+void invalidate_dev_table_entry(struct amd_iommu *iommu,
+            u16 device_id);
 
 /* send cmd to iommu */
 int send_iommu_command(struct amd_iommu *iommu, u32 cmd[]);
+void flush_command_buffer(struct amd_iommu *iommu);
 
 /* iommu domain functions */
 int amd_iommu_domain_init(struct domain *domain);
diff -r c9d9bbf1204c -r 72f52dd2dba8 xen/include/asm-x86/hvm/vmx/intel-iommu.h
--- a/xen/include/asm-x86/hvm/vmx/intel-iommu.h Thu Feb 14 10:36:47 2008 +0000
+++ b/xen/include/asm-x86/hvm/vmx/intel-iommu.h Thu Feb 14 11:14:17 2008 +0000
@@ -422,8 +422,6 @@ struct poll_info {
 #define VTD_PAGE_TABLE_LEVEL_3  3
 #define VTD_PAGE_TABLE_LEVEL_4  4
 
-typedef paddr_t dma_addr_t;
-
 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
 #define MAX_IOMMUS 32
 #define MAX_IOMMU_REGS 0xc0
@@ -447,8 +445,10 @@ struct ir_ctrl {
 };
 
 struct iommu_flush {
-    int (*context)(void *iommu, u16 did, u16 source_id, u8 function_mask, u64 type, int non_present_entry_flush);
-    int (*iotlb)(void *iommu, u16 did, u64 addr, unsigned int size_order, u64 type, int non_present_entry_flush);
+    int (*context)(void *iommu, u16 did, u16 source_id,
+                   u8 function_mask, u64 type, int non_present_entry_flush);
+    int (*iotlb)(void *iommu, u16 did, u64 addr, unsigned int size_order,
+                 u64 type, int non_present_entry_flush);
 };
 
 struct intel_iommu {
diff -r c9d9bbf1204c -r 72f52dd2dba8 xen/include/asm-x86/iommu.h
--- a/xen/include/asm-x86/iommu.h       Thu Feb 14 10:36:47 2008 +0000
+++ b/xen/include/asm-x86/iommu.h       Thu Feb 14 11:14:17 2008 +0000
@@ -28,7 +28,9 @@
 #include <public/domctl.h>
 
 extern int vtd_enabled;
+extern int amd_iommu_enabled;
 
+#define iommu_enabled ( amd_iommu_enabled || vtd_enabled )
 #define domain_hvm_iommu(d)     (&d->arch.hvm_domain.hvm_iommu)
 #define domain_vmx_iommu(d)     (&d->arch.hvm_domain.hvm_iommu.vmx_iommu)
 #define iommu_qi_ctrl(iommu)    (&(iommu->intel.qi_ctrl));
@@ -72,9 +74,9 @@ void iommu_domain_destroy(struct domain 
 void iommu_domain_destroy(struct domain *d);
 int device_assigned(u8 bus, u8 devfn);
 int assign_device(struct domain *d, u8 bus, u8 devfn);
-int iommu_map_page(struct domain *d, dma_addr_t gfn, dma_addr_t mfn);
-int iommu_unmap_page(struct domain *d, dma_addr_t gfn);
-void iommu_flush(struct domain *d, dma_addr_t gfn, u64 *p2m_entry);
+int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn);
+int iommu_unmap_page(struct domain *d, unsigned long gfn);
+void iommu_flush(struct domain *d, unsigned long gfn, u64 *p2m_entry);
 void iommu_set_pgd(struct domain *d);
 void iommu_domain_teardown(struct domain *d);
 int hvm_do_IRQ_dpci(struct domain *d, unsigned int irq);
@@ -89,4 +91,12 @@ void io_apic_write_remap_rte(unsigned in
 #define PT_IRQ_TIME_OUT MILLISECS(8)
 #define VTDPREFIX "[VT-D]"
 
+struct iommu_ops {
+    int (*init)(struct domain *d);
+    int (*assign_device)(struct domain *d, u8 bus, u8 devfn);
+    void (*teardown)(struct domain *d);
+    int (*map_page)(struct domain *d, unsigned long gfn, unsigned long mfn);
+    int (*unmap_page)(struct domain *d, unsigned long gfn);
+};
+
 #endif /* _IOMMU_H_ */
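
With struct iommu_ops exported here, wiring in another implementation reduces to filling the five callbacks and adding a case to the vendor switch in iommu_domain_init(). A hypothetical example, assuming Xen's struct domain and u8 are in scope via the usual headers (the foo_* names are invented and not part of this patch):

#include <xen/sched.h>      /* struct domain */
#include <asm/iommu.h>      /* struct iommu_ops (added by this patch) */

/* Hypothetical vendor driver -- illustration only. */
static int  foo_iommu_domain_init(struct domain *d) { return 0; }
static int  foo_iommu_assign_device(struct domain *d, u8 bus, u8 devfn) { return 0; }
static void foo_iommu_domain_destroy(struct domain *d) { }
static int  foo_iommu_map_page(struct domain *d, unsigned long gfn,
                               unsigned long mfn) { return 0; }
static int  foo_iommu_unmap_page(struct domain *d, unsigned long gfn) { return 0; }

struct iommu_ops foo_iommu_ops = {
    .init           = foo_iommu_domain_init,
    .assign_device  = foo_iommu_assign_device,
    .teardown       = foo_iommu_domain_destroy,
    .map_page       = foo_iommu_map_page,
    .unmap_page     = foo_iommu_unmap_page,
};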

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog