[Xen-changelog] [xen-3.4-testing] AMD IOMMU: support "passthrough" and "no-intremap" parameters.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1256647422 0
# Node ID c1aada7b3341987f1eb5c9f114c12115333844af
# Parent  10b709ce3050f13cd2e2a84bbff37082e6b3462a
AMD IOMMU: support "passthrough" and "no-intremap" parameters.

Signed-off-by: Wei Wang <wei.wang2@xxxxxxx>
xen-unstable changeset:   20088:4e2ffbd99aeb
xen-unstable date:        Wed Aug 19 14:23:30 2009 +0100
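
These options are consumed by the generic "iommu=" command-line parser in
xen/drivers/passthrough/iommu.c. A minimal sketch of that parsing, assuming
the usual comma-separated option loop (only the flag names iommu_passthrough
and iommu_intremap actually appear in this patch; the parser shape here is
illustrative):

    /* Sketch: handling "iommu=passthrough,no-intremap" on the Xen
     * command line. */
    static void __init parse_iommu_param(char *s)
    {
        char *ss;
        do {
            ss = strchr(s, ',');
            if ( ss )
                *ss = '\0';
            if ( !strcmp(s, "passthrough") )
                iommu_passthrough = 1;  /* dom0 DMA bypasses translation */
            else if ( !strcmp(s, "no-intremap") )
                iommu_intremap = 0;     /* leave interrupts unremapped */
            s = ss + 1;
        } while ( ss );
    }
    custom_param("iommu", parse_iommu_param);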

amd iommu: Clean up initialization functions and fix a fatal page fault
caused by an out-of-bounds access to the irq_to_iommu array.

Signed-off-by: Wei Wang <wei.wang2@xxxxxxx>
xen-unstable changeset:   20165:6a2a099c8ef4
xen-unstable date:        Mon Sep 07 08:42:50 2009 +0100

amd iommu: Remove a useless flag and fix an I/O page fault for HVM
passthrough devices.

Signed-off-by: Wei Wang <wei.wang2@xxxxxxx>
xen-unstable changeset:   20166:fdb0228d6689
xen-unstable date:        Mon Sep 07 08:43:14 2009 +0100

AMD IOMMU: Rework of interrupt remapping

1) Parse the IVRS special device entry in order to handle IO-APIC
remapping correctly.
2) Allocate per-device interrupt remapping tables instead of using a
single global interrupt remapping table.
3) Some system devices, such as the IO-APIC in the north bridge, cannot
be discovered during the PCI device enumeration procedure. To remap
interrupts for those devices, the device table update is split into two
steps, so that interrupt remapping tables can be bound to device table
entries earlier than I/O page tables (a condensed sketch of the two
steps follows).
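
For clarity, a condensed C sketch of the two-step update. The step-1 call
sequence and function names are taken from the patch below; the step-2
call site is paraphrased from the device assignment path, so its exact
arguments are illustrative:

    /* Step 1, at IOMMU initialization: bind the per-device interrupt
     * remapping table to the device table entry, before any I/O page
     * table exists (done per bdf in amd_iommu_setup_device_table()). */
    dte = device_table.buffer + (bdf * IOMMU_DEV_TABLE_ENTRY_SIZE);
    amd_iommu_add_dev_table_entry(
        dte, sys_mgt, dev_ex, lint1_pass, lint0_pass,
        nmi_pass, ext_int_pass, init_pass);
    amd_iommu_set_intremap_table(
        dte, (u64)virt_to_maddr(intr_tb), iommu_intremap);

    /* Step 2, when the device is later assigned to a domain: attach the
     * I/O page tables by filling in the root pointer, domain id and
     * paging mode. */
    amd_iommu_set_root_page_table(
        (u32 *)dte, page_to_maddr(hd->root_table),
        hd->domain_id, hd->paging_mode, 1);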

Signed-off-by: Wei Wang <wei.wang2@xxxxxxx>
xen-unstable changeset:   20215:efec419f6d3e
xen-unstable date:        Wed Sep 16 09:21:56 2009 +0100

AMD IOMMU: Allow enabling iommu debug output at run time.

The old compile-time option is removed.
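
A minimal sketch of the run-time switch, assuming the usual
boolean_param()-guarded printk pattern (the macro body is inferred from
how amd_iommu_debug and AMD_IOMMU_DEBUG are used in the diff below, not
copied from it):

    extern int amd_iommu_debug;
    boolean_param("amd-iommu-debug", amd_iommu_debug);

    #define AMD_IOMMU_DEBUG(fmt, args...)                        \
        do {                                                     \
            if ( amd_iommu_debug )                               \
                printk(XENLOG_INFO "AMD-Vi: " fmt, ## args);     \
        } while ( 0 )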

Signed-off-by: Wei Wang <wei.wang2@xxxxxxx>
xen-unstable changeset:   20222:9c8f9de68a13
xen-unstable date:        Fri Sep 18 08:28:20 2009 +0100

AMD IOMMU: If interrupt remapping is disabled, do not update the
interrupt remapping table on IO-APIC writes.

Signed-off-by: Wei Wang <wei.wang2@xxxxxxx>
xen-unstable changeset:   20223:1d503a3e65a0
xen-unstable date:        Fri Sep 18 08:28:52 2009 +0100

AMD IOMMU: Remove unused definitions.

Signed-off-by: Wei Wang <wei.wang2@xxxxxxx>
xen-unstable changeset:   20224:0f05e956957d
xen-unstable date:        Fri Sep 18 08:29:19 2009 +0100

AMD IOMMU: Extend the loop counter for polling completion wait bit.

Signed-off-by: Wei Wang <wei.wang2@xxxxxxx>
xen-unstable changeset:   20225:438c440f2a7f
xen-unstable date:        Fri Sep 18 08:29:46 2009 +0100

AMD IOMMU: Fix boot output on non-IOMMU systems

Signed-off-by: Wei Wang <wei.wang2@xxxxxxx>
xen-unstable changeset:   20256:d74302fc00e6
xen-unstable date:        Mon Sep 28 08:28:26 2009 +0100
---
 xen/drivers/passthrough/amd/iommu_acpi.c      |  321 ++++++++++++++------------
 xen/drivers/passthrough/amd/iommu_detect.c    |    8 
 xen/drivers/passthrough/amd/iommu_init.c      |  185 ++++++++++----
 xen/drivers/passthrough/amd/iommu_intr.c      |  228 ++++++++++++------
 xen/drivers/passthrough/amd/iommu_map.c       |  202 ++++++++--------
 xen/drivers/passthrough/amd/pci_amd_iommu.c   |  183 +++++++-------
 xen/drivers/passthrough/iommu.c               |    3 
 xen/include/asm-x86/amd-iommu.h               |   14 -
 xen/include/asm-x86/hvm/svm/amd-iommu-acpi.h  |    9 
 xen/include/asm-x86/hvm/svm/amd-iommu-defs.h  |  135 +++-------
 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h |   41 +--
 11 files changed, 742 insertions(+), 587 deletions(-)

diff -r 10b709ce3050 -r c1aada7b3341 xen/drivers/passthrough/amd/iommu_acpi.c
--- a/xen/drivers/passthrough/amd/iommu_acpi.c  Tue Oct 27 12:39:30 2009 +0000
+++ b/xen/drivers/passthrough/amd/iommu_acpi.c  Tue Oct 27 12:43:42 2009 +0000
@@ -28,6 +28,51 @@ extern unsigned short ivrs_bdf_entries;
 extern unsigned short ivrs_bdf_entries;
 extern struct ivrs_mappings *ivrs_mappings;
 extern unsigned short last_bdf;
+extern int ioapic_bdf[MAX_IO_APICS];
+
+static void add_ivrs_mapping_entry(
+    u16 bdf, u16 alias_id, u8 flags, struct amd_iommu *iommu)
+{
+    u8 sys_mgt, lint1_pass, lint0_pass, nmi_pass, ext_int_pass, init_pass;
+    ASSERT( ivrs_mappings != NULL );
+
+    /* setup requestor id */
+    ivrs_mappings[bdf].dte_requestor_id = alias_id;
+
+    /* override flags for range of devices */
+    sys_mgt = get_field_from_byte(flags,
+                                  AMD_IOMMU_ACPI_SYS_MGT_MASK,
+                                  AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
+    lint1_pass = get_field_from_byte(flags,
+                                  AMD_IOMMU_ACPI_LINT1_PASS_MASK,
+                                  AMD_IOMMU_ACPI_LINT1_PASS_SHIFT);
+    lint0_pass = get_field_from_byte(flags,
+                                  AMD_IOMMU_ACPI_LINT0_PASS_MASK,
+                                  AMD_IOMMU_ACPI_LINT0_PASS_SHIFT);
+    nmi_pass = get_field_from_byte(flags,
+                                  AMD_IOMMU_ACPI_NMI_PASS_MASK,
+                                  AMD_IOMMU_ACPI_NMI_PASS_SHIFT);
+    ext_int_pass = get_field_from_byte(flags,
+                                  AMD_IOMMU_ACPI_EINT_PASS_MASK,
+                                  AMD_IOMMU_ACPI_EINT_PASS_SHIFT);
+    init_pass = get_field_from_byte(flags,
+                                  AMD_IOMMU_ACPI_INIT_PASS_MASK,
+                                  AMD_IOMMU_ACPI_INIT_PASS_SHIFT);
+
+    ivrs_mappings[bdf].dte_sys_mgt_enable = sys_mgt;
+    ivrs_mappings[bdf].dte_lint1_pass = lint1_pass;
+    ivrs_mappings[bdf].dte_lint0_pass = lint0_pass;
+    ivrs_mappings[bdf].dte_nmi_pass = nmi_pass;
+    ivrs_mappings[bdf].dte_ext_int_pass = ext_int_pass;
+    ivrs_mappings[bdf].dte_init_pass = init_pass;
+
+    /* allocate per-device interrupt remapping table */
+    if ( ivrs_mappings[alias_id].intremap_table == NULL )
+        ivrs_mappings[alias_id].intremap_table =
+            amd_iommu_alloc_intremap_table();
+    /* assign iommu hardware */
+    ivrs_mappings[bdf].iommu = iommu;
+}
 
 static struct amd_iommu * __init find_iommu_from_bdf_cap(
     u16 bdf, u8 cap_offset)
@@ -131,14 +176,12 @@ static int __init register_exclusion_ran
 {
     unsigned long range_top, iommu_top, length;
     struct amd_iommu *iommu;
-    u16 bus, devfn, req;
-
-    bus = bdf >> 8;
-    devfn = bdf & 0xFF;
-    iommu = find_iommu_for_device(bus, devfn);
+    u16 req;
+
+    iommu = find_iommu_for_device(bdf);
     if ( !iommu )
     {
-        amd_iov_error("IVMD Error: No IOMMU for Dev_Id 0x%x!\n", bdf);
+        AMD_IOMMU_DEBUG("IVMD Error: No IOMMU for Dev_Id 0x%x!\n", bdf);
         return -ENODEV;
     }
     req = ivrs_mappings[bdf].dte_requestor_id;
@@ -176,7 +219,7 @@ static int __init register_exclusion_ran
     unsigned long base, unsigned long limit, u8 iw, u8 ir)
 {
     unsigned long range_top, iommu_top, length;
-    u16 bus, devfn, bdf, req;
+    u16 bdf, req;
 
     /* is part of exclusion range inside of IOMMU virtual address space? */
     /* note: 'limit' parameter is assumed to be page-aligned */
@@ -191,9 +234,7 @@ static int __init register_exclusion_ran
         /* note: these entries are part of the exclusion range */
         for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
         {
-            bus = bdf >> 8;
-            devfn = bdf & 0xFF;
-            if ( iommu == find_iommu_for_device(bus, devfn) )
+            if ( iommu == find_iommu_for_device(bdf) )
             {
                 reserve_unity_map_for_device(bdf, base, length, iw, ir);
                 req = ivrs_mappings[bdf].dte_requestor_id;
@@ -220,7 +261,7 @@ static int __init parse_ivmd_device_sele
     bdf = ivmd_block->header.dev_id;
     if ( bdf >= ivrs_bdf_entries )
     {
-        amd_iov_error("IVMD Error: Invalid Dev_Id 0x%x\n", bdf);
+        AMD_IOMMU_DEBUG("IVMD Error: Invalid Dev_Id 0x%x\n", bdf);
         return -ENODEV;
     }
 
@@ -237,7 +278,7 @@ static int __init parse_ivmd_device_rang
     first_bdf = ivmd_block->header.dev_id;
     if ( first_bdf >= ivrs_bdf_entries )
     {
-        amd_iov_error(
+        AMD_IOMMU_DEBUG(
             "IVMD Error: Invalid Range_First Dev_Id 0x%x\n", first_bdf);
         return -ENODEV;
     }
@@ -245,7 +286,7 @@ static int __init parse_ivmd_device_rang
     last_bdf = ivmd_block->last_dev_id;
     if ( (last_bdf >= ivrs_bdf_entries) || (last_bdf <= first_bdf) )
     {
-        amd_iov_error(
+        AMD_IOMMU_DEBUG(
             "IVMD Error: Invalid Range_Last Dev_Id 0x%x\n", last_bdf);
         return -ENODEV;
     }
@@ -268,7 +309,7 @@ static int __init parse_ivmd_device_iomm
                                     ivmd_block->cap_offset);
     if ( !iommu )
     {
-        amd_iov_error("IVMD Error: No IOMMU for Dev_Id 0x%x  Cap 0x%x\n",
+        AMD_IOMMU_DEBUG("IVMD Error: No IOMMU for Dev_Id 0x%x  Cap 0x%x\n",
                 ivmd_block->header.dev_id, ivmd_block->cap_offset);
         return -ENODEV;
     }
@@ -285,7 +326,7 @@ static int __init parse_ivmd_block(struc
     if ( ivmd_block->header.length <
          sizeof(struct acpi_ivmd_block_header) )
     {
-        amd_iov_error("IVMD Error: Invalid Block Length!\n");
+        AMD_IOMMU_DEBUG("IVMD Error: Invalid Block Length!\n");
         return -ENODEV;
     }
 
@@ -294,9 +335,9 @@ static int __init parse_ivmd_block(struc
     base = start_addr & PAGE_MASK;
     limit = (start_addr + mem_length - 1) & PAGE_MASK;
 
-    amd_iov_info("IVMD Block: Type 0x%x\n",ivmd_block->header.type);
-    amd_iov_info(" Start_Addr_Phys 0x%lx\n", start_addr);
-    amd_iov_info(" Mem_Length 0x%lx\n", mem_length);
+    AMD_IOMMU_DEBUG("IVMD Block: Type 0x%x\n",ivmd_block->header.type);
+    AMD_IOMMU_DEBUG(" Start_Addr_Phys 0x%lx\n", start_addr);
+    AMD_IOMMU_DEBUG(" Mem_Length 0x%lx\n", mem_length);
 
     if ( get_field_from_byte(ivmd_block->header.flags,
                              AMD_IOMMU_ACPI_EXCLUSION_RANGE_MASK,
@@ -315,7 +356,7 @@ static int __init parse_ivmd_block(struc
     }
     else
     {
-        amd_iov_error("IVMD Error: Invalid Flag Field!\n");
+        AMD_IOMMU_DEBUG("IVMD Error: Invalid Flag Field!\n");
         return -ENODEV;
     }
 
@@ -338,7 +379,7 @@ static int __init parse_ivmd_block(struc
                                        base, limit, iw, ir);
 
     default:
-        amd_iov_error("IVMD Error: Invalid Block Type!\n");
+        AMD_IOMMU_DEBUG("IVMD Error: Invalid Block Type!\n");
         return -ENODEV;
     }
 }
@@ -348,7 +389,7 @@ static u16 __init parse_ivhd_device_padd
 {
     if ( header_length < (block_length + pad_length) )
     {
-        amd_iov_error("IVHD Error: Invalid Device_Entry Length!\n");
+        AMD_IOMMU_DEBUG("IVHD Error: Invalid Device_Entry Length!\n");
         return 0;
     }
 
@@ -363,16 +404,11 @@ static u16 __init parse_ivhd_device_sele
     bdf = ivhd_device->header.dev_id;
     if ( bdf >= ivrs_bdf_entries )
     {
-        amd_iov_error("IVHD Error: Invalid Device_Entry Dev_Id 0x%x\n", bdf);
-        return 0;
-    }
-
-    /* override flags for device */
-    ivrs_mappings[bdf].dte_sys_mgt_enable =
-        get_field_from_byte(ivhd_device->header.flags,
-                            AMD_IOMMU_ACPI_SYS_MGT_MASK,
-                            AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
-    ivrs_mappings[bdf].iommu = iommu;
+        AMD_IOMMU_DEBUG("IVHD Error: Invalid Device_Entry Dev_Id 0x%x\n", bdf);
+        return 0;
+    }
+
+    add_ivrs_mapping_entry(bdf, bdf, ivhd_device->header.flags, iommu);
 
     return sizeof(struct acpi_ivhd_device_header);
 }
@@ -382,19 +418,18 @@ static u16 __init parse_ivhd_device_rang
     u16 header_length, u16 block_length, struct amd_iommu *iommu)
 {
     u16 dev_length, first_bdf, last_bdf, bdf;
-    u8 sys_mgt;
 
     dev_length = sizeof(struct acpi_ivhd_device_range);
     if ( header_length < (block_length + dev_length) )
     {
-        amd_iov_error("IVHD Error: Invalid Device_Entry Length!\n");
+        AMD_IOMMU_DEBUG("IVHD Error: Invalid Device_Entry Length!\n");
         return 0;
     }
 
     if ( ivhd_device->range.trailer.type !=
          AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END )
     {
-        amd_iov_error("IVHD Error: "
+        AMD_IOMMU_DEBUG("IVHD Error: "
                 "Invalid Range: End_Type 0x%x\n",
                 ivhd_device->range.trailer.type);
         return 0;
@@ -403,7 +438,7 @@ static u16 __init parse_ivhd_device_rang
     first_bdf = ivhd_device->header.dev_id;
     if ( first_bdf >= ivrs_bdf_entries )
     {
-        amd_iov_error(
+        AMD_IOMMU_DEBUG(
             "IVHD Error: Invalid Range: First Dev_Id 0x%x\n", first_bdf);
         return 0;
     }
@@ -411,22 +446,15 @@ static u16 __init parse_ivhd_device_rang
     last_bdf = ivhd_device->range.trailer.dev_id;
     if ( (last_bdf >= ivrs_bdf_entries) || (last_bdf <= first_bdf) )
     {
-        amd_iov_error(
+        AMD_IOMMU_DEBUG(
             "IVHD Error: Invalid Range: Last Dev_Id 0x%x\n", last_bdf);
         return 0;
     }
 
-    amd_iov_info(" Dev_Id Range: 0x%x -> 0x%x\n", first_bdf, last_bdf);
-
-    /* override flags for range of devices */
-    sys_mgt = get_field_from_byte(ivhd_device->header.flags,
-                                  AMD_IOMMU_ACPI_SYS_MGT_MASK,
-                                  AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
+    AMD_IOMMU_DEBUG(" Dev_Id Range: 0x%x -> 0x%x\n", first_bdf, last_bdf);
+
     for ( bdf = first_bdf; bdf <= last_bdf; bdf++ )
-    {
-        ivrs_mappings[bdf].dte_sys_mgt_enable = sys_mgt;
-        ivrs_mappings[bdf].iommu = iommu;
-    }
+        add_ivrs_mapping_entry(bdf, bdf, ivhd_device->header.flags, iommu);
 
     return dev_length;
 }
@@ -440,37 +468,27 @@ static u16 __init parse_ivhd_device_alia
     dev_length = sizeof(struct acpi_ivhd_device_alias);
     if ( header_length < (block_length + dev_length) )
     {
-        amd_iov_error("IVHD Error: Invalid Device_Entry Length!\n");
+        AMD_IOMMU_DEBUG("IVHD Error: Invalid Device_Entry Length!\n");
         return 0;
     }
 
     bdf = ivhd_device->header.dev_id;
     if ( bdf >= ivrs_bdf_entries )
     {
-        amd_iov_error("IVHD Error: Invalid Device_Entry Dev_Id 0x%x\n", bdf);
+        AMD_IOMMU_DEBUG("IVHD Error: Invalid Device_Entry Dev_Id 0x%x\n", bdf);
         return 0;
     }
 
     alias_id = ivhd_device->alias.dev_id;
     if ( alias_id >= ivrs_bdf_entries )
     {
-        amd_iov_error("IVHD Error: Invalid Alias Dev_Id 0x%x\n", alias_id);
-        return 0;
-    }
-
-    amd_iov_info(" Dev_Id Alias: 0x%x\n", alias_id);
-
-    /* override requestor_id and flags for device */
-    ivrs_mappings[bdf].dte_requestor_id = alias_id;
-    ivrs_mappings[bdf].dte_sys_mgt_enable =
-        get_field_from_byte(ivhd_device->header.flags,
-                            AMD_IOMMU_ACPI_SYS_MGT_MASK,
-                            AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
-    ivrs_mappings[bdf].iommu = iommu;
-
-    ivrs_mappings[alias_id].dte_sys_mgt_enable =
-        ivrs_mappings[bdf].dte_sys_mgt_enable;
-    ivrs_mappings[alias_id].iommu = iommu;
+        AMD_IOMMU_DEBUG("IVHD Error: Invalid Alias Dev_Id 0x%x\n", alias_id);
+        return 0;
+    }
+
+    AMD_IOMMU_DEBUG(" Dev_Id Alias: 0x%x\n", alias_id);
+
+    add_ivrs_mapping_entry(bdf, alias_id, ivhd_device->header.flags, iommu);
 
     return dev_length;
 }
@@ -481,19 +499,18 @@ static u16 __init parse_ivhd_device_alia
 {
 
     u16 dev_length, first_bdf, last_bdf, alias_id, bdf;
-    u8 sys_mgt;
 
     dev_length = sizeof(struct acpi_ivhd_device_alias_range);
     if ( header_length < (block_length + dev_length) )
     {
-        amd_iov_error("IVHD Error: Invalid Device_Entry Length!\n");
+        AMD_IOMMU_DEBUG("IVHD Error: Invalid Device_Entry Length!\n");
         return 0;
     }
 
     if ( ivhd_device->alias_range.trailer.type !=
          AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END )
     {
-        amd_iov_error("IVHD Error: "
+        AMD_IOMMU_DEBUG("IVHD Error: "
                 "Invalid Range: End_Type 0x%x\n",
                 ivhd_device->alias_range.trailer.type);
         return 0;
@@ -502,7 +519,7 @@ static u16 __init parse_ivhd_device_alia
     first_bdf = ivhd_device->header.dev_id;
     if ( first_bdf >= ivrs_bdf_entries )
     {
-        amd_iov_error(
+        AMD_IOMMU_DEBUG(
             "IVHD Error: Invalid Range: First Dev_Id 0x%x\n", first_bdf);
         return 0;
     }
@@ -510,7 +527,7 @@ static u16 __init parse_ivhd_device_alia
     last_bdf = ivhd_device->alias_range.trailer.dev_id;
     if ( last_bdf >= ivrs_bdf_entries || last_bdf <= first_bdf )
     {
-        amd_iov_error(
+        AMD_IOMMU_DEBUG(
             "IVHD Error: Invalid Range: Last Dev_Id 0x%x\n", last_bdf);
         return 0;
     }
@@ -518,25 +535,15 @@ static u16 __init parse_ivhd_device_alia
     alias_id = ivhd_device->alias_range.alias.dev_id;
     if ( alias_id >= ivrs_bdf_entries )
     {
-        amd_iov_error("IVHD Error: Invalid Alias Dev_Id 0x%x\n", alias_id);
-        return 0;
-    }
-
-    amd_iov_info(" Dev_Id Range: 0x%x -> 0x%x\n", first_bdf, last_bdf);
-    amd_iov_info(" Dev_Id Alias: 0x%x\n", alias_id);
-
-    /* override requestor_id and flags for range of devices */
-    sys_mgt = get_field_from_byte(ivhd_device->header.flags,
-                                  AMD_IOMMU_ACPI_SYS_MGT_MASK,
-                                  AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
+        AMD_IOMMU_DEBUG("IVHD Error: Invalid Alias Dev_Id 0x%x\n", alias_id);
+        return 0;
+    }
+
+    AMD_IOMMU_DEBUG(" Dev_Id Range: 0x%x -> 0x%x\n", first_bdf, last_bdf);
+    AMD_IOMMU_DEBUG(" Dev_Id Alias: 0x%x\n", alias_id);
+
     for ( bdf = first_bdf; bdf <= last_bdf; bdf++ )
-    {
-        ivrs_mappings[bdf].dte_requestor_id = alias_id;
-        ivrs_mappings[bdf].dte_sys_mgt_enable = sys_mgt;
-        ivrs_mappings[bdf].iommu = iommu;
-    }
-    ivrs_mappings[alias_id].dte_sys_mgt_enable = sys_mgt;
-    ivrs_mappings[alias_id].iommu = iommu;
+        add_ivrs_mapping_entry(bdf, alias_id, ivhd_device->header.flags, iommu);
 
     return dev_length;
 }
@@ -550,23 +557,18 @@ static u16 __init parse_ivhd_device_exte
     dev_length = sizeof(struct acpi_ivhd_device_extended);
     if ( header_length < (block_length + dev_length) )
     {
-        amd_iov_error("IVHD Error: Invalid Device_Entry Length!\n");
+        AMD_IOMMU_DEBUG("IVHD Error: Invalid Device_Entry Length!\n");
         return 0;
     }
 
     bdf = ivhd_device->header.dev_id;
     if ( bdf >= ivrs_bdf_entries )
     {
-        amd_iov_error("IVHD Error: Invalid Device_Entry Dev_Id 0x%x\n", bdf);
-        return 0;
-    }
-
-    /* override flags for device */
-    ivrs_mappings[bdf].dte_sys_mgt_enable =
-        get_field_from_byte(ivhd_device->header.flags,
-                            AMD_IOMMU_ACPI_SYS_MGT_MASK,
-                            AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
-    ivrs_mappings[bdf].iommu = iommu;
+        AMD_IOMMU_DEBUG("IVHD Error: Invalid Device_Entry Dev_Id 0x%x\n", bdf);
+        return 0;
+    }
+
+    add_ivrs_mapping_entry(bdf, bdf, ivhd_device->header.flags, iommu);
 
     return dev_length;
 }
@@ -576,19 +578,18 @@ static u16 __init parse_ivhd_device_exte
     u16 header_length, u16 block_length, struct amd_iommu *iommu)
 {
     u16 dev_length, first_bdf, last_bdf, bdf;
-    u8 sys_mgt;
 
     dev_length = sizeof(struct acpi_ivhd_device_extended_range);
     if ( header_length < (block_length + dev_length) )
     {
-        amd_iov_error("IVHD Error: Invalid Device_Entry Length!\n");
+        AMD_IOMMU_DEBUG("IVHD Error: Invalid Device_Entry Length!\n");
         return 0;
     }
 
     if ( ivhd_device->extended_range.trailer.type !=
          AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END )
     {
-        amd_iov_error("IVHD Error: "
+        AMD_IOMMU_DEBUG("IVHD Error: "
                 "Invalid Range: End_Type 0x%x\n",
                 ivhd_device->extended_range.trailer.type);
         return 0;
@@ -597,7 +598,7 @@ static u16 __init parse_ivhd_device_exte
     first_bdf = ivhd_device->header.dev_id;
     if ( first_bdf >= ivrs_bdf_entries )
     {
-        amd_iov_error(
+        AMD_IOMMU_DEBUG(
             "IVHD Error: Invalid Range: First Dev_Id 0x%x\n", first_bdf);
         return 0;
     }
@@ -605,24 +606,43 @@ static u16 __init parse_ivhd_device_exte
     last_bdf = ivhd_device->extended_range.trailer.dev_id;
     if ( (last_bdf >= ivrs_bdf_entries) || (last_bdf <= first_bdf) )
     {
-        amd_iov_error(
+        AMD_IOMMU_DEBUG(
             "IVHD Error: Invalid Range: Last Dev_Id 0x%x\n", last_bdf);
         return 0;
     }
 
-    amd_iov_info(" Dev_Id Range: 0x%x -> 0x%x\n",
+    AMD_IOMMU_DEBUG(" Dev_Id Range: 0x%x -> 0x%x\n",
             first_bdf, last_bdf);
 
-    /* override flags for range of devices */
-    sys_mgt = get_field_from_byte(ivhd_device->header.flags,
-                                  AMD_IOMMU_ACPI_SYS_MGT_MASK,
-                                  AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
     for ( bdf = first_bdf; bdf <= last_bdf; bdf++ )
-    {
-        ivrs_mappings[bdf].dte_sys_mgt_enable = sys_mgt;
-        ivrs_mappings[bdf].iommu = iommu;
-    }
-
+        add_ivrs_mapping_entry(bdf, bdf, ivhd_device->header.flags, iommu);
+
+    return dev_length;
+}
+
+static u16 __init parse_ivhd_device_special(
+    union acpi_ivhd_device *ivhd_device,
+    u16 header_length, u16 block_length, struct amd_iommu *iommu)
+{
+    u16 dev_length, bdf;
+
+    dev_length = sizeof(struct acpi_ivhd_device_special);
+    if ( header_length < (block_length + dev_length) )
+    {
+        AMD_IOMMU_DEBUG("IVHD Error: Invalid Device_Entry Length!\n");
+        return 0;
+    }
+
+    bdf = ivhd_device->special.dev_id;
+    if ( bdf >= ivrs_bdf_entries )
+    {
+        AMD_IOMMU_DEBUG("IVHD Error: Invalid Device_Entry Dev_Id 0x%x\n", bdf);
+        return 0;
+    }
+
+    add_ivrs_mapping_entry(bdf, bdf, ivhd_device->header.flags, iommu);
+    /* set device id of ioapic */
+    ioapic_bdf[ivhd_device->special.handle] = bdf;
     return dev_length;
 }
 
@@ -635,7 +655,7 @@ static int __init parse_ivhd_block(struc
     if ( ivhd_block->header.length <
          sizeof(struct acpi_ivhd_block_header) )
     {
-        amd_iov_error("IVHD Error: Invalid Block Length!\n");
+        AMD_IOMMU_DEBUG("IVHD Error: Invalid Block Length!\n");
         return -ENODEV;
     }
 
@@ -643,7 +663,7 @@ static int __init parse_ivhd_block(struc
                                     ivhd_block->cap_offset);
     if ( !iommu )
     {
-        amd_iov_error("IVHD Error: No IOMMU for Dev_Id 0x%x  Cap 0x%x\n",
+        AMD_IOMMU_DEBUG("IVHD Error: No IOMMU for Dev_Id 0x%x  Cap 0x%x\n",
                 ivhd_block->header.dev_id, ivhd_block->cap_offset);
         return -ENODEV;
     }
@@ -656,10 +676,10 @@ static int __init parse_ivhd_block(struc
         ivhd_device = (union acpi_ivhd_device *)
             ((u8 *)ivhd_block + block_length);
 
-        amd_iov_info( "IVHD Device Entry:\n");
-        amd_iov_info( " Type 0x%x\n", ivhd_device->header.type);
-        amd_iov_info( " Dev_Id 0x%x\n", ivhd_device->header.dev_id);
-        amd_iov_info( " Flags 0x%x\n", ivhd_device->header.flags);
+        AMD_IOMMU_DEBUG( "IVHD Device Entry:\n");
+        AMD_IOMMU_DEBUG( " Type 0x%x\n", ivhd_device->header.type);
+        AMD_IOMMU_DEBUG( " Dev_Id 0x%x\n", ivhd_device->header.dev_id);
+        AMD_IOMMU_DEBUG( " Flags 0x%x\n", ivhd_device->header.flags);
 
         switch ( ivhd_device->header.type )
         {
@@ -701,8 +721,13 @@ static int __init parse_ivhd_block(struc
                 ivhd_device,
                 ivhd_block->header.length, block_length, iommu);
             break;
+        case AMD_IOMMU_ACPI_IVHD_DEV_SPECIAL:
+            dev_length = parse_ivhd_device_special(
+                ivhd_device,
+                ivhd_block->header.length, block_length, iommu);
+            break;
         default:
-            amd_iov_error("IVHD Error: Invalid Device Type!\n");
+            AMD_IOMMU_DEBUG("IVHD Error: Invalid Device Type!\n");
             dev_length = 0;
             break;
         }
@@ -734,7 +759,7 @@ static int __init parse_ivrs_block(struc
         return parse_ivmd_block(ivmd_block);
 
     default:
-        amd_iov_error("IVRS Error: Invalid Block Type!\n");
+        AMD_IOMMU_DEBUG("IVRS Error: Invalid Block Type!\n");
         return -ENODEV;
     }
 
@@ -743,40 +768,37 @@ static int __init parse_ivrs_block(struc
 
 static void __init dump_acpi_table_header(struct acpi_table_header *table)
 {
-#ifdef AMD_IOV_DEBUG
     int i;
 
-    amd_iov_info("ACPI Table:\n");
-    amd_iov_info(" Signature ");
+    AMD_IOMMU_DEBUG("ACPI Table:\n");
+    AMD_IOMMU_DEBUG(" Signature ");
     for ( i = 0; i < ACPI_NAME_SIZE; i++ )
         printk("%c", table->signature[i]);
     printk("\n");
 
-    amd_iov_info(" Length 0x%x\n", table->length);
-    amd_iov_info(" Revision 0x%x\n", table->revision);
-    amd_iov_info(" CheckSum 0x%x\n", table->checksum);
-
-    amd_iov_info(" OEM_Id ");
+    AMD_IOMMU_DEBUG(" Length 0x%x\n", table->length);
+    AMD_IOMMU_DEBUG(" Revision 0x%x\n", table->revision);
+    AMD_IOMMU_DEBUG(" CheckSum 0x%x\n", table->checksum);
+
+    AMD_IOMMU_DEBUG(" OEM_Id ");
     for ( i = 0; i < ACPI_OEM_ID_SIZE; i++ )
         printk("%c", table->oem_id[i]);
     printk("\n");
 
-    amd_iov_info(" OEM_Table_Id ");
+    AMD_IOMMU_DEBUG(" OEM_Table_Id ");
     for ( i = 0; i < ACPI_OEM_TABLE_ID_SIZE; i++ )
         printk("%c", table->oem_table_id[i]);
     printk("\n");
 
-    amd_iov_info(" OEM_Revision 0x%x\n", table->oem_revision);
-
-    amd_iov_info(" Creator_Id ");
+    AMD_IOMMU_DEBUG(" OEM_Revision 0x%x\n", table->oem_revision);
+
+    AMD_IOMMU_DEBUG(" Creator_Id ");
     for ( i = 0; i < ACPI_NAME_SIZE; i++ )
         printk("%c", table->asl_compiler_id[i]);
     printk("\n");
 
-    amd_iov_info(" Creator_Revision 0x%x\n",
+    AMD_IOMMU_DEBUG(" Creator_Revision 0x%x\n",
            table->asl_compiler_revision);
-#endif
-
 }
 
 static int __init parse_ivrs_table(struct acpi_table_header *_table)
@@ -788,7 +810,8 @@ static int __init parse_ivrs_table(struc
 
     BUG_ON(!table);
 
-    dump_acpi_table_header(table);
+    if ( amd_iommu_debug )
+        dump_acpi_table_header(table);
 
     /* parse IVRS blocks */
     length = sizeof(struct acpi_ivrs_table_header);
@@ -797,15 +820,15 @@ static int __init parse_ivrs_table(struc
         ivrs_block = (struct acpi_ivrs_block_header *)
             ((u8 *)table + length);
 
-        amd_iov_info("IVRS Block:\n");
-        amd_iov_info(" Type 0x%x\n", ivrs_block->type);
-        amd_iov_info(" Flags 0x%x\n", ivrs_block->flags);
-        amd_iov_info(" Length 0x%x\n", ivrs_block->length);
-        amd_iov_info(" Dev_Id 0x%x\n", ivrs_block->dev_id);
+        AMD_IOMMU_DEBUG("IVRS Block:\n");
+        AMD_IOMMU_DEBUG(" Type 0x%x\n", ivrs_block->type);
+        AMD_IOMMU_DEBUG(" Flags 0x%x\n", ivrs_block->flags);
+        AMD_IOMMU_DEBUG(" Length 0x%x\n", ivrs_block->length);
+        AMD_IOMMU_DEBUG(" Dev_Id 0x%x\n", ivrs_block->dev_id);
 
         if ( table->length < (length + ivrs_block->length) )
         {
-            amd_iov_error("IVRS Error: "
+            AMD_IOMMU_DEBUG("IVRS Error: "
                     "Table Length Exceeded: 0x%x -> 0x%lx\n",
                     table->length,
                     (length + ivrs_block->length));
@@ -834,7 +857,7 @@ static int __init detect_iommu_acpi(stru
         checksum += raw_table[i];
     if ( checksum )
     {
-        amd_iov_error("IVRS Error: "
+        AMD_IOMMU_DEBUG("IVRS Error: "
                 "Invalid Checksum 0x%x\n", checksum);
         return -ENODEV;
     }
@@ -868,7 +891,7 @@ static int __init get_last_bdf_ivhd(void
     if ( ivhd_block->header.length <
          sizeof(struct acpi_ivhd_block_header) )
     {
-        amd_iov_error("IVHD Error: Invalid Block Length!\n");
+        AMD_IOMMU_DEBUG("IVHD Error: Invalid Block Length!\n");
         return -ENODEV;
     }
 
@@ -911,8 +934,12 @@ static int __init get_last_bdf_ivhd(void
             UPDATE_LAST_BDF(ivhd_device->extended_range.trailer.dev_id)
             dev_length = sizeof(struct acpi_ivhd_device_extended_range);
             break;
+        case AMD_IOMMU_ACPI_IVHD_DEV_SPECIAL:
+            UPDATE_LAST_BDF(ivhd_device->special.dev_id)
+            dev_length = sizeof(struct acpi_ivhd_device_special);
+            break;
         default:
-            amd_iov_error("IVHD Error: Invalid Device Type!\n");
+            AMD_IOMMU_DEBUG("IVHD Error: Invalid Device Type!\n");
             dev_length = 0;
             break;
         }
diff -r 10b709ce3050 -r c1aada7b3341 xen/drivers/passthrough/amd/iommu_detect.c
--- a/xen/drivers/passthrough/amd/iommu_detect.c        Tue Oct 27 12:39:30 2009 +0000
+++ b/xen/drivers/passthrough/amd/iommu_detect.c        Tue Oct 27 12:43:42 2009 +0000
@@ -62,7 +62,7 @@ static int __init get_iommu_msi_capabili
     if ( !iommu->msi_cap )
         return -ENODEV;
 
-    amd_iov_info("Found MSI capability block \n");
+    AMD_IOMMU_DEBUG("Found MSI capability block \n");
     control = pci_conf_read16(bus, dev, func,
             iommu->msi_cap + PCI_MSI_FLAGS);
     iommu->maskbit = control & PCI_MSI_FLAGS_MASKBIT;
@@ -103,21 +103,21 @@ int __init amd_iommu_detect_one_acpi(voi
 
     if ( ivhd_block->header.length < sizeof(struct acpi_ivhd_block_header) )
     {
-        amd_iov_error("Invalid IVHD Block Length!\n");
+        AMD_IOMMU_DEBUG("Invalid IVHD Block Length!\n");
         return -ENODEV;
     }
 
     if ( !ivhd_block->header.dev_id ||
         !ivhd_block->cap_offset || !ivhd_block->mmio_base)
     {
-        amd_iov_error("Invalid IVHD Block!\n");
+        AMD_IOMMU_DEBUG("Invalid IVHD Block!\n");
         return -ENODEV;
     }
 
     iommu = (struct amd_iommu *) xmalloc(struct amd_iommu);
     if ( !iommu )
     {
-        amd_iov_error("Error allocating amd_iommu\n");
+        AMD_IOMMU_DEBUG("Error allocating amd_iommu\n");
         return -ENOMEM;
     }
     memset(iommu, 0, sizeof(struct amd_iommu));
diff -r 10b709ce3050 -r c1aada7b3341 xen/drivers/passthrough/amd/iommu_init.c
--- a/xen/drivers/passthrough/amd/iommu_init.c  Tue Oct 27 12:39:30 2009 +0000
+++ b/xen/drivers/passthrough/amd/iommu_init.c  Tue Oct 27 12:43:42 2009 +0000
@@ -37,13 +37,14 @@ struct list_head amd_iommu_head;
 struct list_head amd_iommu_head;
 struct table_struct device_table;
 
+int amd_iommu_enabled = 0;
 static int __init map_iommu_mmio_region(struct amd_iommu *iommu)
 {
     unsigned long mfn;
 
     if ( nr_amd_iommus > MAX_AMD_IOMMUS )
     {
-        amd_iov_error("nr_amd_iommus %d > MAX_IOMMUS\n", nr_amd_iommus);
+        AMD_IOMMU_DEBUG("nr_amd_iommus %d > MAX_IOMMUS\n", nr_amd_iommus);
         return -ENOMEM;
     }
 
@@ -432,7 +433,7 @@ static void parse_event_log_entry(u32 en
     if ( (code > IOMMU_EVENT_INVALID_DEV_REQUEST) ||
         (code < IOMMU_EVENT_ILLEGAL_DEV_TABLE_ENTRY) )
     {
-        amd_iov_error("Invalid event log entry!\n");
+        AMD_IOMMU_DEBUG("Invalid event log entry!\n");
         return;
     }
 
@@ -445,8 +446,8 @@ static void parse_event_log_entry(u32 en
                                            IOMMU_EVENT_DOMAIN_ID_MASK,
                                            IOMMU_EVENT_DOMAIN_ID_SHIFT);
         addr= (u64*) (entry + 2);
-        printk(XENLOG_ERR "AMD_IOV: "
-            "%s: domain:%d, device id:0x%x, fault address:0x%"PRIx64"\n",
+        printk(XENLOG_ERR "AMD-Vi: "
+            "%s: domain:%d, device id:0x%04x, fault address:0x%016"PRIx64"\n",
             event_str[code-1], domain_id, device_id, *addr);
     }
 }
@@ -495,7 +496,7 @@ static int set_iommu_interrupt_handler(s
         irq_desc[vector].handler = &no_irq_type;
         vector_to_iommu[vector] = NULL;
         free_irq_vector(vector);
-        amd_iov_error("can't request irq\n");
+        AMD_IOMMU_DEBUG("can't request irq\n");
         return 0;
     }
 
@@ -547,12 +548,6 @@ static void __init deallocate_iommu_tabl
     }
 }
 
-static void __init deallocate_iommu_tables(struct amd_iommu *iommu)
-{
-    deallocate_iommu_table_struct(&iommu->cmd_buffer);
-    deallocate_iommu_table_struct(&iommu->event_log);
-}
-
 static int __init allocate_iommu_table_struct(struct table_struct *table,
                                               const char *name)
 {
@@ -564,7 +559,7 @@ static int __init allocate_iommu_table_s
 
         if ( table->buffer == NULL )
         {
-            amd_iov_error("Error allocating %s\n", name);
+            AMD_IOMMU_DEBUG("Error allocating %s\n", name);
             return -ENOMEM;
         }
         memset(table->buffer, 0, PAGE_SIZE * (1UL << order));
@@ -572,7 +567,7 @@ static int __init allocate_iommu_table_s
     return 0;
 }
 
-static int __init allocate_iommu_tables(struct amd_iommu *iommu)
+static int __init allocate_cmd_buffer(struct amd_iommu *iommu)
 {
     /* allocate 'command buffer' in power of 2 increments of 4K */
     iommu->cmd_buffer_tail = 0;
@@ -583,10 +578,12 @@ static int __init allocate_iommu_tables(
     iommu->cmd_buffer.entries = iommu->cmd_buffer.alloc_size /
                                 IOMMU_CMD_BUFFER_ENTRY_SIZE;
 
-    if ( allocate_iommu_table_struct(&iommu->cmd_buffer, "Command Buffer") != 0 )
-        goto error_out;
-
-    /* allocate 'event log' in power of 2 increments of 4K */
+    return (allocate_iommu_table_struct(&iommu->cmd_buffer, "Command Buffer"));
+}
+
+static int __init allocate_event_log(struct amd_iommu *iommu)
+{
+    /* allocate 'event log' in power of 2 increments of 4K */
     iommu->event_log_head = 0;
     iommu->event_log.alloc_size = PAGE_SIZE <<
                                   get_order_from_bytes(
@@ -595,19 +592,15 @@ static int __init allocate_iommu_tables(
     iommu->event_log.entries = iommu->event_log.alloc_size /
                                IOMMU_EVENT_LOG_ENTRY_SIZE;
 
-    if ( allocate_iommu_table_struct(&iommu->event_log, "Event Log") != 0 )
-        goto error_out;
-
-    return 0;
-
- error_out:
-    deallocate_iommu_tables(iommu);
-    return -ENOMEM;
+    return (allocate_iommu_table_struct(&iommu->event_log, "Event Log"));
 }
 
 int __init amd_iommu_init_one(struct amd_iommu *iommu)
 {
-    if ( allocate_iommu_tables(iommu) != 0 )
+    if ( allocate_cmd_buffer(iommu) != 0 )
+        goto error_out;
+
+    if ( allocate_event_log(iommu) != 0 )
         goto error_out;
 
     if ( map_iommu_mmio_region(iommu) != 0 )
@@ -631,23 +624,47 @@ int __init amd_iommu_init_one(struct amd
     return 0;
 
 error_out:
-    return -ENODEV;
-}
-
-void __init amd_iommu_init_cleanup(void)
+    return -ENODEV;
+}
+
+static void __init amd_iommu_init_cleanup(void)
 {
     struct amd_iommu *iommu, *next;
+    int bdf;
 
     list_for_each_entry_safe ( iommu, next, &amd_iommu_head, list )
     {
         list_del(&iommu->list);
         if ( iommu->enabled )
         {
-            deallocate_iommu_tables(iommu);
+            deallocate_iommu_table_struct(&iommu->cmd_buffer);
+            deallocate_iommu_table_struct(&iommu->event_log);
             unmap_iommu_mmio_region(iommu);
         }
         xfree(iommu);
     }
+
+    /* free interrupt remapping table */
+    for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
+    {
+        if ( ivrs_mappings[bdf].intremap_table )
+            amd_iommu_free_intremap_table(bdf);
+    }
+
+    /* free device table */
+    deallocate_iommu_table_struct(&device_table);
+
+    /* free ivrs_mappings[] */
+    if ( ivrs_mappings )
+    {
+        xfree(ivrs_mappings);
+        ivrs_mappings = NULL;
+    }
+
+    iommu_enabled = 0;
+    iommu_passthrough = 0;
+    iommu_intremap = 0;
+    amd_iommu_enabled = 0;
 }
 
 static int __init init_ivrs_mapping(void)
@@ -659,7 +676,7 @@ static int __init init_ivrs_mapping(void
     ivrs_mappings = xmalloc_array( struct ivrs_mappings, ivrs_bdf_entries);
     if ( ivrs_mappings == NULL )
     {
-        amd_iov_error("Error allocating IVRS Mappings table\n");
+        AMD_IOMMU_DEBUG("Error allocating IVRS Mappings table\n");
         return -ENOMEM;
     }
     memset(ivrs_mappings, 0, ivrs_bdf_entries * sizeof(struct ivrs_mappings));
@@ -673,12 +690,28 @@ static int __init init_ivrs_mapping(void
         ivrs_mappings[bdf].dte_allow_exclusion = IOMMU_CONTROL_DISABLED;
         ivrs_mappings[bdf].unity_map_enable = IOMMU_CONTROL_DISABLED;
         ivrs_mappings[bdf].iommu = NULL;
+
+        ivrs_mappings[bdf].intremap_table = NULL;
+        ivrs_mappings[bdf].dte_lint1_pass = IOMMU_CONTROL_DISABLED;
+        ivrs_mappings[bdf].dte_lint0_pass = IOMMU_CONTROL_DISABLED;
+        ivrs_mappings[bdf].dte_nmi_pass = IOMMU_CONTROL_DISABLED;
+        ivrs_mappings[bdf].dte_ext_int_pass = IOMMU_CONTROL_DISABLED;
+        ivrs_mappings[bdf].dte_init_pass = IOMMU_CONTROL_DISABLED;
+
+        spin_lock_init(&ivrs_mappings[bdf].intremap_lock);
     }
     return 0;
 }
 
 static int __init amd_iommu_setup_device_table(void)
 {
+    int bdf;
+    void *intr_tb, *dte;
+    int sys_mgt, dev_ex, lint1_pass, lint0_pass,
+        nmi_pass, ext_int_pass, init_pass;
+
+    BUG_ON( (ivrs_bdf_entries == 0) || (amd_iommu_enabled) );
+
     /* allocate 'device table' on a 4K boundary */
     device_table.alloc_size = PAGE_SIZE <<
                               get_order_from_bytes(
@@ -687,34 +720,80 @@ static int __init amd_iommu_setup_device
     device_table.entries = device_table.alloc_size /
                            IOMMU_DEV_TABLE_ENTRY_SIZE;
 
-    return ( allocate_iommu_table_struct(&device_table, "Device Table") );
-}
-
-int __init amd_iommu_setup_shared_tables(void)
-{
-    BUG_ON( !ivrs_bdf_entries );
+    if ( allocate_iommu_table_struct(&device_table, "Device Table") != 0 )
+         return -ENOMEM;
+
+    /* Add device table entries */
+    for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
+    {
+        intr_tb = ivrs_mappings[bdf].intremap_table;
+
+        if ( intr_tb )
+        {
+            sys_mgt = ivrs_mappings[bdf].dte_sys_mgt_enable;
+            dev_ex = ivrs_mappings[bdf].dte_allow_exclusion;
+
+            /* get interrupt remapping settings */
+            lint1_pass = ivrs_mappings[bdf].dte_lint1_pass;
+            lint0_pass = ivrs_mappings[bdf].dte_lint0_pass;
+            nmi_pass = ivrs_mappings[bdf].dte_nmi_pass;
+            ext_int_pass = ivrs_mappings[bdf].dte_ext_int_pass;
+            init_pass = ivrs_mappings[bdf].dte_init_pass;
+
+            /* add device table entry */
+            dte = device_table.buffer + (bdf * IOMMU_DEV_TABLE_ENTRY_SIZE);
+            amd_iommu_add_dev_table_entry(
+                dte, sys_mgt, dev_ex, lint1_pass, lint0_pass,
+                nmi_pass, ext_int_pass, init_pass);
+
+            amd_iommu_set_intremap_table(
+                dte, (u64)virt_to_maddr(intr_tb), iommu_intremap);
+
+            AMD_IOMMU_DEBUG("Add device table entry at DTE:0x%04x, "
+                "intremap_table:0x%016"PRIx64"\n", bdf,
+                (u64)virt_to_maddr(intr_tb));
+        }
+    }
+
+    return 0;
+}
+
+int __init amd_iommu_init(void)
+{
+    struct amd_iommu *iommu;
+
+    BUG_ON( !iommu_found() );
+
+    ivrs_bdf_entries = amd_iommu_get_ivrs_dev_entries();
+
+    if ( !ivrs_bdf_entries )
+        goto error_out;
 
     if ( init_ivrs_mapping() != 0 )
         goto error_out;
 
+    if ( amd_iommu_update_ivrs_mapping_acpi() != 0 )
+        goto error_out;
+
+    /* initialize io-apic interrupt remapping entries */
+    if ( amd_iommu_setup_ioapic_remapping() != 0 )
+        goto error_out;
+
+    /* allocate and initialize a global device table shared by all iommus */
     if ( amd_iommu_setup_device_table() != 0 )
         goto error_out;
 
-    if ( amd_iommu_setup_intremap_table() != 0 )
-        goto error_out;
-
+    /* per iommu initialization  */
+    for_each_amd_iommu ( iommu )
+        if ( amd_iommu_init_one(iommu) != 0 )
+            goto error_out;
+
+    amd_iommu_enabled = 1;
     return 0;
 
 error_out:
-    deallocate_intremap_table();
-    deallocate_iommu_table_struct(&device_table);
-
-    if ( ivrs_mappings )
-    {
-        xfree(ivrs_mappings);
-        ivrs_mappings = NULL;
-    }
-    return -ENOMEM;
+    amd_iommu_init_cleanup();
+    return -ENODEV;
 }
 
 static void disable_iommu(struct amd_iommu *iommu)
@@ -749,15 +828,13 @@ static void invalidate_all_domain_pages(
 
 static void invalidate_all_devices(void)
 {
-    u16 bus, devfn, bdf, req_id;
+    int bdf, req_id;
     unsigned long flags;
     struct amd_iommu *iommu;
 
     for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
     {
-        bus = bdf >> 8;
-        devfn = bdf & 0xFF;
-        iommu = find_iommu_for_device(bus, devfn);
+        iommu = find_iommu_for_device(bdf);
         req_id = ivrs_mappings[bdf].dte_requestor_id;
         if ( iommu )
         {
diff -r 10b709ce3050 -r c1aada7b3341 xen/drivers/passthrough/amd/iommu_intr.c
--- a/xen/drivers/passthrough/amd/iommu_intr.c  Tue Oct 27 12:39:30 2009 +0000
+++ b/xen/drivers/passthrough/amd/iommu_intr.c  Tue Oct 27 12:43:42 2009 +0000
@@ -23,16 +23,25 @@
 #include <asm/hvm/svm/amd-iommu-proto.h>
 
 #define INTREMAP_TABLE_ORDER    1
-static DEFINE_SPINLOCK(int_remap_table_lock);
-void *int_remap_table = NULL;
-
-static u8 *get_intremap_entry(u8 vector, u8 dm)
+int ioapic_bdf[MAX_IO_APICS];
+extern struct ivrs_mappings *ivrs_mappings;
+extern unsigned short ivrs_bdf_entries;
+extern int amd_iommu_enabled;
+
+static int get_intremap_requestor_id(int bdf)
+{
+    ASSERT( bdf < ivrs_bdf_entries );
+    return ivrs_mappings[bdf].dte_requestor_id;
+}
+
+static u8 *get_intremap_entry(int bdf, u8 vector, u8 dm)
 {
     u8 *table;
     int offset = 0;
-    table = (u8*)int_remap_table;
-
-    BUG_ON( !table );
+
+    table = (u8*)ivrs_mappings[bdf].intremap_table;
+    ASSERT( table != NULL );
+
     offset = (dm << INT_REMAP_INDEX_DM_SHIFT) & INT_REMAP_INDEX_DM_MASK;
     offset |= (vector << INT_REMAP_INDEX_VECTOR_SHIFT ) & 
         INT_REMAP_INDEX_VECTOR_MASK;
@@ -83,6 +92,8 @@ void invalidate_interrupt_table(struct a
 }
 
 static void update_intremap_entry_from_ioapic(
+    int bdf,
+    struct amd_iommu *iommu,
     struct IO_APIC_route_entry *ioapic_rte,
     unsigned int rte_upper, unsigned int value)
 {
@@ -90,42 +101,42 @@ static void update_intremap_entry_from_i
     u32* entry;
     u8 delivery_mode, dest, vector, dest_mode;
     struct IO_APIC_route_entry *rte = ioapic_rte;
-
-    spin_lock_irqsave(&int_remap_table_lock, flags);
-
-    if ( rte_upper )
-    {
-        dest = (value >> 24) & 0xFF;
+    int req_id;
+
+    req_id = get_intremap_requestor_id(bdf);
+
+    /* only remap the vector when the lower 32 bits of the ioapic ire change */
+    if ( likely(!rte_upper) )
+    {
         delivery_mode = rte->delivery_mode;
         vector = rte->vector;
         dest_mode = rte->dest_mode;
-        entry = (u32*)get_intremap_entry((u8)rte->vector,
-                                        (u8)rte->delivery_mode);
+        dest = rte->dest.logical.logical_dest;
+
+        spin_lock_irqsave(&ivrs_mappings[req_id].intremap_lock, flags);
+        entry = (u32*)get_intremap_entry(req_id, vector, delivery_mode);
         update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
-    }
-
-    spin_unlock_irqrestore(&int_remap_table_lock, flags);
-    return;
-}
-
-extern int nr_ioapic_registers[MAX_IO_APICS];
-extern int nr_ioapics;
-
-int __init amd_iommu_setup_intremap_table(void)
+        spin_unlock_irqrestore(&ivrs_mappings[req_id].intremap_lock, flags);
+
+        if ( iommu->enabled )
+        {
+            spin_lock_irqsave(&iommu->lock, flags);
+            invalidate_interrupt_table(iommu, req_id);
+            flush_command_buffer(iommu);
+            spin_unlock_irqrestore(&iommu->lock, flags);
+        }
+    }
+}
+
+int __init amd_iommu_setup_ioapic_remapping(void)
 {
     struct IO_APIC_route_entry rte = {0};
     unsigned long flags;
     u32* entry;
     int apic, pin;
     u8 delivery_mode, dest, vector, dest_mode;
-
-    if ( int_remap_table == NULL )
-    {
-        int_remap_table = __alloc_amd_iommu_tables(INTREMAP_TABLE_ORDER);
-        if ( int_remap_table == NULL )
-            return -ENOMEM;
-        memset(int_remap_table, 0, PAGE_SIZE * (1UL << INTREMAP_TABLE_ORDER));
-    }
+    u16 bdf, req_id;
+    struct amd_iommu *iommu;
 
     /* Read ioapic entries and update interrupt remapping table accordingly */
     for ( apic = 0; apic < nr_ioapics; apic++ )
@@ -138,18 +149,34 @@ int __init amd_iommu_setup_intremap_tabl
             if ( rte.mask == 1 )
                 continue;
 
+            /* get device id of ioapic devices */
+            bdf = ioapic_bdf[IO_APIC_ID(apic)];
+            iommu = find_iommu_for_device(bdf);
+            if ( !iommu )
+            {
+                AMD_IOMMU_DEBUG(
+                "Failed to find iommu for ioapic device id = 0x%04x\n", bdf);
+                continue;
+            }
+
+            req_id = get_intremap_requestor_id(bdf);
             delivery_mode = rte.delivery_mode;
             vector = rte.vector;
             dest_mode = rte.dest_mode;
-            if ( dest_mode == 0 )
-                dest = rte.dest.physical.physical_dest & 0xf;
-            else
-                dest = rte.dest.logical.logical_dest & 0xff;
-
-            spin_lock_irqsave(&int_remap_table_lock, flags);
-            entry = (u32*)get_intremap_entry(vector, delivery_mode);
+            dest = rte.dest.logical.logical_dest;
+
+            spin_lock_irqsave(&ivrs_mappings[req_id].intremap_lock, flags);
+            entry = (u32*)get_intremap_entry(req_id, vector, delivery_mode);
             update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
-            spin_unlock_irqrestore(&int_remap_table_lock, flags);
+            spin_unlock_irqrestore(&ivrs_mappings[req_id].intremap_lock, flags);
+
+            if ( iommu->enabled )
+            {
+                spin_lock_irqsave(&iommu->lock, flags);
+                invalidate_interrupt_table(iommu, req_id);
+                flush_command_buffer(iommu);
+                spin_unlock_irqrestore(&iommu->lock, flags);
+            }
         }
     }
     return 0;
@@ -160,17 +187,27 @@ void amd_iommu_ioapic_update_ire(
 {
     struct IO_APIC_route_entry ioapic_rte = { 0 };
     unsigned int rte_upper = (reg & 1) ? 1 : 0;
-    int saved_mask;
+    int saved_mask, bdf;
+    struct amd_iommu *iommu;
 
     *IO_APIC_BASE(apic) = reg;
     *(IO_APIC_BASE(apic)+4) = value;
 
-    if ( int_remap_table == NULL )
-        return;
-    if ( !rte_upper )
-        return;
-
-    reg--;
+    if ( !amd_iommu_enabled )
+        return;
+
+    /* get device id of ioapic devices */
+    bdf = ioapic_bdf[IO_APIC_ID(apic)];
+    iommu = find_iommu_for_device(bdf);
+    if ( !iommu )
+    {
+        AMD_IOMMU_DEBUG(
+            "Failed to find iommu for ioapic device id = 0x%04x\n", bdf);
+        return;
+    }
+    if ( rte_upper )
+        return;
+
     /* read both lower and upper 32-bits of rte entry */
     *IO_APIC_BASE(apic) = reg;
     *(((u32 *)&ioapic_rte) + 0) = *(IO_APIC_BASE(apic)+4);
@@ -184,7 +221,8 @@ void amd_iommu_ioapic_update_ire(
     *(IO_APIC_BASE(apic)+4) = *(((int *)&ioapic_rte)+0);
     ioapic_rte.mask = saved_mask;
 
-    update_intremap_entry_from_ioapic(&ioapic_rte, rte_upper, value);
+    update_intremap_entry_from_ioapic(
+        bdf, iommu, &ioapic_rte, rte_upper, value);
 
     /* unmask the interrupt after we have updated the intremap table */
     *IO_APIC_BASE(apic) = reg;
@@ -196,28 +234,49 @@ static void update_intremap_entry_from_m
 {
     unsigned long flags;
     u32* entry;
-    u16 dev_id;
+    u16 bdf, req_id, alias_id;
 
     u8 delivery_mode, dest, vector, dest_mode;
 
-    dev_id = (pdev->bus << 8) | pdev->devfn;
-
-    spin_lock_irqsave(&int_remap_table_lock, flags);
+    bdf = (pdev->bus << 8) | pdev->devfn;
+    req_id = get_dma_requestor_id(bdf);
+
+    spin_lock_irqsave(&ivrs_mappings[req_id].intremap_lock, flags);
     dest_mode = (msg->address_lo >> MSI_ADDR_DESTMODE_SHIFT) & 0x1;
     delivery_mode = (msg->data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x1;
     vector = (msg->data >> MSI_DATA_VECTOR_SHIFT) & MSI_DATA_VECTOR_MASK;
     dest = (msg->address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff;
 
-    entry = (u32*)get_intremap_entry((u8)vector, (u8)delivery_mode);
+    entry = (u32*)get_intremap_entry(req_id, vector, delivery_mode);
     update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
-    spin_unlock_irqrestore(&int_remap_table_lock, flags);
-
-    spin_lock_irqsave(&iommu->lock, flags);
-    invalidate_interrupt_table(iommu, dev_id);
-    flush_command_buffer(iommu);
-    spin_unlock_irqrestore(&iommu->lock, flags);
-
-    return;
+    spin_unlock_irqrestore(&ivrs_mappings[req_id].intremap_lock, flags);
+
+    /*
+     * In some special cases, a pci-e device (e.g. a SATA controller in IDE
+     * mode) will use its alias id to index the interrupt remapping table.
+     * We have to set up a secondary interrupt remapping entry to satisfy
+     * those devices.
+     */
+    alias_id = get_intremap_requestor_id(bdf);
+    if ( ( bdf != alias_id ) &&
+        ivrs_mappings[alias_id].intremap_table != NULL )
+    {
+        spin_lock_irqsave(&ivrs_mappings[alias_id].intremap_lock, flags);
+        entry = (u32*)get_intremap_entry(alias_id, vector, delivery_mode);
+        update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
+        invalidate_interrupt_table(iommu, alias_id);
+        spin_unlock_irqrestore(&ivrs_mappings[alias_id].intremap_lock, flags);
+    }
+
+    if ( iommu->enabled )
+    {
+        spin_lock_irqsave(&iommu->lock, flags);
+        invalidate_interrupt_table(iommu, req_id);
+        if ( alias_id != req_id )
+            invalidate_interrupt_table(iommu, alias_id);
+        flush_command_buffer(iommu);
+        spin_unlock_irqrestore(&iommu->lock, flags);
+    }
 }
 
 void amd_iommu_msi_msg_update_ire(
@@ -226,21 +285,38 @@ void amd_iommu_msi_msg_update_ire(
     struct pci_dev *pdev = msi_desc->dev;
     struct amd_iommu *iommu = NULL;
 
-    iommu = find_iommu_for_device(pdev->bus, pdev->devfn);
-
-    if ( !iommu || !int_remap_table )
-        return;
+    if ( !amd_iommu_enabled )
+        return;
+
+    iommu = find_iommu_for_device((pdev->bus << 8) | pdev->devfn);
+
+    if ( !iommu )
+    {
+        AMD_IOMMU_DEBUG(
+            "Failed to find iommu for MSI device id = 0x%04x\n",
+            (pdev->bus << 8) | pdev->devfn);
+        return;
+    }
 
     update_intremap_entry_from_msi_msg(iommu, pdev, msg);
 }
 
-int __init deallocate_intremap_table(void)
-{
-    if ( int_remap_table )
-    {
-        __free_amd_iommu_tables(int_remap_table, INTREMAP_TABLE_ORDER);
-        int_remap_table = NULL;
-    }
-
-    return 0;
-}
+void __init amd_iommu_free_intremap_table(int bdf)
+{
+    void *tb = ivrs_mappings[bdf].intremap_table;
+
+    if ( tb )
+    {
+        __free_amd_iommu_tables(tb, INTREMAP_TABLE_ORDER);
+        ivrs_mappings[bdf].intremap_table = NULL;
+    }
+}
+
+void* __init amd_iommu_alloc_intremap_table(void)
+{
+    void *tb;
+    tb = __alloc_amd_iommu_tables(INTREMAP_TABLE_ORDER);
+    BUG_ON(tb == NULL);
+    memset(tb, 0, PAGE_SIZE * (1UL << INTREMAP_TABLE_ORDER));
+    return tb;
+}
diff -r 10b709ce3050 -r c1aada7b3341 xen/drivers/passthrough/amd/iommu_map.c
--- a/xen/drivers/passthrough/amd/iommu_map.c   Tue Oct 27 12:39:30 2009 +0000
+++ b/xen/drivers/passthrough/amd/iommu_map.c   Tue Oct 27 12:43:42 2009 +0000
@@ -23,8 +23,6 @@
 #include <asm/amd-iommu.h>
 #include <asm/hvm/svm/amd-iommu-proto.h>
 
-long amd_iommu_poll_comp_wait = COMPLETION_WAIT_DEFAULT_POLLING_COUNT;
-
 static int queue_iommu_command(struct amd_iommu *iommu, u32 cmd[])
 {
     u32 tail, head, *cmd_buffer;
@@ -131,32 +129,24 @@ void flush_command_buffer(struct amd_iom
                          IOMMU_COMP_WAIT_I_FLAG_SHIFT, &cmd[0]);
     send_iommu_command(iommu, cmd);
 
-    /* wait for 'ComWaitInt' to signal comp#endifletion? */
-    if ( amd_iommu_poll_comp_wait )
-    {
-        loop_count = amd_iommu_poll_comp_wait;
-        do {
-            status = readl(iommu->mmio_base +
-                           IOMMU_STATUS_MMIO_OFFSET);
-            comp_wait = get_field_from_reg_u32(
-                status,
-                IOMMU_STATUS_COMP_WAIT_INT_MASK,
-                IOMMU_STATUS_COMP_WAIT_INT_SHIFT);
-            --loop_count;
-        } while ( loop_count && !comp_wait );
-
-        if ( comp_wait )
-        {
-            /* clear 'ComWaitInt' in status register (WIC) */
-            status &= IOMMU_STATUS_COMP_WAIT_INT_MASK;
-            writel(status, iommu->mmio_base +
-                   IOMMU_STATUS_MMIO_OFFSET);
-        }
-        else
-        {
-            amd_iov_warning("Warning: ComWaitInt bit did not assert!\n");
-        }
-    }
+    /* Make loop_count long enough for polling completion wait bit */
+    loop_count = 1000;
+    do {
+        status = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
+        comp_wait = get_field_from_reg_u32(status,
+            IOMMU_STATUS_COMP_WAIT_INT_MASK,
+            IOMMU_STATUS_COMP_WAIT_INT_SHIFT);
+        --loop_count;
+    } while ( !comp_wait && loop_count );
+
+    if ( comp_wait )
+    {
+        /* clear 'ComWaitInt' in status register (WIC) */
+        status &= IOMMU_STATUS_COMP_WAIT_INT_MASK;
+        writel(status, iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
+        return;
+    }
+    AMD_IOMMU_DEBUG("Warning: ComWaitInt bit did not assert!\n");
 }
 
 static void clear_iommu_l1e_present(u64 l2e, unsigned long gfn)
@@ -254,40 +244,62 @@ static void amd_iommu_set_page_directory
     pde[0] = entry;
 }
 
-void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr, u64 intremap_ptr,
-                                   u16 domain_id, u8 sys_mgt, u8 dev_ex,
-                                   u8 paging_mode)
+void amd_iommu_set_root_page_table(
+    u32 *dte, u64 root_ptr, u16 domain_id, u8 paging_mode, u8 valid)
 {
     u64 addr_hi, addr_lo;
     u32 entry;
-
-    dte[7] = dte[6] = 0;
+    set_field_in_reg_u32(domain_id, 0,
+                         IOMMU_DEV_TABLE_DOMAIN_ID_MASK,
+                         IOMMU_DEV_TABLE_DOMAIN_ID_SHIFT, &entry);
+    dte[2] = entry;
+
+    addr_lo = root_ptr & DMA_32BIT_MASK;
+    addr_hi = root_ptr >> 32;
+
+    set_field_in_reg_u32((u32)addr_hi, 0,
+                         IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_MASK,
+                         IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_SHIFT, &entry);
+    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
+                         IOMMU_DEV_TABLE_IO_WRITE_PERMISSION_MASK,
+                         IOMMU_DEV_TABLE_IO_WRITE_PERMISSION_SHIFT, &entry);
+    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
+                         IOMMU_DEV_TABLE_IO_READ_PERMISSION_MASK,
+                         IOMMU_DEV_TABLE_IO_READ_PERMISSION_SHIFT, &entry);
+    dte[1] = entry;
+
+    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
+                         IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_MASK,
+                         IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_SHIFT, &entry);
+    set_field_in_reg_u32(paging_mode, entry,
+                         IOMMU_DEV_TABLE_PAGING_MODE_MASK,
+                         IOMMU_DEV_TABLE_PAGING_MODE_SHIFT, &entry);
+    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
+                         IOMMU_DEV_TABLE_TRANSLATION_VALID_MASK,
+                         IOMMU_DEV_TABLE_TRANSLATION_VALID_SHIFT, &entry);
+    set_field_in_reg_u32(valid ? IOMMU_CONTROL_ENABLED :
+                         IOMMU_CONTROL_DISABLED, entry,
+                         IOMMU_DEV_TABLE_VALID_MASK,
+                         IOMMU_DEV_TABLE_VALID_SHIFT, &entry);
+    dte[0] = entry;
+}
+
+void amd_iommu_set_intremap_table(u32 *dte, u64 intremap_ptr, u8 int_valid)
+{
+    u64 addr_hi, addr_lo;
+    u32 entry;
 
     addr_lo = intremap_ptr & DMA_32BIT_MASK;
     addr_hi = intremap_ptr >> 32;
 
-    set_field_in_reg_u32((u32)addr_hi, 0,
+    entry = dte[5];
+    set_field_in_reg_u32((u32)addr_hi, entry,
                         IOMMU_DEV_TABLE_INT_TABLE_PTR_HIGH_MASK,
                         IOMMU_DEV_TABLE_INT_TABLE_PTR_HIGH_SHIFT, &entry);
-    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
-                        IOMMU_DEV_TABLE_INIT_PASSTHRU_MASK,
-                        IOMMU_DEV_TABLE_INIT_PASSTHRU_SHIFT, &entry);
-    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
-                        IOMMU_DEV_TABLE_EINT_PASSTHRU_MASK,
-                        IOMMU_DEV_TABLE_EINT_PASSTHRU_SHIFT, &entry);
-    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
-                        IOMMU_DEV_TABLE_NMI_PASSTHRU_MASK,
-                        IOMMU_DEV_TABLE_NMI_PASSTHRU_SHIFT, &entry);
     /* Fixed and arbitrated interrupts remapped */
     set_field_in_reg_u32(2, entry,
                         IOMMU_DEV_TABLE_INT_CONTROL_MASK,
                         IOMMU_DEV_TABLE_INT_CONTROL_SHIFT, &entry);
-    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
-                        IOMMU_DEV_TABLE_LINT0_ENABLE_MASK,
-                        IOMMU_DEV_TABLE_LINT0_ENABLE_SHIFT, &entry);
-    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
-                        IOMMU_DEV_TABLE_LINT1_ENABLE_MASK,
-                        IOMMU_DEV_TABLE_LINT1_ENABLE_SHIFT, &entry);
     dte[5] = entry;
 
     set_field_in_reg_u32((u32)addr_lo >> 6, 0,
@@ -297,13 +309,47 @@ void amd_iommu_set_dev_table_entry(u32 *
     set_field_in_reg_u32(0xB, entry,
                          IOMMU_DEV_TABLE_INT_TABLE_LENGTH_MASK,
                          IOMMU_DEV_TABLE_INT_TABLE_LENGTH_SHIFT, &entry);
-    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
+    /* ignore unmapped interrupts */
+    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
+                         IOMMU_DEV_TABLE_INT_TABLE_IGN_UNMAPPED_MASK,
+                         IOMMU_DEV_TABLE_INT_TABLE_IGN_UNMAPPED_SHIFT, &entry);
+    set_field_in_reg_u32(int_valid ? IOMMU_CONTROL_ENABLED :
+                         IOMMU_CONTROL_DISABLED, entry,
                          IOMMU_DEV_TABLE_INT_VALID_MASK,
                          IOMMU_DEV_TABLE_INT_VALID_SHIFT, &entry);
-    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
-                         IOMMU_DEV_TABLE_INT_TABLE_IGN_UNMAPPED_MASK,
-                         IOMMU_DEV_TABLE_INT_TABLE_IGN_UNMAPPED_SHIFT, &entry);
     dte[4] = entry;
+}
+
+void amd_iommu_add_dev_table_entry(
+    u32 *dte, u8 sys_mgt, u8 dev_ex, u8 lint1_pass, u8 lint0_pass, 
+    u8 nmi_pass, u8 ext_int_pass, u8 init_pass)
+{
+    u32 entry;
+
+    dte[7] = dte[6] = dte[4] = dte[2] = dte[1] = dte[0] = 0;
+
+    set_field_in_reg_u32(init_pass ? IOMMU_CONTROL_ENABLED :
+                        IOMMU_CONTROL_DISABLED, 0,
+                        IOMMU_DEV_TABLE_INIT_PASSTHRU_MASK,
+                        IOMMU_DEV_TABLE_INIT_PASSTHRU_SHIFT, &entry);
+    set_field_in_reg_u32(ext_int_pass ? IOMMU_CONTROL_ENABLED :
+                        IOMMU_CONTROL_DISABLED, entry,
+                        IOMMU_DEV_TABLE_EINT_PASSTHRU_MASK,
+                        IOMMU_DEV_TABLE_EINT_PASSTHRU_SHIFT, &entry);
+    set_field_in_reg_u32(nmi_pass ? IOMMU_CONTROL_ENABLED :
+                        IOMMU_CONTROL_DISABLED, entry,
+                        IOMMU_DEV_TABLE_NMI_PASSTHRU_MASK,
+                        IOMMU_DEV_TABLE_NMI_PASSTHRU_SHIFT, &entry);
+    set_field_in_reg_u32(lint0_pass ? IOMMU_CONTROL_ENABLED :
+                        IOMMU_CONTROL_DISABLED, entry,
+                        IOMMU_DEV_TABLE_LINT0_ENABLE_MASK,
+                        IOMMU_DEV_TABLE_LINT0_ENABLE_SHIFT, &entry);
+    set_field_in_reg_u32(lint1_pass ? IOMMU_CONTROL_ENABLED :
+                        IOMMU_CONTROL_DISABLED, entry,
+                        IOMMU_DEV_TABLE_LINT1_ENABLE_MASK,
+                        IOMMU_DEV_TABLE_LINT1_ENABLE_SHIFT, &entry);
+    dte[5] = entry;
 
     set_field_in_reg_u32(sys_mgt, 0,
                          IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_MASK,
@@ -312,38 +358,6 @@ void amd_iommu_set_dev_table_entry(u32 *
                          IOMMU_DEV_TABLE_ALLOW_EXCLUSION_MASK,
                          IOMMU_DEV_TABLE_ALLOW_EXCLUSION_SHIFT, &entry);
     dte[3] = entry;
-
-    set_field_in_reg_u32(domain_id, 0,
-                         IOMMU_DEV_TABLE_DOMAIN_ID_MASK,
-                         IOMMU_DEV_TABLE_DOMAIN_ID_SHIFT, &entry);
-    dte[2] = entry;
-
-    addr_lo = root_ptr & DMA_32BIT_MASK;
-    addr_hi = root_ptr >> 32;
-    set_field_in_reg_u32((u32)addr_hi, 0,
-                         IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_MASK,
-                         IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_SHIFT, &entry);
-    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
-                         IOMMU_DEV_TABLE_IO_WRITE_PERMISSION_MASK,
-                         IOMMU_DEV_TABLE_IO_WRITE_PERMISSION_SHIFT, &entry);
-    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
-                         IOMMU_DEV_TABLE_IO_READ_PERMISSION_MASK,
-                         IOMMU_DEV_TABLE_IO_READ_PERMISSION_SHIFT, &entry);
-    dte[1] = entry;
-
-    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
-                         IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_MASK,
-                         IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_SHIFT, &entry);
-    set_field_in_reg_u32(paging_mode, entry,
-                         IOMMU_DEV_TABLE_PAGING_MODE_MASK,
-                         IOMMU_DEV_TABLE_PAGING_MODE_SHIFT, &entry);
-    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
-                         IOMMU_DEV_TABLE_TRANSLATION_VALID_MASK,
-                         IOMMU_DEV_TABLE_TRANSLATION_VALID_SHIFT, &entry);
-    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
-                         IOMMU_DEV_TABLE_VALID_MASK,
-                         IOMMU_DEV_TABLE_VALID_SHIFT, &entry);
-    dte[0] = entry;
 }
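
Splitting the old amd_iommu_set_dev_table_entry() into these three helpers is what enables the two-step device table update described in the changelog: interrupt fields can be programmed before any I/O page table exists. A hedged sketch of how a caller composes them (values illustrative):

    /* Step 1, at IVRS parse time: passthrough flags and the
     * per-device interrupt remapping table. */
    amd_iommu_add_dev_table_entry(dte, sys_mgt, dev_ex, lint1_pass,
                                  lint0_pass, nmi_pass, ext_int_pass,
                                  init_pass);
    amd_iommu_set_intremap_table(dte, virt_to_maddr(intremap_table), 1);

    /* Step 2, once the domain's page tables exist: DMA translation. */
    amd_iommu_set_root_page_table(dte, page_to_maddr(root_table),
                                  domain_id, paging_mode, 1);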
 
 u64 amd_iommu_get_next_table_from_pte(u32 *entry)
@@ -388,16 +402,6 @@ void invalidate_dev_table_entry(struct a
     cmd[1] = entry;
 
     send_iommu_command(iommu, cmd);
-}
-
-int amd_iommu_is_dte_page_translation_valid(u32 *entry)
-{
-    return (get_field_from_reg_u32(entry[0],
-                                   IOMMU_DEV_TABLE_VALID_MASK,
-                                   IOMMU_DEV_TABLE_VALID_SHIFT) &&
-            get_field_from_reg_u32(entry[0],
-                                   IOMMU_DEV_TABLE_TRANSLATION_VALID_MASK,
-                                   IOMMU_DEV_TABLE_TRANSLATION_VALID_SHIFT));
 }
 
 static u64 iommu_l2e_from_pfn(struct page_info *table, int level,
@@ -464,7 +468,7 @@ int amd_iommu_map_page(struct domain *d,
     if ( iommu_l2e == 0 )
     {
         spin_unlock(&hd->mapping_lock);
-        amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
+        AMD_IOMMU_DEBUG("Invalid IO pagetable entry gfn = %lx\n", gfn);
         domain_crash(d);
         return -EFAULT;
     }
@@ -497,7 +501,7 @@ int amd_iommu_unmap_page(struct domain *
     if ( iommu_l2e == 0 )
     {
         spin_unlock(&hd->mapping_lock);
-        amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
+        AMD_IOMMU_DEBUG("Invalid IO pagetable entry gfn = %lx\n", gfn);
         domain_crash(d);
         return -EFAULT;
     }
@@ -538,7 +542,7 @@ int amd_iommu_reserve_domain_unity_map(
         if ( iommu_l2e == 0 )
         {
             spin_unlock(&hd->mapping_lock);
-            amd_iov_error("Invalid IO pagetable entry phys_addr = %lx\n",
+            AMD_IOMMU_DEBUG("Invalid IO pagetable entry phys_addr = %lx\n",
                           phys_addr);
             domain_crash(domain);
             return -EFAULT;
@@ -588,7 +592,7 @@ int amd_iommu_sync_p2m(struct domain *d)
         {
             spin_unlock(&d->page_alloc_lock);
             spin_unlock(&hd->mapping_lock);
-            amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
+            AMD_IOMMU_DEBUG("Invalid IO pagetable entry gfn = %lx\n", gfn);
             domain_crash(d);
             return -EFAULT;
         }
diff -r 10b709ce3050 -r c1aada7b3341 xen/drivers/passthrough/amd/pci_amd_iommu.c
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c       Tue Oct 27 12:39:30 2009 +0000
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c       Tue Oct 27 12:43:42 2009 +0000
@@ -26,81 +26,91 @@
 
 extern unsigned short ivrs_bdf_entries;
 extern struct ivrs_mappings *ivrs_mappings;
-extern void *int_remap_table;
-
-int __init amd_iommu_init(void)
-{
-    struct amd_iommu *iommu;
-
-    BUG_ON( !iommu_found() );
-
-    ivrs_bdf_entries = amd_iommu_get_ivrs_dev_entries();
-
-    if ( !ivrs_bdf_entries )
-        goto error_out;
-
-    if ( amd_iommu_setup_shared_tables() != 0 )
-        goto error_out;
-
-    if ( amd_iommu_update_ivrs_mapping_acpi() != 0 )
-        goto error_out;
-
-    for_each_amd_iommu ( iommu )
-        if ( amd_iommu_init_one(iommu) != 0 )
-            goto error_out;
-
-    return 0;
-
-error_out:
-    amd_iommu_init_cleanup();
-    return -ENODEV;
-}
-
-struct amd_iommu *find_iommu_for_device(int bus, int devfn)
-{
-    u16 bdf = (bus << 8) | devfn;
+
+struct amd_iommu *find_iommu_for_device(int bdf)
+{
     BUG_ON ( bdf >= ivrs_bdf_entries );
     return ivrs_mappings[bdf].iommu;
 }
 
+/*
+ * Some devices use the alias id and the original device id to index the
+ * interrupt table and the I/O page table respectively. Such devices have
+ * both an alias entry and a select entry in the IVRS structure.
+ *
+ * Return the original device id if the device has a valid interrupt
+ * remapping table set up for both the select entry and the alias entry.
+ */
+int get_dma_requestor_id(u16 bdf)
+{
+    int req_id;
+
+    BUG_ON ( bdf >= ivrs_bdf_entries );
+    req_id = ivrs_mappings[bdf].dte_requestor_id;
+    if ( (ivrs_mappings[bdf].intremap_table != NULL) &&
+         (ivrs_mappings[req_id].intremap_table != NULL) )
+        req_id = bdf;
+
+    return req_id;
+}
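
As a hypothetical illustration of the rule above: suppose the IVRS gives device 0e:00.0 (bdf 0x0e00) the alias requestor id 0x0e08. If per-device interrupt remapping tables were allocated for both ids, the device's own id is used to index the I/O page table:

    req_id = get_dma_requestor_id(0x0e00);   /* returns 0x0e00 */

Otherwise the alias id (0x0e08) is returned and indexes both the interrupt table and the I/O page table.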
+
+static int is_translation_valid(u32 *entry)
+{
+    return (get_field_from_reg_u32(entry[0],
+                                   IOMMU_DEV_TABLE_VALID_MASK,
+                                   IOMMU_DEV_TABLE_VALID_SHIFT) &&
+            get_field_from_reg_u32(entry[0],
+                                   IOMMU_DEV_TABLE_TRANSLATION_VALID_MASK,
+                                   IOMMU_DEV_TABLE_TRANSLATION_VALID_SHIFT));
+}
+
+static void disable_translation(u32 *dte)
+{
+    u32 entry;
+
+    entry = dte[0];
+    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, entry,
+                         IOMMU_DEV_TABLE_TRANSLATION_VALID_MASK,
+                         IOMMU_DEV_TABLE_TRANSLATION_VALID_SHIFT, &entry);
+    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, entry,
+                         IOMMU_DEV_TABLE_VALID_MASK,
+                         IOMMU_DEV_TABLE_VALID_SHIFT, &entry);
+    dte[0] = entry;
+}
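
disable_translation() deliberately replaces the old memset of the whole 32-byte entry: interrupt remapping state now lives in the same DTE (dte[4] and dte[5]), so tearing down DMA translation must flip only the V and TV bits in dte[0] and leave the interrupt fields bound. Roughly:

    /* old: memset(dte, 0, IOMMU_DEV_TABLE_ENTRY_SIZE); would also
     * have wiped the per-device interrupt remapping fields. */
    disable_translation((u32 *)dte);   /* clears V and TV only */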
+
 static void amd_iommu_setup_domain_device(
     struct domain *domain, struct amd_iommu *iommu, int bdf)
 {
     void *dte;
     unsigned long flags;
-    int req_id;
-    u8 sys_mgt, dev_ex;
+    int req_id, valid = 1;
+
     struct hvm_iommu *hd = domain_hvm_iommu(domain);
 
-    BUG_ON( !hd->root_table || !hd->paging_mode || !int_remap_table );
+    BUG_ON( !hd->root_table || !hd->paging_mode || !iommu->dev_table.buffer );
+
+    if ( iommu_passthrough && (domain->domain_id == 0) )
+        valid = 0;
 
     /* get device-table entry */
-    req_id = ivrs_mappings[bdf].dte_requestor_id;
+    req_id = get_dma_requestor_id(bdf);
     dte = iommu->dev_table.buffer + (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
 
     spin_lock_irqsave(&iommu->lock, flags);
 
-    if ( !amd_iommu_is_dte_page_translation_valid((u32 *)dte) )
+    if ( !is_translation_valid((u32 *)dte) )
     {
         /* bind DTE to domain page-tables */
-        sys_mgt = ivrs_mappings[req_id].dte_sys_mgt_enable;
-        dev_ex = ivrs_mappings[req_id].dte_allow_exclusion;
-
-        amd_iommu_set_dev_table_entry((u32 *)dte,
-                                      page_to_maddr(hd->root_table),
-                                      virt_to_maddr(int_remap_table),
-                                      hd->domain_id, sys_mgt, dev_ex,
-                                      hd->paging_mode);
+        amd_iommu_set_root_page_table(
+            (u32 *)dte, page_to_maddr(hd->root_table), hd->domain_id,
+            hd->paging_mode, valid);
 
         invalidate_dev_table_entry(iommu, req_id);
-        invalidate_interrupt_table(iommu, req_id);
         flush_command_buffer(iommu);
-        amd_iov_info("Enable DTE:0x%x, "
-                "root_table:%"PRIx64", interrupt_table:%"PRIx64", "
-                "domain_id:%d, paging_mode:%d\n",
-                req_id, (u64)page_to_maddr(hd->root_table),
-                (u64)virt_to_maddr(int_remap_table), hd->domain_id,
-                hd->paging_mode);
+
+        AMD_IOMMU_DEBUG("Setup I/O page table at DTE:0x%04x, "
+                        "root_table:0x%016"PRIx64", domain_id:%d, paging_mode:%d\n",
+                        req_id, (u64)page_to_maddr(hd->root_table),
+                        hd->domain_id, hd->paging_mode);
     }
 
     spin_unlock_irqrestore(&iommu->lock, flags);
@@ -133,12 +143,15 @@ static void amd_iommu_setup_dom0_devices
                 list_add(&pdev->domain_list, &d->arch.pdev_list);
 
                 bdf = (bus << 8) | pdev->devfn;
-                /* supported device? */
-                iommu = (bdf < ivrs_bdf_entries) ?
-                    find_iommu_for_device(bus, pdev->devfn) : NULL;
-
-                if ( iommu )
-                    amd_iommu_setup_domain_device(d, iommu, bdf);
+                iommu = find_iommu_for_device(bdf);
+
+                if ( !iommu )
+                {
+                    AMD_IOMMU_DEBUG("Failed to find IOMMU for device "
+                        "%02x:%02x.%x\n", bus, dev, func);
+                    continue;
+                }
+                amd_iommu_setup_domain_device(d, iommu, bdf);
             }
         }
     }
@@ -149,21 +162,15 @@ int amd_iov_detect(void)
 {
     INIT_LIST_HEAD(&amd_iommu_head);
 
-    if ( amd_iommu_detect_acpi() != 0 )
-    {
-        amd_iov_error("Error detection\n");
-        return -ENODEV;
-    }
-
-    if ( !iommu_found() )
-    {
-        printk("AMD_IOV: IOMMU not found!\n");
+    if ( (amd_iommu_detect_acpi() != 0) || (iommu_found() == 0) )
+    {
+        printk("AMD-Vi: IOMMU not found!\n");
         return -ENODEV;
     }
 
     if ( amd_iommu_init() != 0 )
     {
-        amd_iov_error("Error initialization\n");
+        printk("AMD-Vi: Error initialization\n");
         return -ENODEV;
     }
     return 0;
@@ -242,16 +249,17 @@ static void amd_iommu_disable_domain_dev
     unsigned long flags;
     int req_id;
 
-    req_id = ivrs_mappings[bdf].dte_requestor_id;
+    BUG_ON ( iommu->dev_table.buffer == NULL );
+    req_id = get_dma_requestor_id(bdf);
     dte = iommu->dev_table.buffer + (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
 
-    spin_lock_irqsave(&iommu->lock, flags); 
-    if ( amd_iommu_is_dte_page_translation_valid((u32 *)dte) )
-    {
-        memset (dte, 0, IOMMU_DEV_TABLE_ENTRY_SIZE);
+    spin_lock_irqsave(&iommu->lock, flags);
+    if ( is_translation_valid((u32 *)dte) )
+    {
+        disable_translation((u32 *)dte);
         invalidate_dev_table_entry(iommu, req_id);
         flush_command_buffer(iommu);
-        amd_iov_info("Disable DTE:0x%x,"
+        AMD_IOMMU_DEBUG("Disable DTE:0x%x,"
                 " domain_id:%d, paging_mode:%d\n",
                 req_id,  domain_hvm_iommu(domain)->domain_id,
                 domain_hvm_iommu(domain)->paging_mode);
@@ -272,13 +280,10 @@ static int reassign_device( struct domai
         return -ENODEV;
 
     bdf = (bus << 8) | devfn;
-    /* supported device? */
-    iommu = (bdf < ivrs_bdf_entries) ?
-    find_iommu_for_device(bus, pdev->devfn) : NULL;
-
+    iommu = find_iommu_for_device(bdf);
     if ( !iommu )
     {
-        amd_iov_error("Fail to find iommu."
+        AMD_IOMMU_DEBUG("Fail to find iommu."
             " %x:%x.%x cannot be assigned to domain %d\n", 
             bus, PCI_SLOT(devfn), PCI_FUNC(devfn), target->domain_id);
         return -ENODEV;
@@ -290,7 +295,7 @@ static int reassign_device( struct domai
     pdev->domain = target;
 
     amd_iommu_setup_domain_device(target, iommu, bdf);
-    amd_iov_info("reassign %x:%x.%x domain %d -> domain %d\n",
+    AMD_IOMMU_DEBUG("Reassign %x:%x.%x domain %d -> domain %d\n",
                  bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
                  source->domain_id, target->domain_id);
 
@@ -300,7 +305,7 @@ static int amd_iommu_assign_device(struc
 static int amd_iommu_assign_device(struct domain *d, u8 bus, u8 devfn)
 {
     int bdf = (bus << 8) | devfn;
-    int req_id = ivrs_mappings[bdf].dte_requestor_id;
+    int req_id = get_dma_requestor_id(bdf);
 
     amd_iommu_sync_p2m(d);
 
@@ -377,12 +382,10 @@ static int amd_iommu_add_device(struct p
         return -EINVAL;
 
     bdf = (pdev->bus << 8) | pdev->devfn;
-    iommu = (bdf < ivrs_bdf_entries) ?
-    find_iommu_for_device(pdev->bus, pdev->devfn) : NULL;
-
+    iommu = find_iommu_for_device(bdf);
     if ( !iommu )
     {
-        amd_iov_error("Fail to find iommu."
+        AMD_IOMMU_DEBUG("Fail to find iommu."
             " %x:%x.%x cannot be assigned to domain %d\n", 
             pdev->bus, PCI_SLOT(pdev->devfn),
             PCI_FUNC(pdev->devfn), pdev->domain->domain_id);
@@ -401,12 +404,10 @@ static int amd_iommu_remove_device(struc
         return -EINVAL;
 
     bdf = (pdev->bus << 8) | pdev->devfn;
-    iommu = (bdf < ivrs_bdf_entries) ?
-    find_iommu_for_device(pdev->bus, pdev->devfn) : NULL;
-
+    iommu = find_iommu_for_device(bdf);
     if ( !iommu )
     {
-        amd_iov_error("Fail to find iommu."
+        AMD_IOMMU_DEBUG("Fail to find iommu."
             " %x:%x.%x cannot be removed from domain %d\n", 
             pdev->bus, PCI_SLOT(pdev->devfn),
             PCI_FUNC(pdev->devfn), pdev->domain->domain_id);
@@ -422,7 +423,7 @@ static int amd_iommu_group_id(u8 bus, u8
     int rt;
     int bdf = (bus << 8) | devfn;
     rt = ( bdf < ivrs_bdf_entries ) ?
-        ivrs_mappings[bdf].dte_requestor_id :
+        get_dma_requestor_id(bdf) :
         bdf;
     return rt;
 }
diff -r 10b709ce3050 -r c1aada7b3341 xen/drivers/passthrough/iommu.c
--- a/xen/drivers/passthrough/iommu.c   Tue Oct 27 12:39:30 2009 +0000
+++ b/xen/drivers/passthrough/iommu.c   Tue Oct 27 12:43:42 2009 +0000
@@ -46,6 +46,7 @@ int iommu_snoop = 0;
 int iommu_snoop = 0;
 int iommu_qinval = 0;
 int iommu_intremap = 0;
+int amd_iommu_debug = 0;
 
 static void __init parse_iommu_param(char *s)
 {
@@ -77,6 +78,8 @@ static void __init parse_iommu_param(cha
             iommu_qinval = 0;
         else if ( !strcmp(s, "no-intremap") )
             iommu_intremap = 0;
+        else if ( !strcmp(s, "amd-iommu-debug") )
+            amd_iommu_debug = 1;
 
         s = ss + 1;
     } while ( ss );
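
All three options touched by this series hang off the same comma-separated iommu= list. A plausible boot entry exercising them (GRUB syntax assumed here, not part of the patch):

    kernel /boot/xen.gz iommu=passthrough,no-intremap,amd-iommu-debug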
diff -r 10b709ce3050 -r c1aada7b3341 xen/include/asm-x86/amd-iommu.h
--- a/xen/include/asm-x86/amd-iommu.h   Tue Oct 27 12:39:30 2009 +0000
+++ b/xen/include/asm-x86/amd-iommu.h   Tue Oct 27 12:43:42 2009 +0000
@@ -58,9 +58,6 @@ struct amd_iommu {
     u8 pass_pw;
     u8 ht_tunnel_enable;
 
-    int last_downstream_bus;
-    int downstream_bus_present[PCI_MAX_BUS_COUNT];
-
     void *mmio_base;
     unsigned long mmio_base_phys;
 
@@ -92,5 +89,16 @@ struct ivrs_mappings {
     unsigned long addr_range_start;
     unsigned long addr_range_length;
     struct amd_iommu *iommu;
+
+    /* per device interrupt remapping table */
+    void *intremap_table;
+    spinlock_t intremap_lock;
+
+    /* interrupt remapping settings */
+    u8 dte_lint1_pass;
+    u8 dte_lint0_pass;
+    u8 dte_nmi_pass;
+    u8 dte_ext_int_pass;
+    u8 dte_init_pass;
 };
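
Each entry now carries its own interrupt remapping table in place of the removed global int_remap_table. A hedged sketch of how a table might be attached at IVRS parse time, using the allocator this patch declares in amd-iommu-proto.h (locking and error handling elided):

    ivrs_mappings[bdf].intremap_table = amd_iommu_alloc_intremap_table();
    spin_lock_init(&ivrs_mappings[bdf].intremap_lock);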
 #endif /* _ASM_X86_64_AMD_IOMMU_H */
diff -r 10b709ce3050 -r c1aada7b3341 xen/include/asm-x86/hvm/svm/amd-iommu-acpi.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-acpi.h      Tue Oct 27 12:39:30 2009 +0000
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-acpi.h      Tue Oct 27 12:43:42 2009 +0000
@@ -43,6 +43,7 @@
 #define AMD_IOMMU_ACPI_IVHD_DEV_ALIAS_RANGE    67
 #define AMD_IOMMU_ACPI_IVHD_DEV_EXT_SELECT 70
 #define AMD_IOMMU_ACPI_IVHD_DEV_EXT_RANGE  71
+#define AMD_IOMMU_ACPI_IVHD_DEV_SPECIAL    72
 
 /* IVHD IOMMU Flags */
 #define AMD_IOMMU_ACPI_COHERENT_MASK       0x20
@@ -151,6 +152,13 @@ struct acpi_ivhd_device_extended_range {
    struct acpi_ivhd_device_trailer trailer;
 };
 
+struct acpi_ivhd_device_special {
+   struct acpi_ivhd_device_header header;
+   u8  handle;
+   u16 dev_id;
+   u8  variety;
+};
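
Special entries (type 72) are how the IVRS names devices that PCI enumeration cannot discover, such as IOAPICs: handle carries the device's own id (e.g. the IOAPIC id), dev_id the BDF whose device table entry its interrupts remap through, and variety the device type. A hypothetical parser fragment (function name and behaviour are assumptions, not the patch's code):

    static u16 parse_ivhd_special(const struct acpi_ivhd_device_special *sp)
    {
        /* bind the special device's interrupt remapping to dev_id's DTE */
        return sp->dev_id;
    }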
+
 union acpi_ivhd_device {
    struct acpi_ivhd_device_header header;
    struct acpi_ivhd_device_range range;
@@ -158,6 +166,7 @@ union acpi_ivhd_device {
    struct acpi_ivhd_device_alias_range alias_range;
    struct acpi_ivhd_device_extended extended;
    struct acpi_ivhd_device_extended_range extended_range;
+   struct acpi_ivhd_device_special special;
 };
 
 struct acpi_ivmd_block_header {
diff -r 10b709ce3050 -r c1aada7b3341 xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h      Tue Oct 27 12:39:30 2009 +0000
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h      Tue Oct 27 12:43:42 2009 +0000
@@ -21,105 +21,56 @@
 #ifndef _ASM_X86_64_AMD_IOMMU_DEFS_H
 #define _ASM_X86_64_AMD_IOMMU_DEFS_H
 
-/* Reserve some non-mapped pages to handle error conditions.
- * 'bad_dma_address' will point to these reserved pages, and
- * the mapping funtions will return 'bad_dma_address' if there
- * are not enough page table entries available.
- */
-#define IOMMU_RESERVED_BASE_ADDR       0
-#define IOMMU_RESERVED_PAGES           32
-
-/* IOMMU ComWaitInt polling after issuing a COMPLETION_WAIT command */
-#define COMPLETION_WAIT_DEFAULT_POLLING_COUNT  10
-
 /* IOMMU Command Buffer entries: in power of 2 increments, minimum of 256 */
-#define IOMMU_CMD_BUFFER_DEFAULT_ENTRIES       512
+#define IOMMU_CMD_BUFFER_DEFAULT_ENTRIES    512
 
 /* IOMMU Event Log entries: in power of 2 increments, minimum of 256 */
 #define IOMMU_EVENT_LOG_DEFAULT_ENTRIES     512
 
-#define BITMAP_ENTRIES_PER_BYTE                8
-
-#define PTE_PER_TABLE_SHIFT            9
-#define PTE_PER_TABLE_SIZE             (1 << PTE_PER_TABLE_SHIFT)
-#define PTE_PER_TABLE_MASK             (~(PTE_PER_TABLE_SIZE - 1))
-#define PTE_PER_TABLE_ALIGN(entries)   \
-       (((entries) + PTE_PER_TABLE_SIZE - 1) & PTE_PER_TABLE_MASK)
-#define PTE_PER_TABLE_ALLOC(entries)   \
-       PAGE_SIZE * (PTE_PER_TABLE_ALIGN(entries) >> PTE_PER_TABLE_SHIFT)
-
-/* 0-based aperture order (represents virtual address space for DMA mappings */
-#define APERTURE_ORDER_FOR_32B_APERTURE                0
-#define APERTURE_ORDER_FOR_64MB_APERTURE       1
-#define APERTURE_ORDER_FOR_128MB_APERTURE      2
-#define APERTURE_ORDER_FOR_256MB_APERTURE      3
-#define APERTURE_ORDER_FOR_512MB_APERTURE      4
-#define APERTURE_ORDER_FOR_1GB_APERTURE                5
-#define APERTURE_ORDER_FOR_MAX_APERTURE                APERTURE_ORDER_FOR_1GB_APERTURE
-
-/* The minimum 32MB aperture requires 2**13 level-1 page table entries */
-#define SHIFT_FOR_MIN_APERTURE         13
-#define PAGES_FROM_APERTURE_ORDER(order)       \
-       ((1 << (order)) << SHIFT_FOR_MIN_APERTURE)
-#define ORDER_FROM_APERTURE_PAGES(pages)       \
-       get_order(((pages) * PAGE_SIZE) >> SHIFT_FOR_MIN_APERTURE)
-
-/*
- * PCI config-space
- */
-#define VALID_PCI_VENDOR_ID(id)                (((id) != 0) && ((id) != 0xFFFF))
-#define IS_PCI_MULTI_FUNCTION(hdr)     ((hdr) & 0x80)
-#define IS_PCI_TYPE0_HEADER(hdr)       (((hdr) & 0x7f) == 0)
-#define IS_PCI_TYPE1_HEADER(hdr)       (((hdr) & 0x7f) == 1)
-
-#define PCI_MAX_BUS_COUNT      256
-#define PCI_MAX_DEV_COUNT      32
-#define PCI_MAX_FUNC_COUNT     8
-#define PCI_MIN_DEVFN          0
-#define PCI_MAX_DEVFN          0xFF
-
-/*
- * Capability blocks are 4-byte aligned, and must start at >= offset 0x40,
- * for a max of 48 possible cap_blocks (256 - 0x40 = 192; 192 / 4 = 48)
- * The lower 2 bits of each pointer are reserved, and must be masked off.
- */
-#define PCI_MIN_CAP_OFFSET     0x40
-#define PCI_MAX_CAP_BLOCKS     48
-#define PCI_CAP_PTR_MASK       0xFC
+#define PTE_PER_TABLE_SHIFT     9
+#define PTE_PER_TABLE_SIZE      (1 << PTE_PER_TABLE_SHIFT)
+#define PTE_PER_TABLE_MASK      (~(PTE_PER_TABLE_SIZE - 1))
+#define PTE_PER_TABLE_ALIGN(entries)    \
+    (((entries) + PTE_PER_TABLE_SIZE - 1) & PTE_PER_TABLE_MASK)
+#define PTE_PER_TABLE_ALLOC(entries)    \
+    PAGE_SIZE * (PTE_PER_TABLE_ALIGN(entries) >> PTE_PER_TABLE_SHIFT)
+
+#define PCI_MIN_CAP_OFFSET  0x40
+#define PCI_MAX_CAP_BLOCKS  48
+#define PCI_CAP_PTR_MASK    0xFC
 
 /* IOMMU Capability */
-#define PCI_CAP_ID_MASK                0x000000FF
-#define PCI_CAP_ID_SHIFT       0
-#define PCI_CAP_NEXT_PTR_MASK  0x0000FF00
-#define PCI_CAP_NEXT_PTR_SHIFT 8
-#define PCI_CAP_TYPE_MASK      0x00070000
-#define PCI_CAP_TYPE_SHIFT     16
-#define PCI_CAP_REV_MASK       0x00F80000
-#define PCI_CAP_REV_SHIFT      19
-#define PCI_CAP_IOTLB_MASK     0x01000000
-#define PCI_CAP_IOTLB_SHIFT    24
-#define PCI_CAP_HT_TUNNEL_MASK 0x02000000
-#define PCI_CAP_HT_TUNNEL_SHIFT        25
-#define PCI_CAP_NP_CACHE_MASK  0x04000000
-#define PCI_CAP_NP_CACHE_SHIFT 26
-#define PCI_CAP_RESET_MASK     0x80000000
-#define PCI_CAP_RESET_SHIFT    31
-
-#define PCI_CAP_ID_SECURE_DEVICE       0x0F
-#define PCI_CAP_TYPE_IOMMU             0x3
-
-#define PCI_CAP_MMIO_BAR_LOW_OFFSET    0x04
-#define PCI_CAP_MMIO_BAR_HIGH_OFFSET   0x08
-#define PCI_CAP_MMIO_BAR_LOW_MASK      0xFFFFC000
-#define IOMMU_MMIO_REGION_LENGTH       0x4000
-
-#define PCI_CAP_RANGE_OFFSET           0x0C
-#define PCI_CAP_BUS_NUMBER_MASK                0x0000FF00
-#define PCI_CAP_BUS_NUMBER_SHIFT       8
-#define PCI_CAP_FIRST_DEVICE_MASK      0x00FF0000
-#define PCI_CAP_FIRST_DEVICE_SHIFT     16
-#define PCI_CAP_LAST_DEVICE_MASK       0xFF000000
-#define PCI_CAP_LAST_DEVICE_SHIFT      24
+#define PCI_CAP_ID_MASK     0x000000FF
+#define PCI_CAP_ID_SHIFT    0
+#define PCI_CAP_NEXT_PTR_MASK   0x0000FF00
+#define PCI_CAP_NEXT_PTR_SHIFT  8
+#define PCI_CAP_TYPE_MASK   0x00070000
+#define PCI_CAP_TYPE_SHIFT  16
+#define PCI_CAP_REV_MASK    0x00F80000
+#define PCI_CAP_REV_SHIFT   19
+#define PCI_CAP_IOTLB_MASK  0x01000000
+#define PCI_CAP_IOTLB_SHIFT 24
+#define PCI_CAP_HT_TUNNEL_MASK  0x02000000
+#define PCI_CAP_HT_TUNNEL_SHIFT 25
+#define PCI_CAP_NP_CACHE_MASK   0x04000000
+#define PCI_CAP_NP_CACHE_SHIFT  26
+#define PCI_CAP_RESET_MASK  0x80000000
+#define PCI_CAP_RESET_SHIFT 31
+
+#define PCI_CAP_TYPE_IOMMU      0x3
+
+#define PCI_CAP_MMIO_BAR_LOW_OFFSET 0x04
+#define PCI_CAP_MMIO_BAR_HIGH_OFFSET    0x08
+#define PCI_CAP_MMIO_BAR_LOW_MASK   0xFFFFC000
+#define IOMMU_MMIO_REGION_LENGTH    0x4000
+
+#define PCI_CAP_RANGE_OFFSET        0x0C
+#define PCI_CAP_BUS_NUMBER_MASK     0x0000FF00
+#define PCI_CAP_BUS_NUMBER_SHIFT    8
+#define PCI_CAP_FIRST_DEVICE_MASK   0x00FF0000
+#define PCI_CAP_FIRST_DEVICE_SHIFT  16
+#define PCI_CAP_LAST_DEVICE_MASK    0xFF000000
+#define PCI_CAP_LAST_DEVICE_SHIFT   24
 
 #define PCI_CAP_UNIT_ID_MASK    0x0000001F
 #define PCI_CAP_UNIT_ID_SHIFT   0
diff -r 10b709ce3050 -r c1aada7b3341 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h     Tue Oct 27 12:39:30 2009 +0000
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h     Tue Oct 27 12:43:42 2009 +0000
@@ -32,18 +32,14 @@
 #define DMA_32BIT_MASK  0x00000000ffffffffULL
 #define PAGE_ALIGN(addr)    (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
 
-#ifdef AMD_IOV_DEBUG
-#define amd_iov_info(fmt, args...) \
-    printk(XENLOG_INFO "AMD_IOV: " fmt, ## args)
-#define amd_iov_warning(fmt, args...) \
-    printk(XENLOG_WARNING "AMD_IOV: " fmt, ## args)
-#define amd_iov_error(fmt, args...) \
-    printk(XENLOG_ERR "AMD_IOV: %s:%d: " fmt, __FILE__ , __LINE__ , ## args)
-#else
-#define amd_iov_info(fmt, args...)
-#define amd_iov_warning(fmt, args...)
-#define amd_iov_error(fmt, args...)
-#endif
+extern int amd_iommu_debug;
+
+#define AMD_IOMMU_DEBUG(fmt, args...) \
+    do  \
+    {   \
+        if ( amd_iommu_debug )  \
+            printk("AMD-Vi: " fmt, ## args);    \
+    } while(0)
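
Unlike the removed compile-time amd_iov_* macros, this expands at every call site but prints only when amd_iommu_debug was set from the command line, so debug output can be enabled without rebuilding Xen. For example:

    /* silent by default; prints if booted with iommu=amd-iommu-debug */
    AMD_IOMMU_DEBUG("Reassign %x:%x.%x domain %d -> domain %d\n",
                    bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
                    source->domain_id, target->domain_id);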
 
 /* amd-iommu-detect functions */
 int __init amd_iommu_get_ivrs_dev_entries(void);
@@ -52,10 +48,7 @@ int __init amd_iommu_detect_acpi(void);
 
 /* amd-iommu-init functions */
 int __init amd_iommu_init(void);
-int __init amd_iommu_init_one(struct amd_iommu *iommu);
 int __init amd_iommu_update_ivrs_mapping_acpi(void);
-void __init amd_iommu_init_cleanup(void);
-int __init amd_iommu_setup_shared_tables(void);
 
 /* mapping functions */
 int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn);
@@ -67,9 +60,14 @@ void invalidate_all_iommu_pages(struct d
 void invalidate_all_iommu_pages(struct domain *d);
 
 /* device table functions */
-void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr, u64 intremap_ptr,
-        u16 domain_id, u8 sys_mgt, u8 dev_ex, u8 paging_mode);
-int amd_iommu_is_dte_page_translation_valid(u32 *entry);
+int get_dma_requestor_id(u16 bdf);
+void amd_iommu_add_dev_table_entry(
+    u32 *dte, u8 sys_mgt, u8 dev_ex, u8 lint1_pass, u8 lint0_pass, 
+    u8 nmi_pass, u8 ext_int_pass, u8 init_pass);
+void amd_iommu_set_intremap_table(
+    u32 *dte, u64 intremap_ptr, u8 int_valid);
+void amd_iommu_set_root_page_table(
+    u32 *dte, u64 root_ptr, u16 domain_id, u8 paging_mode, u8 valid);
 void invalidate_dev_table_entry(struct amd_iommu *iommu, u16 device_id);
 
 /* send cmd to iommu */
@@ -77,11 +75,12 @@ void flush_command_buffer(struct amd_iom
 void flush_command_buffer(struct amd_iommu *iommu);
 
 /* find iommu for bdf */
-struct amd_iommu *find_iommu_for_device(int bus, int devfn);
+struct amd_iommu *find_iommu_for_device(int bdf);
 
 /*interrupt remapping */
-int __init amd_iommu_setup_intremap_table(void);
-int __init deallocate_intremap_table(void);
+int __init amd_iommu_setup_ioapic_remapping(void);
+void *__init amd_iommu_alloc_intremap_table(void);
+void __init amd_iommu_free_intremap_table(int bdf);
 void invalidate_interrupt_table(struct amd_iommu *iommu, u16 device_id);
 void amd_iommu_ioapic_update_ire(
     unsigned int apic, unsigned int reg, unsigned int value);
