
[Xen-changelog] [xen-unstable] AMD IOMMU: Add event logging support.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1208349803 -3600
# Node ID 837ea1f0aa8ab3446cfb8bf3ab9d68e258d08785
# Parent  da4042899fd295ae5258b2f26f374afa3d1e1e17
AMD IOMMU: Add event logging support.

An MSI interrupt is triggered on an I/O page fault, and the hardware
automatically writes the fault address into an in-memory event log
buffer. This patch handles the MSI interrupt simply by reading the
event log entry and printing the parsed result.

Signed-off-by: Wei Wang <wei.wang2@xxxxxxx>
---
 xen/drivers/passthrough/amd/iommu_detect.c    |   41 +++
 xen/drivers/passthrough/amd/iommu_init.c      |  299 ++++++++++++++++++++++++++
 xen/drivers/passthrough/amd/pci_amd_iommu.c   |   23 +-
 xen/include/asm-x86/amd-iommu.h               |    3 
 xen/include/asm-x86/hvm/svm/amd-iommu-defs.h  |   37 +++
 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h |    1 
 6 files changed, 400 insertions(+), 4 deletions(-)
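
The event log is a single-producer ring: the hardware appends fixed-size
entries and advances a tail register, while the driver consumes entries at
a software head index, wrapping both at the buffer size. Below is a minimal
standalone C model of that head/tail protocol; sizes and names are made up
for the sketch, and only the four-dwords-per-entry layout is taken from the
patch.

/* Standalone model of the event-log ring this patch reads: the
 * "hardware" advances a tail index as it writes entries; the driver
 * consumes at a head index and wraps both at the buffer size.
 * Illustrative only. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define LOG_ENTRIES        8    /* power of 2, as for the real log    */
#define LOG_U32_PER_ENTRY  4    /* four dwords per entry, as in patch */

static uint32_t log_buf[LOG_ENTRIES][LOG_U32_PER_ENTRY];
static unsigned log_head, log_tail;   /* driver head, "hardware" tail */

/* Simulate the hardware logging an event and bumping the tail. */
static void hw_log_event(const uint32_t entry[LOG_U32_PER_ENTRY])
{
    memcpy(log_buf[log_tail], entry, sizeof(log_buf[0]));
    log_tail = (log_tail + 1) % LOG_ENTRIES;
}

/* Mirror of the driver side: fail if the log is empty, otherwise
 * copy one entry out and advance (and wrap) the head index. */
static int read_event_log(uint32_t entry[LOG_U32_PER_ENTRY])
{
    if ( log_tail == log_head )
        return -1;                       /* nothing to consume */
    memcpy(entry, log_buf[log_head], sizeof(log_buf[0]));
    log_head = (log_head + 1) % LOG_ENTRIES;
    return 0;
}

int main(void)
{
    uint32_t ev[LOG_U32_PER_ENTRY] = { 0x1234, 0x2u << 28, 0, 0 };
    uint32_t out[LOG_U32_PER_ENTRY];

    hw_log_event(ev);
    while ( read_event_log(out) == 0 )
        printf("entry: %08x %08x %08x %08x\n", (unsigned)out[0],
               (unsigned)out[1], (unsigned)out[2], (unsigned)out[3]);
    return 0;
}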

diff -r da4042899fd2 -r 837ea1f0aa8a xen/drivers/passthrough/amd/iommu_detect.c
--- a/xen/drivers/passthrough/amd/iommu_detect.c        Wed Apr 16 13:40:46 2008 +0100
+++ b/xen/drivers/passthrough/amd/iommu_detect.c        Wed Apr 16 13:43:23 2008 +0100
@@ -82,6 +82,45 @@ int __init get_iommu_last_downstream_bus
         }
     }
 
+    return 0;
+}
+
+static int __init get_iommu_msi_capabilities(u8 bus, u8 dev, u8 func,
+            struct amd_iommu *iommu)
+{
+    int cap_ptr, cap_id;
+    u32 cap_header;
+    u16 control;
+    int count = 0;
+
+    cap_ptr = pci_conf_read8(bus, dev, func,
+            PCI_CAPABILITY_LIST);
+
+    while ( cap_ptr >= PCI_MIN_CAP_OFFSET &&
+        count < PCI_MAX_CAP_BLOCKS )
+    {
+        cap_ptr &= PCI_CAP_PTR_MASK;
+        cap_header = pci_conf_read32(bus, dev, func, cap_ptr);
+        cap_id = get_field_from_reg_u32(cap_header,
+                PCI_CAP_ID_MASK, PCI_CAP_ID_SHIFT);
+
+        if ( cap_id == PCI_CAP_ID_MSI )
+        {
+            iommu->msi_cap = cap_ptr;
+            break;
+        }
+        cap_ptr = get_field_from_reg_u32(cap_header,
+                PCI_CAP_NEXT_PTR_MASK, PCI_CAP_NEXT_PTR_SHIFT);
+        count++;
+    }
+
+    if ( !iommu->msi_cap )
+        return -ENODEV;
+
+    dprintk(XENLOG_INFO, "AMD IOMMU: Found MSI capability block\n");
+    control = pci_conf_read16(bus, dev, func,
+            iommu->msi_cap + PCI_MSI_FLAGS);
+    iommu->maskbit = control & PCI_MSI_FLAGS_MASKBIT;
     return 0;
 }
 
@@ -133,6 +172,8 @@ int __init get_iommu_capabilities(u8 bus
     iommu->msi_number = get_field_from_reg_u32(
         misc_info, PCI_CAP_MSI_NUMBER_MASK, PCI_CAP_MSI_NUMBER_SHIFT);
 
+    get_iommu_msi_capabilities(bus, dev, func, iommu);
+
     return 0;
 }
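
get_iommu_msi_capabilities() above is a standard PCI capability-list walk:
start at the pointer in config offset 0x34, follow each capability's "next"
byte until the MSI capability ID (0x05) turns up, and bound the loop to
guard against malformed lists. Here is a hedged userspace sketch of the
same traversal over a dumped config space; the snapshot contents and the
helper name are invented for illustration.

/* Walk a PCI capability list in a 256-byte config-space snapshot.
 * cfg[0x34] holds the first capability offset; each capability starts
 * with a 1-byte ID and a 1-byte next pointer.  Returns the offset of
 * the MSI capability (ID 0x05) or 0 if absent.  Illustrative sketch. */
#include <stdint.h>
#include <stdio.h>

#define PCI_CAPABILITY_LIST  0x34
#define PCI_CAP_ID_MSI       0x05
#define PCI_MIN_CAP_OFFSET   0x40
#define PCI_MAX_CAP_BLOCKS   48    /* bound the walk, as the patch does */

static int find_msi_cap(const uint8_t cfg[256])
{
    uint8_t ptr = cfg[PCI_CAPABILITY_LIST] & ~3;   /* low bits reserved */
    int count = 0;

    while ( ptr >= PCI_MIN_CAP_OFFSET && count < PCI_MAX_CAP_BLOCKS )
    {
        if ( cfg[ptr] == PCI_CAP_ID_MSI )
            return ptr;
        ptr = cfg[ptr + 1] & ~3;    /* follow the "next" pointer */
        count++;
    }
    return 0;
}

int main(void)
{
    uint8_t cfg[256] = { 0 };

    cfg[PCI_CAPABILITY_LIST] = 0x50;       /* fabricated chain         */
    cfg[0x50] = 0x10; cfg[0x51] = 0x60;    /* PCIe cap -> next at 0x60 */
    cfg[0x60] = PCI_CAP_ID_MSI; cfg[0x61] = 0x00;  /* MSI, end of list */

    printf("MSI capability at 0x%x\n", (unsigned)find_msi_cap(cfg));
    return 0;
}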
 
diff -r da4042899fd2 -r 837ea1f0aa8a xen/drivers/passthrough/amd/iommu_init.c
--- a/xen/drivers/passthrough/amd/iommu_init.c  Wed Apr 16 13:40:46 2008 +0100
+++ b/xen/drivers/passthrough/amd/iommu_init.c  Wed Apr 16 13:43:23 2008 +0100
@@ -27,6 +27,7 @@
 #include "../pci_regs.h"
 
 extern int nr_amd_iommus;
+static struct amd_iommu *vector_to_iommu[NR_VECTORS];
 
 int __init map_iommu_mmio_region(struct amd_iommu *iommu)
 {
@@ -109,6 +110,33 @@ void __init register_iommu_cmd_buffer_in
     writel(entry, iommu->mmio_base+IOMMU_CMD_BUFFER_BASE_HIGH_OFFSET);
 }
 
+void __init register_iommu_event_log_in_mmio_space(struct amd_iommu *iommu)
+{
+    u64 addr_64, addr_lo, addr_hi;
+    u32 power_of2_entries;
+    u32 entry;
+
+    addr_64 = (u64)virt_to_maddr(iommu->event_log.buffer);
+    addr_lo = addr_64 & DMA_32BIT_MASK;
+    addr_hi = addr_64 >> 32;
+
+    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
+                         IOMMU_EVENT_LOG_BASE_LOW_MASK,
+                         IOMMU_EVENT_LOG_BASE_LOW_SHIFT, &entry);
+    writel(entry, iommu->mmio_base + IOMMU_EVENT_LOG_BASE_LOW_OFFSET);
+
+    power_of2_entries = get_order_from_bytes(iommu->event_log.alloc_size) +
+                        IOMMU_EVENT_LOG_POWER_OF2_ENTRIES_PER_PAGE;
+
+    set_field_in_reg_u32((u32)addr_hi, 0,
+                        IOMMU_EVENT_LOG_BASE_HIGH_MASK,
+                        IOMMU_EVENT_LOG_BASE_HIGH_SHIFT, &entry);
+    set_field_in_reg_u32(power_of2_entries, entry,
+                        IOMMU_EVENT_LOG_LENGTH_MASK,
+                        IOMMU_EVENT_LOG_LENGTH_SHIFT, &entry);
+    writel(entry, iommu->mmio_base+IOMMU_EVENT_LOG_BASE_HIGH_OFFSET);
+}
+
 static void __init set_iommu_translation_control(struct amd_iommu *iommu,
                                                  int enable)
 {
@@ -179,10 +207,281 @@ static void __init register_iommu_exclus
     writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_BASE_LOW_OFFSET);
 }
 
+static void __init set_iommu_event_log_control(struct amd_iommu *iommu,
+            int enable)
+{
+    u32 entry;
+
+    entry = readl(iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
+    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
+                         IOMMU_CONTROL_DISABLED, entry,
+                         IOMMU_CONTROL_EVENT_LOG_ENABLE_MASK,
+                         IOMMU_CONTROL_EVENT_LOG_ENABLE_SHIFT, &entry);
+    writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
+
+    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
+                         IOMMU_CONTROL_DISABLED, entry,
+                         IOMMU_CONTROL_EVENT_LOG_INT_MASK,
+                         IOMMU_CONTROL_EVENT_LOG_INT_SHIFT, &entry);
+    writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
+
+    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
+                         IOMMU_CONTROL_DISABLED, entry,
+                         IOMMU_CONTROL_COMP_WAIT_INT_MASK,
+                         IOMMU_CONTROL_COMP_WAIT_INT_SHIFT, &entry);
+    writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
+}
+
+static int amd_iommu_read_event_log(struct amd_iommu *iommu, u32 event[])
+{
+    u32 tail, head, *event_log;
+    int i;
+
+    BUG_ON( !iommu || !event );
+
+    /* make sure there's an entry in the log */
+    tail = get_field_from_reg_u32(
+                readl(iommu->mmio_base + IOMMU_EVENT_LOG_TAIL_OFFSET),
+                IOMMU_EVENT_LOG_TAIL_MASK,
+                IOMMU_EVENT_LOG_TAIL_SHIFT);
+    if ( tail != iommu->event_log_head )
+    {
+        /* read event log entry */
+        event_log = (u32 *)(iommu->event_log.buffer +
+                                        (iommu->event_log_head *
+                                        IOMMU_EVENT_LOG_ENTRY_SIZE));
+        for ( i = 0; i < IOMMU_EVENT_LOG_U32_PER_ENTRY; i++ )
+            event[i] = event_log[i];
+        if ( ++iommu->event_log_head == iommu->event_log.entries )
+            iommu->event_log_head = 0;
+
+        /* update head pointer */
+        set_field_in_reg_u32(iommu->event_log_head, 0,
+                             IOMMU_EVENT_LOG_HEAD_MASK,
+                             IOMMU_EVENT_LOG_HEAD_SHIFT, &head);
+        writel(head, iommu->mmio_base + IOMMU_EVENT_LOG_HEAD_OFFSET);
+        return 0;
+    }
+
+    return -EFAULT;
+}
+
+static void amd_iommu_msi_data_init(struct amd_iommu *iommu, int vector)
+{
+    u32 msi_data;
+    u8 bus = (iommu->bdf >> 8) & 0xff;
+    u8 dev = PCI_SLOT(iommu->bdf & 0xff);
+    u8 func = PCI_FUNC(iommu->bdf & 0xff);
+
+    msi_data = MSI_DATA_TRIGGER_EDGE |
+        MSI_DATA_LEVEL_ASSERT |
+        MSI_DATA_DELIVERY_FIXED |
+        MSI_DATA_VECTOR(vector);
+
+    pci_conf_write32(bus, dev, func,
+        iommu->msi_cap + PCI_MSI_DATA_64, msi_data);
+}
+
+static void amd_iommu_msi_addr_init(struct amd_iommu *iommu, int phy_cpu)
+{
+
+    int bus = (iommu->bdf >> 8) & 0xff;
+    int dev = PCI_SLOT(iommu->bdf & 0xff);
+    int func = PCI_FUNC(iommu->bdf & 0xff);
+
+    u32 address_hi = 0;
+    u32 address_lo = MSI_ADDR_HEADER |
+            MSI_ADDR_DESTMODE_PHYS |
+            MSI_ADDR_REDIRECTION_CPU |
+            MSI_ADDR_DESTID_CPU(phy_cpu);
+
+    pci_conf_write32(bus, dev, func,
+        iommu->msi_cap + PCI_MSI_ADDRESS_LO, address_lo);
+    pci_conf_write32(bus, dev, func,
+        iommu->msi_cap + PCI_MSI_ADDRESS_HI, address_hi);
+}
+
+static void amd_iommu_msi_enable(struct amd_iommu *iommu, int flag)
+{
+    u16 control;
+    int bus = (iommu->bdf >> 8) & 0xff;
+    int dev = PCI_SLOT(iommu->bdf & 0xff);
+    int func = PCI_FUNC(iommu->bdf & 0xff);
+
+    control = pci_conf_read16(bus, dev, func,
+        iommu->msi_cap + PCI_MSI_FLAGS);
+    control &= ~(1);
+    if ( flag )
+        control |= flag;
+    pci_conf_write16(bus, dev, func,
+        iommu->msi_cap + PCI_MSI_FLAGS, control);
+}
+
+static void iommu_msi_unmask(unsigned int vector)
+{
+    unsigned long flags;
+    struct amd_iommu *iommu = vector_to_iommu[vector];
+
+    /* FIXME: do not support mask bits at the moment */
+    if ( iommu->maskbit )
+        return;
+
+    spin_lock_irqsave(&iommu->lock, flags);
+    amd_iommu_msi_enable(iommu, IOMMU_CONTROL_ENABLED);
+    spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+static void iommu_msi_mask(unsigned int vector)
+{
+    unsigned long flags;
+    struct amd_iommu *iommu = vector_to_iommu[vector];
+
+    /* FIXME: do not support mask bits at the moment */
+    if ( iommu->maskbit )
+        return;
+
+    spin_lock_irqsave(&iommu->lock, flags);
+    amd_iommu_msi_enable(iommu, IOMMU_CONTROL_DISABLED);
+    spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+static unsigned int iommu_msi_startup(unsigned int vector)
+{
+    iommu_msi_unmask(vector);
+    return 0;
+}
+
+static void iommu_msi_end(unsigned int vector)
+{
+    iommu_msi_unmask(vector);
+    ack_APIC_irq();
+}
+
+static void iommu_msi_set_affinity(unsigned int vector, cpumask_t dest)
+{
+    struct amd_iommu *iommu = vector_to_iommu[vector];
+    amd_iommu_msi_addr_init(iommu, cpu_physical_id(first_cpu(dest)));
+}
+
+static struct hw_interrupt_type iommu_msi_type = {
+    .typename = "AMD_IOV_MSI",
+    .startup = iommu_msi_startup,
+    .shutdown = iommu_msi_mask,
+    .enable = iommu_msi_unmask,
+    .disable = iommu_msi_mask,
+    .ack = iommu_msi_mask,
+    .end = iommu_msi_end,
+    .set_affinity = iommu_msi_set_affinity,
+};
+
+static void parse_event_log_entry(u32 entry[])
+{
+    u16 domain_id, device_id;
+    u32 code;
+    u64 *addr;
+    char *event_str[] = {"ILLEGAL_DEV_TABLE_ENTRY",
+                         "IO_PAGE_FALT",
+                         "DEV_TABLE_HW_ERROR",
+                         "PAGE_TABLE_HW_ERROR",
+                         "ILLEGAL_COMMAND_ERROR",
+                         "COMMAND_HW_ERROR",
+                         "IOTLB_INV_TIMEOUT",
+                         "INVALID_DEV_REQUEST"};
+
+    code = get_field_from_reg_u32(entry[1],
+                                  IOMMU_EVENT_CODE_MASK,
+                                  IOMMU_EVENT_CODE_SHIFT);
+
+    if ( (code > IOMMU_EVENT_INVALID_DEV_REQUEST)
+        || (code < IOMMU_EVENT_ILLEGAL_DEV_TABLE_ENTRY) )
+    {
+        dprintk(XENLOG_ERR, "Invalid event log entry!\n");
+        return;
+    }
+
+    if ( code == IOMMU_EVENT_IO_PAGE_FALT )
+    {
+        device_id = get_field_from_reg_u32(entry[0],
+                                           IOMMU_EVENT_DEVICE_ID_MASK,
+                                           IOMMU_EVENT_DEVICE_ID_SHIFT);
+        domain_id = get_field_from_reg_u32(entry[1],
+                                           IOMMU_EVENT_DOMAIN_ID_MASK,
+                                           IOMMU_EVENT_DOMAIN_ID_SHIFT);
+        addr = (u64 *)(entry + 2);
+        dprintk(XENLOG_ERR,
+            "%s: domain = %d, device id = 0x%x, fault address = 0x%"PRIx64"\n",
+            event_str[code-1], domain_id, device_id, *addr);
+    }
+}
+
+static void amd_iommu_page_fault(int vector, void *dev_id,
+                             struct cpu_user_regs *regs)
+{
+    u32  event[4];
+    unsigned long flags;
+    int ret = 0;
+    struct amd_iommu *iommu = dev_id;
+
+    spin_lock_irqsave(&iommu->lock, flags);
+    ret = amd_iommu_read_event_log(iommu, event);
+    spin_unlock_irqrestore(&iommu->lock, flags);
+
+    if ( ret != 0 )
+        return;
+    parse_event_log_entry(event);
+}
+
+static int set_iommu_interrupt_handler(struct amd_iommu *iommu)
+{
+    int vector, ret;
+    unsigned long flags;
+
+    vector = assign_irq_vector(AUTO_ASSIGN);
+    if ( !vector )
+    {
+        gdprintk(XENLOG_ERR, "AMD IOMMU: no vectors\n");
+        return 0;
+    }
+
+    vector_to_iommu[vector] = iommu;
+
+    /* make irq == vector */
+    irq_vector[vector] = vector;
+    vector_irq[vector] = vector;
+
+    irq_desc[vector].handler = &iommu_msi_type;
+    ret = request_irq(vector, amd_iommu_page_fault, 0, "dmar", iommu);
+    if ( ret )
+    {
+        gdprintk(XENLOG_ERR, "AMD IOMMU: can't request irq\n");
+        return 0;
+    }
+
+    spin_lock_irqsave(&iommu->lock, flags);
+
+    amd_iommu_msi_data_init(iommu, vector);
+    amd_iommu_msi_addr_init(iommu, cpu_physical_id(first_cpu(cpu_online_map)));
+    amd_iommu_msi_enable(iommu, IOMMU_CONTROL_ENABLED);
+
+    spin_unlock_irqrestore(&iommu->lock, flags);
+
+    return vector;
+}
+
 void __init enable_iommu(struct amd_iommu *iommu)
 {
+    unsigned long flags;
+
+    set_iommu_interrupt_handler(iommu);
+
+    spin_lock_irqsave(&iommu->lock, flags);
+
     register_iommu_exclusion_range(iommu);
     set_iommu_command_buffer_control(iommu, IOMMU_CONTROL_ENABLED);
+    set_iommu_event_log_control(iommu, IOMMU_CONTROL_ENABLED);
     set_iommu_translation_control(iommu, IOMMU_CONTROL_ENABLED);
+
+    spin_unlock_irqrestore(&iommu->lock, flags);
+
     printk("AMD IOMMU %d: Enabled\n", nr_amd_iommus);
 }
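
amd_iommu_msi_addr_init() and amd_iommu_msi_data_init() above compose the
two architectural x86 MSI words: an address in the 0xFEExxxxx window
carrying the destination APIC ID, and a data word carrying the vector plus
fixed-delivery/edge-trigger bits. The self-contained sketch below builds
the same two words; compose_msi() and its arguments are illustrative, and
the field layout follows the macros this patch adds to amd-iommu-defs.h.

/* Compose x86 MSI address/data words the way the patch's
 * amd_iommu_msi_{addr,data}_init do: fixed delivery, edge trigger,
 * physical destination.  Standalone sketch that just prints them. */
#include <stdint.h>
#include <stdio.h>

#define MSI_ADDR_HEADER          0xfee00000u
#define MSI_TARGET_CPU_SHIFT     12
#define MSI_ADDR_DESTMODE_PHYS   (0u << 2)    /* physical APIC ID   */
#define MSI_ADDR_REDIRECTION_CPU (0u << 3)    /* no lowest-priority */
#define MSI_DATA_DELIVERY_FIXED  (0u << 8)
#define MSI_DATA_LEVEL_ASSERT    (1u << 14)
#define MSI_DATA_TRIGGER_EDGE    (0u << 15)

static void compose_msi(unsigned vector, unsigned apic_id,
                        uint32_t *addr_lo, uint32_t *data)
{
    *addr_lo = MSI_ADDR_HEADER | MSI_ADDR_DESTMODE_PHYS |
               MSI_ADDR_REDIRECTION_CPU |
               (apic_id << MSI_TARGET_CPU_SHIFT);
    *data = MSI_DATA_TRIGGER_EDGE | MSI_DATA_LEVEL_ASSERT |
            MSI_DATA_DELIVERY_FIXED | (uint8_t)vector;
}

int main(void)
{
    uint32_t addr, data;

    compose_msi(0x59, 0, &addr, &data);   /* vector 0x59 -> CPU 0 */
    printf("MSI address 0x%08x, data 0x%08x\n",
           (unsigned)addr, (unsigned)data);
    return 0;
}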
diff -r da4042899fd2 -r 837ea1f0aa8a xen/drivers/passthrough/amd/pci_amd_iommu.c
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c       Wed Apr 16 13:40:46 2008 +0100
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c       Wed Apr 16 13:43:23 2008 +0100
@@ -29,6 +29,7 @@ struct list_head amd_iommu_head;
 struct list_head amd_iommu_head;
 long amd_iommu_poll_comp_wait = COMPLETION_WAIT_DEFAULT_POLLING_COUNT;
 static long amd_iommu_cmd_buffer_entries = IOMMU_CMD_BUFFER_DEFAULT_ENTRIES;
+static long amd_iommu_event_log_entries = IOMMU_EVENT_LOG_DEFAULT_ENTRIES;
 int nr_amd_iommus = 0;
 
 unsigned short ivrs_bdf_entries = 0;
@@ -73,7 +74,8 @@ static void __init deallocate_iommu_reso
 static void __init deallocate_iommu_resources(struct amd_iommu *iommu)
 {
     deallocate_iommu_table_struct(&iommu->dev_table);
-    deallocate_iommu_table_struct(&iommu->cmd_buffer);;
+    deallocate_iommu_table_struct(&iommu->cmd_buffer);
+    deallocate_iommu_table_struct(&iommu->event_log);
 }
 
 static void __init detect_cleanup(void)
@@ -137,6 +139,20 @@ static int __init allocate_iommu_resourc
 
     if ( allocate_iommu_table_struct(&iommu->cmd_buffer,
                                      "Command Buffer") != 0 )
+        goto error_out;
+
+    /* allocate 'event log' in power of 2 increments of 4K */
+    iommu->event_log_head = 0;
+    iommu->event_log.alloc_size =
+        PAGE_SIZE << get_order_from_bytes(
+            PAGE_ALIGN(amd_iommu_event_log_entries *
+                        IOMMU_EVENT_LOG_ENTRY_SIZE));
+
+    iommu->event_log.entries =
+        iommu->event_log.alloc_size / IOMMU_EVENT_LOG_ENTRY_SIZE;
+
+    if ( allocate_iommu_table_struct(&iommu->event_log,
+                                     "Event Log") != 0 )
         goto error_out;
 
     return 0;
@@ -203,6 +219,7 @@ static int __init amd_iommu_init(void)
             goto error_out;
         register_iommu_dev_table_in_mmio_space(iommu);
         register_iommu_cmd_buffer_in_mmio_space(iommu);
+        register_iommu_event_log_in_mmio_space(iommu);
 
         spin_unlock_irqrestore(&iommu->lock, flags);
     }
@@ -224,11 +241,9 @@ static int __init amd_iommu_init(void)
 
     for_each_amd_iommu ( iommu )
     {
-        spin_lock_irqsave(&iommu->lock, flags);
         /* enable IOMMU translation services */
         enable_iommu(iommu);
         nr_amd_iommus++;
-        spin_unlock_irqrestore(&iommu->lock, flags);
     }
 
     amd_iommu_enabled = 1;
@@ -288,7 +303,7 @@ void amd_iommu_setup_domain_device(
         sys_mgt = ivrs_mappings[req_id].dte_sys_mgt_enable;
         dev_ex = ivrs_mappings[req_id].dte_allow_exclusion;
         amd_iommu_set_dev_table_entry((u32 *)dte, root_ptr,
-                                      req_id, sys_mgt, dev_ex,
+                                      hd->domain_id, sys_mgt, dev_ex,
                                       hd->paging_mode);
 
         invalidate_dev_table_entry(iommu, req_id);
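
The event-log sizing in allocate_iommu_resources() rounds entries *
entry_size up to whole pages, then up again to a power-of-2 page count, and
recomputes how many entries actually fit. Assuming the 16-byte entry size
implied by the four-dword copy in amd_iommu_read_event_log(), the default
512 entries give 8192 bytes, i.e. two pages at order 1. A standalone sketch
of the arithmetic, with get_order_from_bytes() reimplemented here from its
observed behaviour:

/* Reproduce the event-log sizing arithmetic: round entries*entry_size
 * up to a whole number of pages, then to a power-of-2 page count, and
 * derive how many entries fit.  Illustrative sketch. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ul << PAGE_SHIFT)

/* Smallest order such that (PAGE_SIZE << order) >= size (assumed to
 * match Xen's get_order_from_bytes()). */
static unsigned get_order_from_bytes(unsigned long size)
{
    unsigned order = 0;

    while ( (PAGE_SIZE << order) < size )
        order++;
    return order;
}

int main(void)
{
    unsigned long entries = 512, entry_size = 16;
    unsigned long bytes = entries * entry_size;
    unsigned long aligned = (bytes + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
    unsigned long alloc_size = PAGE_SIZE << get_order_from_bytes(aligned);

    printf("%lu entries * %lu bytes -> %lu-byte buffer (%lu entries)\n",
           entries, entry_size, alloc_size, alloc_size / entry_size);
    return 0;
}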
diff -r da4042899fd2 -r 837ea1f0aa8a xen/include/asm-x86/amd-iommu.h
--- a/xen/include/asm-x86/amd-iommu.h   Wed Apr 16 13:40:46 2008 +0100
+++ b/xen/include/asm-x86/amd-iommu.h   Wed Apr 16 13:43:23 2008 +0100
@@ -79,6 +79,9 @@ struct amd_iommu {
     int exclusion_allow_all;
     uint64_t exclusion_base;
     uint64_t exclusion_limit;
+
+    int msi_cap;
+    int maskbit;
 };
 
 struct ivrs_mappings {
diff -r da4042899fd2 -r 837ea1f0aa8a xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h      Wed Apr 16 13:40:46 2008 +0100
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h      Wed Apr 16 13:43:23 2008 +0100
@@ -35,6 +35,9 @@
 /* IOMMU Command Buffer entries: in power of 2 increments, minimum of 256 */
 #define IOMMU_CMD_BUFFER_DEFAULT_ENTRIES       512
 
+/* IOMMU Event Log entries: in power of 2 increments, minimum of 256 */
+#define IOMMU_EVENT_LOG_DEFAULT_ENTRIES     512
+
 #define BITMAP_ENTRIES_PER_BYTE                8
 
 #define PTE_PER_TABLE_SHIFT            9
@@ -303,6 +306,11 @@
 #define IOMMU_EVENT_COMMAND_HW_ERROR           0x6
 #define IOMMU_EVENT_IOTLB_INV_TIMEOUT          0x7
 #define IOMMU_EVENT_INVALID_DEV_REQUEST                0x8
+
+#define IOMMU_EVENT_DOMAIN_ID_MASK           0x0000FFFF
+#define IOMMU_EVENT_DOMAIN_ID_SHIFT          0
+#define IOMMU_EVENT_DEVICE_ID_MASK           0x0000FFFF
+#define IOMMU_EVENT_DEVICE_ID_SHIFT          0
 
 /* Control Register */
 #define IOMMU_CONTROL_MMIO_OFFSET                      0x18
@@ -427,4 +435,33 @@
 #define IOMMU_IO_READ_ENABLED           1
 #define HACK_BIOS_SETTINGS                  0
 
+/* MSI interrupt */
+#define MSI_DATA_VECTOR_SHIFT       0
+#define MSI_DATA_VECTOR(v)      (((u8)v) << MSI_DATA_VECTOR_SHIFT)
+
+#define MSI_DATA_DELIVERY_SHIFT     8
+#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_SHIFT)
+#define MSI_DATA_DELIVERY_LOWPRI    (1 << MSI_DATA_DELIVERY_SHIFT)
+
+#define MSI_DATA_LEVEL_SHIFT        14
+#define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT)
+#define MSI_DATA_LEVEL_ASSERT   (1 << MSI_DATA_LEVEL_SHIFT)
+
+#define MSI_DATA_TRIGGER_SHIFT      15
+#define MSI_DATA_TRIGGER_EDGE   (0 << MSI_DATA_TRIGGER_SHIFT)
+#define MSI_DATA_TRIGGER_LEVEL  (1 << MSI_DATA_TRIGGER_SHIFT)
+
+#define MSI_TARGET_CPU_SHIFT        12
+#define MSI_ADDR_HEADER         0xfee00000
+#define MSI_ADDR_DESTID_MASK        0xfff0000f
+#define MSI_ADDR_DESTID_CPU(cpu)    ((cpu) << MSI_TARGET_CPU_SHIFT)
+
+#define MSI_ADDR_DESTMODE_SHIFT     2
+#define MSI_ADDR_DESTMODE_PHYS  (0 << MSI_ADDR_DESTMODE_SHIFT)
+#define MSI_ADDR_DESTMODE_LOGIC (1 << MSI_ADDR_DESTMODE_SHIFT)
+
+#define MSI_ADDR_REDIRECTION_SHIFT  3
+#define MSI_ADDR_REDIRECTION_CPU    (0 << MSI_ADDR_REDIRECTION_SHIFT)
+#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT)
+
 #endif /* _ASM_X86_64_AMD_IOMMU_DEFS_H */
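
The domain-ID and device-ID masks added above pair with
get_field_from_reg_u32() in parse_event_log_entry() to decode an event
entry. Below is a standalone sketch of that decoding over a fabricated
entry; the event-code position in bits 31:28 of the second dword is taken
as an assumption here, mirroring the mask/shift names used in iommu_init.c.

/* Decode the fields parse_event_log_entry() extracts, using the same
 * mask/shift style.  Entry contents are fabricated; the code-field
 * position is an assumption of this sketch. */
#include <stdint.h>
#include <stdio.h>

#define IOMMU_EVENT_DEVICE_ID_MASK  0x0000FFFFu
#define IOMMU_EVENT_DEVICE_ID_SHIFT 0
#define IOMMU_EVENT_DOMAIN_ID_MASK  0x0000FFFFu
#define IOMMU_EVENT_DOMAIN_ID_SHIFT 0
#define IOMMU_EVENT_CODE_MASK       0xF0000000u   /* assumed layout */
#define IOMMU_EVENT_CODE_SHIFT      28

static uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
    return (reg & mask) >> shift;
}

int main(void)
{
    /* Fabricated IO_PAGE_FALT (code 0x2) entry: device 02:00.0,
     * domain 1, faulting address 0x12340000. */
    uint32_t entry[4] = { 0x0200, (0x2u << 28) | 0x0001,
                          0x12340000, 0x00000000 };
    uint64_t addr = ((uint64_t)entry[3] << 32) | entry[2];

    printf("code %u, domain %u, device 0x%04x, address 0x%llx\n",
           (unsigned)get_field(entry[1], IOMMU_EVENT_CODE_MASK,
                               IOMMU_EVENT_CODE_SHIFT),
           (unsigned)get_field(entry[1], IOMMU_EVENT_DOMAIN_ID_MASK,
                               IOMMU_EVENT_DOMAIN_ID_SHIFT),
           (unsigned)get_field(entry[0], IOMMU_EVENT_DEVICE_ID_MASK,
                               IOMMU_EVENT_DEVICE_ID_SHIFT),
           (unsigned long long)addr);
    return 0;
}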
diff -r da4042899fd2 -r 837ea1f0aa8a xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h     Wed Apr 16 13:40:46 2008 +0100
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h     Wed Apr 16 13:43:23 2008 +0100
@@ -49,6 +49,7 @@ void __init unmap_iommu_mmio_region(stru
 void __init unmap_iommu_mmio_region(struct amd_iommu *iommu);
 void __init register_iommu_dev_table_in_mmio_space(struct amd_iommu *iommu);
 void __init register_iommu_cmd_buffer_in_mmio_space(struct amd_iommu *iommu);
+void __init register_iommu_event_log_in_mmio_space(struct amd_iommu *iommu);
 void __init enable_iommu(struct amd_iommu *iommu);
 
 /* mapping functions */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
