
[Xen-changelog] [xen-3.4-testing] AMD IOMMU: Clean up hardware initialization functions to make them more friendly to iommu suspend and resume operations



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1256647170 0
# Node ID 10b709ce3050f13cd2e2a84bbff37082e6b3462a
# Parent  2a5604621e21435c9421ffca2419151566e49014
AMD IOMMU: Clean up hardware initialization functions to make them
more friendly to iommu suspend and resume operations.

Signed-off-by: Wei Wang <wei.wang2@xxxxxxx>
xen-unstable changeset:   19899:7d5433600932
xen-unstable date:        Mon Jul 06 11:56:17 2009 +0100
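
The point of dropping the __init annotations below is that Xen, like Linux, frees .init.text once boot completes; any helper the resume path must call has to stay resident. A minimal sketch of the mechanism, assuming GCC section attributes (illustrative only, not the Xen source):

    /* Illustrative sketch: __init places a function in .init.text,
     * which the hypervisor discards after boot. Calling it from a
     * resume path would jump into freed memory.
     */
    #define __init __attribute__((__section__(".init.text")))

    static void __init boot_time_only(void)
    {
        /* one-shot setup; gone once the init sections are freed */
    }

    static void boot_and_resume(void)
    {
        /* stays resident; safe to call again on S3 resume */
    }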

AMD IOMMU: Make iommu suspend & resume functions more generic.

Signed-off-by: Wei Wang <wei.wang2@xxxxxxx>
xen-unstable changeset:   19900:ff5bc91d0057
xen-unstable date:        Mon Jul 06 11:56:51 2009 +0100
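
Making the functions generic means common code dispatches through struct iommu_ops instead of calling VT-d entry points directly (which is also why the VT-d functions are renamed vtd_suspend/vtd_resume below). A hedged sketch of the dispatch pattern, with the field names taken from the patch and the surrounding scaffolding invented for illustration:

    #include <stddef.h>

    struct iommu_ops {
        void (*suspend)(void);
        void (*resume)(void);
    };

    /* Set at boot to &amd_iommu_ops or &intel_iommu_ops. */
    static const struct iommu_ops *ops;

    void iommu_suspend_sketch(void)
    {
        /* The patch itself gates on iommu_enabled instead of
         * NULL-checking; the checks here are defensive. */
        if ( ops != NULL && ops->suspend != NULL )
            ops->suspend();
    }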

AMD IOMMU: Add suspend and resume support for amd iommu.

Signed-off-by: Wei Wang <wei.wang2@xxxxxxx>
xen-unstable changeset:   19901:0ab211e699e6
xen-unstable date:        Mon Jul 06 11:57:18 2009 +0100
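
On resume, each IOMMU is first disabled and then re-enabled, after which every device table entry, interrupt table, and domain page-table cache is flushed, since anything cached before S3 may be stale. A hypothetical caller on the host sleep path (the real caller is Xen's ACPI S3 code; the function below is invented for illustration):

    static int host_s3_outline(void)
    {
        iommu_suspend();    /* quiesce DMA and interrupt remapping */

        /* ... save device state, enter ACPI S3, wake up ... */

        iommu_resume();     /* reprogram MMIO and flush stale caches */
        return 0;
    }
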
---
 xen/drivers/passthrough/amd/iommu_init.c      |  181 +++++++++++++++++++-------
 xen/drivers/passthrough/amd/pci_amd_iommu.c   |    2 
 xen/drivers/passthrough/iommu.c               |   14 ++
 xen/drivers/passthrough/vtd/iommu.c           |    6 
 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h |    4 
 xen/include/xen/iommu.h                       |    2 
 6 files changed, 163 insertions(+), 46 deletions(-)

diff -r 2a5604621e21 -r 10b709ce3050 xen/drivers/passthrough/amd/iommu_init.c
--- a/xen/drivers/passthrough/amd/iommu_init.c  Fri Oct 23 10:34:01 2009 +0100
+++ b/xen/drivers/passthrough/amd/iommu_init.c  Tue Oct 27 12:39:30 2009 +0000
@@ -67,7 +67,7 @@ static void __init unmap_iommu_mmio_regi
     }
 }
 
-static void __init register_iommu_dev_table_in_mmio_space(struct amd_iommu *iommu)
+static void register_iommu_dev_table_in_mmio_space(struct amd_iommu *iommu)
 {
     u64 addr_64, addr_lo, addr_hi;
     u32 entry;
@@ -90,7 +90,7 @@ static void __init register_iommu_dev_ta
     writel(entry, iommu->mmio_base + IOMMU_DEV_TABLE_BASE_HIGH_OFFSET);
 }
 
-static void __init register_iommu_cmd_buffer_in_mmio_space(struct amd_iommu *iommu)
+static void register_iommu_cmd_buffer_in_mmio_space(struct amd_iommu *iommu)
 {
     u64 addr_64, addr_lo, addr_hi;
     u32 power_of2_entries;
@@ -144,7 +144,7 @@ static void __init register_iommu_event_
     writel(entry, iommu->mmio_base+IOMMU_EVENT_LOG_BASE_HIGH_OFFSET);
 }
 
-static void __init set_iommu_translation_control(struct amd_iommu *iommu,
+static void set_iommu_translation_control(struct amd_iommu *iommu,
                                                  int enable)
 {
     u32 entry;
@@ -181,24 +181,28 @@ static void __init set_iommu_translation
     writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
 }
 
-static void __init set_iommu_command_buffer_control(struct amd_iommu *iommu,
+static void set_iommu_command_buffer_control(struct amd_iommu *iommu,
                                                     int enable)
 {
     u32 entry;
 
-    entry = readl(iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
+    entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
     set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
                          IOMMU_CONTROL_DISABLED, entry,
                          IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_MASK,
                          IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT, &entry);
+
+    /* reset head and tail pointers manually before enablement */
+    if ( enable == IOMMU_CONTROL_ENABLED )
+    {
+        writel(0x0, iommu->mmio_base + IOMMU_CMD_BUFFER_HEAD_OFFSET);
+        writel(0x0, iommu->mmio_base + IOMMU_CMD_BUFFER_TAIL_OFFSET);
+    }
+
     writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
-
-    /*reset head and tail pointer */
-    writel(0x0, iommu->mmio_base + IOMMU_CMD_BUFFER_HEAD_OFFSET);
-    writel(0x0, iommu->mmio_base + IOMMU_CMD_BUFFER_TAIL_OFFSET);
-}
-
-static void __init register_iommu_exclusion_range(struct amd_iommu *iommu)
+}
+
+static void register_iommu_exclusion_range(struct amd_iommu *iommu)
 {
     u64 addr_lo, addr_hi;
     u32 entry;
@@ -238,32 +242,31 @@ static void __init register_iommu_exclus
     writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_BASE_LOW_OFFSET);
 }
 
-static void __init set_iommu_event_log_control(struct amd_iommu *iommu,
+static void set_iommu_event_log_control(struct amd_iommu *iommu,
             int enable)
 {
     u32 entry;
 
-    entry = readl(iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
+    entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
     set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
                          IOMMU_CONTROL_DISABLED, entry,
                          IOMMU_CONTROL_EVENT_LOG_ENABLE_MASK,
                          IOMMU_CONTROL_EVENT_LOG_ENABLE_SHIFT, &entry);
-    writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
-
     set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
                          IOMMU_CONTROL_DISABLED, entry,
                          IOMMU_CONTROL_EVENT_LOG_INT_MASK,
                          IOMMU_CONTROL_EVENT_LOG_INT_SHIFT, &entry);
-    writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
-
     set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, entry,
                          IOMMU_CONTROL_COMP_WAIT_INT_MASK,
                          IOMMU_CONTROL_COMP_WAIT_INT_SHIFT, &entry);
-    writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
-
-    /*reset head and tail pointer */
-    writel(0x0, iommu->mmio_base + IOMMU_EVENT_LOG_HEAD_OFFSET);
-    writel(0x0, iommu->mmio_base + IOMMU_EVENT_LOG_TAIL_OFFSET);
+
+    /* reset head and tail pointers manually before enablement */
+    if ( enable == IOMMU_CONTROL_ENABLED )
+    {
+        writel(0x0, iommu->mmio_base + IOMMU_EVENT_LOG_HEAD_OFFSET);
+        writel(0x0, iommu->mmio_base + IOMMU_EVENT_LOG_TAIL_OFFSET);
+    }
+    writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
 }
 
 static int amd_iommu_read_event_log(struct amd_iommu *iommu, u32 event[])
@@ -502,7 +505,7 @@ static int set_iommu_interrupt_handler(s
     return vector;
 }
 
-void __init enable_iommu(struct amd_iommu *iommu)
+void enable_iommu(struct amd_iommu *iommu)
 {
     unsigned long flags;
 
@@ -513,10 +516,6 @@ void __init enable_iommu(struct amd_iomm
         spin_unlock_irqrestore(&iommu->lock, flags); 
         return;
     }
-
-    iommu->dev_table.alloc_size = device_table.alloc_size;
-    iommu->dev_table.entries = device_table.entries;
-    iommu->dev_table.buffer = device_table.buffer;
 
     register_iommu_dev_table_in_mmio_space(iommu);
     register_iommu_cmd_buffer_in_mmio_space(iommu);
@@ -530,9 +529,6 @@ void __init enable_iommu(struct amd_iomm
     set_iommu_command_buffer_control(iommu, IOMMU_CONTROL_ENABLED);
     set_iommu_event_log_control(iommu, IOMMU_CONTROL_ENABLED);
     set_iommu_translation_control(iommu, IOMMU_CONTROL_ENABLED);
-
-    printk("AMD_IOV: IOMMU %d Enabled.\n", nr_amd_iommus );
-    nr_amd_iommus++;
 
     iommu->enabled = 1;
     spin_unlock_irqrestore(&iommu->lock, flags);
@@ -580,20 +576,24 @@ static int __init allocate_iommu_tables(
 {
     /* allocate 'command buffer' in power of 2 increments of 4K */
     iommu->cmd_buffer_tail = 0;
-    iommu->cmd_buffer.alloc_size = PAGE_SIZE << get_order_from_bytes(
-        PAGE_ALIGN(amd_iommu_cmd_buffer_entries * IOMMU_CMD_BUFFER_ENTRY_SIZE));
-    iommu->cmd_buffer.entries =
-        iommu->cmd_buffer.alloc_size / IOMMU_CMD_BUFFER_ENTRY_SIZE;
+    iommu->cmd_buffer.alloc_size = PAGE_SIZE <<
+                                   get_order_from_bytes(
+                                   PAGE_ALIGN(amd_iommu_cmd_buffer_entries *
+                                   IOMMU_CMD_BUFFER_ENTRY_SIZE));
+    iommu->cmd_buffer.entries = iommu->cmd_buffer.alloc_size /
+                                IOMMU_CMD_BUFFER_ENTRY_SIZE;
 
     if ( allocate_iommu_table_struct(&iommu->cmd_buffer, "Command Buffer") != 0 )
         goto error_out;
 
     /* allocate 'event log' in power of 2 increments of 4K */
     iommu->event_log_head = 0;
-    iommu->event_log.alloc_size = PAGE_SIZE << get_order_from_bytes(
-        PAGE_ALIGN(amd_iommu_event_log_entries * IOMMU_EVENT_LOG_ENTRY_SIZE));
-    iommu->event_log.entries =
-        iommu->event_log.alloc_size / IOMMU_EVENT_LOG_ENTRY_SIZE;
+    iommu->event_log.alloc_size = PAGE_SIZE <<
+                                  get_order_from_bytes(
+                                  PAGE_ALIGN(amd_iommu_event_log_entries *
+                                  IOMMU_EVENT_LOG_ENTRY_SIZE));
+    iommu->event_log.entries = iommu->event_log.alloc_size /
+                               IOMMU_EVENT_LOG_ENTRY_SIZE;
 
     if ( allocate_iommu_table_struct(&iommu->event_log, "Event Log") != 0 )
         goto error_out;
@@ -607,7 +607,6 @@ static int __init allocate_iommu_tables(
 
 int __init amd_iommu_init_one(struct amd_iommu *iommu)
 {
-
     if ( allocate_iommu_tables(iommu) != 0 )
         goto error_out;
 
@@ -617,7 +616,18 @@ int __init amd_iommu_init_one(struct amd
     if ( set_iommu_interrupt_handler(iommu) == 0 )
         goto error_out;
 
+    /* Make sure that device_table.buffer has been successfully allocated. */
+    if ( device_table.buffer == NULL )
+        goto error_out;
+
+    iommu->dev_table.alloc_size = device_table.alloc_size;
+    iommu->dev_table.entries = device_table.entries;
+    iommu->dev_table.buffer = device_table.buffer;
+
     enable_iommu(iommu);
+    printk("AMD-Vi: IOMMU %d Enabled.\n", nr_amd_iommus );
+    nr_amd_iommus++;
+
     return 0;
 
 error_out:
@@ -670,9 +680,12 @@ static int __init amd_iommu_setup_device
 static int __init amd_iommu_setup_device_table(void)
 {
     /* allocate 'device table' on a 4K boundary */
-    device_table.alloc_size = PAGE_SIZE << get_order_from_bytes(
-        PAGE_ALIGN(ivrs_bdf_entries * IOMMU_DEV_TABLE_ENTRY_SIZE));
-    device_table.entries = device_table.alloc_size / IOMMU_DEV_TABLE_ENTRY_SIZE;
+    device_table.alloc_size = PAGE_SIZE <<
+                              get_order_from_bytes(
+                              PAGE_ALIGN(ivrs_bdf_entries *
+                              IOMMU_DEV_TABLE_ENTRY_SIZE));
+    device_table.entries = device_table.alloc_size /
+                           IOMMU_DEV_TABLE_ENTRY_SIZE;
 
     return ( allocate_iommu_table_struct(&device_table, "Device Table") );
 }
@@ -681,7 +694,7 @@ int __init amd_iommu_setup_shared_tables
 {
     BUG_ON( !ivrs_bdf_entries );
 
-    if (init_ivrs_mapping() != 0 )
+    if ( init_ivrs_mapping() != 0 )
         goto error_out;
 
     if ( amd_iommu_setup_device_table() != 0 )
@@ -703,3 +716,83 @@ error_out:
     }
     return -ENOMEM;
 }
+
+static void disable_iommu(struct amd_iommu *iommu)
+{
+    unsigned long flags;
+
+    spin_lock_irqsave(&iommu->lock, flags);
+
+    if ( !iommu->enabled )
+    {
+        spin_unlock_irqrestore(&iommu->lock, flags); 
+        return;
+    }
+
+    amd_iommu_msi_enable(iommu, IOMMU_CONTROL_DISABLED);
+    set_iommu_command_buffer_control(iommu, IOMMU_CONTROL_DISABLED);
+    set_iommu_event_log_control(iommu, IOMMU_CONTROL_DISABLED);
+    set_iommu_translation_control(iommu, IOMMU_CONTROL_DISABLED);
+
+    iommu->enabled = 0;
+
+    spin_unlock_irqrestore(&iommu->lock, flags);
+
+}
+
+static void invalidate_all_domain_pages(void)
+{
+    struct domain *d;
+    for_each_domain( d )
+        invalidate_all_iommu_pages(d);
+}
+
+static void invalidate_all_devices(void)
+{
+    u16 bus, devfn, bdf, req_id;
+    unsigned long flags;
+    struct amd_iommu *iommu;
+
+    for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
+    {
+        bus = bdf >> 8;
+        devfn = bdf & 0xFF;
+        iommu = find_iommu_for_device(bus, devfn);
+        req_id = ivrs_mappings[bdf].dte_requestor_id;
+        if ( iommu )
+        {
+            spin_lock_irqsave(&iommu->lock, flags);
+            invalidate_dev_table_entry(iommu, req_id);
+            invalidate_interrupt_table(iommu, req_id);
+            flush_command_buffer(iommu);
+            spin_unlock_irqrestore(&iommu->lock, flags);
+        }
+    }
+}
+
+void amd_iommu_suspend(void)
+{
+    struct amd_iommu *iommu;
+
+    for_each_amd_iommu ( iommu )
+        disable_iommu(iommu);
+}
+
+void amd_iommu_resume(void)
+{
+    struct amd_iommu *iommu;
+
+    for_each_amd_iommu ( iommu )
+    {
+        /*
+         * Make sure that the IOMMUs have not been touched
+         * before re-enablement.
+         */
+        disable_iommu(iommu);
+        enable_iommu(iommu);
+    }
+
+    /* flush all cache entries after iommu re-enabled */
+    invalidate_all_devices();
+    invalidate_all_domain_pages();
+}
diff -r 2a5604621e21 -r 10b709ce3050 xen/drivers/passthrough/amd/pci_amd_iommu.c
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c       Fri Oct 23 10:34:01 2009 +0100
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c       Tue Oct 27 12:39:30 2009 +0000
@@ -439,4 +439,6 @@ struct iommu_ops amd_iommu_ops = {
     .get_device_group_id = amd_iommu_group_id,
     .update_ire_from_apic = amd_iommu_ioapic_update_ire,
     .update_ire_from_msi = amd_iommu_msi_msg_update_ire,
+    .suspend = amd_iommu_suspend,
+    .resume = amd_iommu_resume,
 };
diff -r 2a5604621e21 -r 10b709ce3050 xen/drivers/passthrough/iommu.c
--- a/xen/drivers/passthrough/iommu.c   Fri Oct 23 10:34:01 2009 +0100
+++ b/xen/drivers/passthrough/iommu.c   Tue Oct 27 12:39:30 2009 +0000
@@ -338,6 +338,20 @@ void iommu_update_ire_from_msi(
     struct iommu_ops *ops = iommu_get_ops();
     ops->update_ire_from_msi(msi_desc, msg);
 }
+
+void iommu_resume()
+{
+    struct iommu_ops *ops = iommu_get_ops();
+    if ( iommu_enabled )
+        ops->resume();
+}
+
+void iommu_suspend()
+{
+    struct iommu_ops *ops = iommu_get_ops();
+    if ( iommu_enabled )
+        ops->suspend();
+}
 /*
  * Local variables:
  * mode: C
diff -r 2a5604621e21 -r 10b709ce3050 xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c       Fri Oct 23 10:34:01 2009 +0100
+++ b/xen/drivers/passthrough/vtd/iommu.c       Tue Oct 27 12:39:30 2009 +0000
@@ -1908,7 +1908,7 @@ static int intel_iommu_group_id(u8 bus, 
 }
 
 static u32 iommu_state[MAX_IOMMUS][MAX_IOMMU_REGS];
-void iommu_suspend(void)
+void vtd_suspend(void)
 {
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu;
@@ -1943,7 +1943,7 @@ void iommu_suspend(void)
     }
 }
 
-void iommu_resume(void)
+void vtd_resume(void)
 {
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu;
@@ -2009,6 +2009,8 @@ struct iommu_ops intel_iommu_ops = {
     .get_device_group_id = intel_iommu_group_id,
     .update_ire_from_apic = io_apic_write_remap_rte,
     .update_ire_from_msi = msi_msg_write_remap_rte,
+    .suspend = vtd_suspend,
+    .resume = vtd_resume,
 };
 
 /*
diff -r 2a5604621e21 -r 10b709ce3050 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h     Fri Oct 23 10:34:01 2009 +0100
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h     Tue Oct 27 12:39:30 2009 +0000
@@ -88,6 +88,10 @@ void amd_iommu_msi_msg_update_ire(
 void amd_iommu_msi_msg_update_ire(
     struct msi_desc *msi_desc, struct msi_msg *msg);
 
+/* power management support */
+void amd_iommu_resume(void);
+void amd_iommu_suspend(void);
+
 static inline u32 get_field_from_reg_u32(u32 reg_value, u32 mask, u32 shift)
 {
     u32 field;
diff -r 2a5604621e21 -r 10b709ce3050 xen/include/xen/iommu.h
--- a/xen/include/xen/iommu.h   Fri Oct 23 10:34:01 2009 +0100
+++ b/xen/include/xen/iommu.h   Tue Oct 27 12:39:30 2009 +0000
@@ -108,6 +108,8 @@ struct iommu_ops {
     int (*get_device_group_id)(u8 bus, u8 devfn);
     void (*update_ire_from_apic)(unsigned int apic, unsigned int reg, unsigned int value);
     void (*update_ire_from_msi)(struct msi_desc *msi_desc, struct msi_msg *msg);
+    void (*suspend)(void);
+    void (*resume)(void);
 };
 
 void iommu_update_ire_from_apic(unsigned int apic, unsigned int reg, unsigned int value);
