[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] AMD IOMMU: Add suspend and resume support for amd iommu.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1246877838 -3600
# Node ID 0ab211e699e65310c10d90c29b29c8c1e083a4aa
# Parent  ff5bc91d0057f4a37076f2393bbedfd62bdb1f0e
AMD IOMMU: Add suspend and resume support for amd iommu.

Signed-off-by: Wei Wang <wei.wang2@xxxxxxx>
---
 xen/drivers/passthrough/amd/iommu_init.c |   80 +++++++++++++++++++++++++++++++
 1 files changed, 80 insertions(+)

diff -r ff5bc91d0057 -r 0ab211e699e6 xen/drivers/passthrough/amd/iommu_init.c
--- a/xen/drivers/passthrough/amd/iommu_init.c  Mon Jul 06 11:56:51 2009 +0100
+++ b/xen/drivers/passthrough/amd/iommu_init.c  Mon Jul 06 11:57:18 2009 +0100
@@ -716,3 +716,83 @@ error_out:
     }
     return -ENOMEM;
 }
+
+/*
+ * Quiesce one IOMMU: under the per-IOMMU lock, turn off MSI delivery,
+ * the command buffer, the event log and address translation, then mark
+ * the unit disabled.  No-op if the IOMMU is not currently enabled.
+ */
+static void disable_iommu(struct amd_iommu *iommu)
+{
+    unsigned long flags;
+
+    spin_lock_irqsave(&iommu->lock, flags);
+
+    /* Already disabled: nothing to tear down. */
+    if ( !iommu->enabled )
+    {
+        spin_unlock_irqrestore(&iommu->lock, flags); 
+        return;
+    }
+
+    amd_iommu_msi_enable(iommu, IOMMU_CONTROL_DISABLED);
+    set_iommu_command_buffer_control(iommu, IOMMU_CONTROL_DISABLED);
+    set_iommu_event_log_control(iommu, IOMMU_CONTROL_DISABLED);
+    set_iommu_translation_control(iommu, IOMMU_CONTROL_DISABLED);
+
+    iommu->enabled = 0;
+
+    spin_unlock_irqrestore(&iommu->lock, flags);
+
+}
+
+/*
+ * Flush IOMMU page-translation entries for every domain in the system
+ * (used after resume, when cached translations may be stale).
+ */
+static void invalidate_all_domain_pages(void)
+{
+    struct domain *d;
+    for_each_domain( d )
+        invalidate_all_iommu_pages(d);
+}
+
+/*
+ * Walk every bus/devfn listed in the IVRS tables and, for each device
+ * that is behind an IOMMU, invalidate its device table entry and
+ * interrupt remapping table, flushing the command buffer so the
+ * invalidations complete.
+ */
+static void invalidate_all_devices(void)
+{
+    u16 bus, devfn, bdf, req_id;
+    unsigned long flags;
+    struct amd_iommu *iommu;
+
+    for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
+    {
+        /* bdf encodes bus in the high byte, devfn in the low byte. */
+        bus = bdf >> 8;
+        devfn = bdf & 0xFF;
+        iommu = find_iommu_for_device(bus, devfn);
+        /* NOTE(review): req_id may differ from bdf for aliased devices
+         * per the IVRS mapping — invalidate by requestor id. */
+        req_id = ivrs_mappings[bdf].dte_requestor_id;
+        if ( iommu )
+        {
+            spin_lock_irqsave(&iommu->lock, flags);
+            invalidate_dev_table_entry(iommu, req_id);
+            invalidate_interrupt_table(iommu, req_id);
+            flush_command_buffer(iommu);
+            spin_unlock_irqrestore(&iommu->lock, flags);
+        }
+    }
+}
+
+/* Power-management hook: quiesce every AMD IOMMU before suspend. */
+void amd_iommu_suspend(void)
+{
+    struct amd_iommu *iommu;
+
+    for_each_amd_iommu ( iommu )
+        disable_iommu(iommu);
+}
+
+/*
+ * Power-management hook: re-enable every AMD IOMMU after resume, then
+ * flush all cached device table entries, interrupt tables and domain
+ * page translations, which may be stale after the power transition.
+ */
+void amd_iommu_resume(void)
+{
+    struct amd_iommu *iommu;
+
+    for_each_amd_iommu ( iommu )
+    {
+        /*
+         * Disable first so each IOMMU is re-enabled from a known,
+         * quiesced state rather than whatever state it was left in.
+         */
+        disable_iommu(iommu);
+        enable_iommu(iommu);
+    }
+
+    /* flush all cache entries after iommu re-enabled */
+    invalidate_all_devices();
+    invalidate_all_domain_pages();
+}

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.