
[xen master] VT-d: improve save/restore of registers across S3



commit 27713fa2aa2166407bb962e80a39480f7ca5cee2
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Tue Apr 13 10:14:23 2021 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Apr 13 10:14:23 2021 +0200

    VT-d: improve save/restore of registers across S3
    
    The static allocation of the save space is not only very inefficient
    (most of the array slots won't ever get used), but is also the sole
    reason for a build-time upper bound on the number of IOMMUs. Introduce
    a structure containing just the one needed field we can't (easily)
    restore from other in-memory state, and allocate the respective
    array dynamically.
    
    Take the opportunity and make the FEUADDR write dependent upon
    x2apic_enabled, as is already the case in dma_msi_set_affinity().
    
    Also alter properties of nr_iommus: static, unsigned, and __initdata.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Kevin Tian <kevin.tian@xxxxxxxxx>
---
 xen/drivers/passthrough/vtd/iommu.c | 46 +++++++++++++++----------------------
 xen/drivers/passthrough/vtd/iommu.h |  2 --
 xen/include/asm-x86/iommu.h         |  1 -
 3 files changed, 19 insertions(+), 30 deletions(-)
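
For scale: the removed constants (MAX_IOMMUS = 32, MAX_IOMMU_REGS = 0xc0,
i.e. 192 u32 slots) put the old save area at 32 * 192 * 4 bytes = 24 KiB,
of which only four slots per IOMMU were ever written.  A minimal sketch of
the before/after shape, condensed from the hunks below:

    /* Before: fixed-size table, present whether or not used. */
    static u32 iommu_state[MAX_IOMMUS][MAX_IOMMU_REGS];

    /* After: one saved field per IOMMU, sized at boot. */
    static struct iommu_state {
        uint32_t fectl;                   /* DMAR_FECTL_REG snapshot */
    } *__read_mostly iommu_state;

    /* In vtd_setup(), once nr_iommus is known: */
    iommu_state = xmalloc_array(struct iommu_state, nr_iommus);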

diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 6428c8fe3e..10c71132db 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -57,7 +57,7 @@ bool __read_mostly iommu_qinval = true;
 bool __read_mostly iommu_snoop = true;
 #endif
 
-int nr_iommus;
+static unsigned int __initdata nr_iommus;
 
 static struct iommu_ops vtd_ops;
 static struct tasklet vtd_fault_tasklet;
@@ -1165,13 +1165,6 @@ int __init iommu_alloc(struct acpi_drhd_unit *drhd)
     unsigned long sagaw, nr_dom;
     int agaw;
 
-    if ( nr_iommus >= MAX_IOMMUS )
-    {
-        dprintk(XENLOG_ERR VTDPREFIX,
-                "IOMMU: nr_iommus %d > MAX_IOMMUS\n", nr_iommus + 1);
-        return -ENOMEM;
-    }
-
     iommu = xzalloc(struct vtd_iommu);
     if ( iommu == NULL )
         return -ENOMEM;
@@ -2226,6 +2219,10 @@ static void __hwdom_init setup_hwdom_rmrr(struct domain *d)
     pcidevs_unlock();
 }
 
+static struct iommu_state {
+    uint32_t fectl;
+} *__read_mostly iommu_state;
+
 static int __init vtd_setup(void)
 {
     struct acpi_drhd_unit *drhd;
@@ -2251,6 +2248,13 @@ static int __init vtd_setup(void)
         goto error;
     }
 
+    iommu_state = xmalloc_array(struct iommu_state, nr_iommus);
+    if ( !iommu_state )
+    {
+        ret = -ENOMEM;
+        goto error;
+    }
+
     /* We enable the following features only if they are supported by all VT-d
      * engines: Snoop Control, DMA passthrough, Queued Invalidation, Interrupt
      * Remapping, and Posted Interrupt
@@ -2508,8 +2512,6 @@ static int intel_iommu_group_id(u16 seg, u8 bus, u8 devfn)
         return PCI_BDF2(bus, devfn);
 }
 
-static u32 iommu_state[MAX_IOMMUS][MAX_IOMMU_REGS];
-
 static int __must_check vtd_suspend(void)
 {
     struct acpi_drhd_unit *drhd;
@@ -2534,14 +2536,7 @@ static int __must_check vtd_suspend(void)
         iommu = drhd->iommu;
         i = iommu->index;
 
-        iommu_state[i][DMAR_FECTL_REG] =
-            (u32) dmar_readl(iommu->reg, DMAR_FECTL_REG);
-        iommu_state[i][DMAR_FEDATA_REG] =
-            (u32) dmar_readl(iommu->reg, DMAR_FEDATA_REG);
-        iommu_state[i][DMAR_FEADDR_REG] =
-            (u32) dmar_readl(iommu->reg, DMAR_FEADDR_REG);
-        iommu_state[i][DMAR_FEUADDR_REG] =
-            (u32) dmar_readl(iommu->reg, DMAR_FEUADDR_REG);
+        iommu_state[i].fectl = dmar_readl(iommu->reg, DMAR_FECTL_REG);
 
         /* don't disable VT-d engine when force_iommu is set. */
         if ( force_iommu )
@@ -2594,15 +2589,13 @@ static void vtd_resume(void)
     for_each_drhd_unit ( drhd )
     {
         iommu = drhd->iommu;
-        i = iommu->index;
 
         spin_lock_irqsave(&iommu->register_lock, flags);
-        dmar_writel(iommu->reg, DMAR_FEDATA_REG,
-                    iommu_state[i][DMAR_FEDATA_REG]);
-        dmar_writel(iommu->reg, DMAR_FEADDR_REG,
-                    iommu_state[i][DMAR_FEADDR_REG]);
-        dmar_writel(iommu->reg, DMAR_FEUADDR_REG,
-                    iommu_state[i][DMAR_FEUADDR_REG]);
+        dmar_writel(iommu->reg, DMAR_FEDATA_REG, iommu->msi.msg.data);
+        dmar_writel(iommu->reg, DMAR_FEADDR_REG, iommu->msi.msg.address_lo);
+        if ( x2apic_enabled )
+            dmar_writel(iommu->reg, DMAR_FEUADDR_REG,
+                        iommu->msi.msg.address_hi);
         spin_unlock_irqrestore(&iommu->register_lock, flags);
     }
 
@@ -2615,8 +2608,7 @@ static void vtd_resume(void)
         i = iommu->index;
 
         spin_lock_irqsave(&iommu->register_lock, flags);
-        dmar_writel(iommu->reg, DMAR_FECTL_REG,
-                    (u32) iommu_state[i][DMAR_FECTL_REG]);
+        dmar_writel(iommu->reg, DMAR_FECTL_REG, iommu_state[i].fectl);
         spin_unlock_irqrestore(&iommu->register_lock, flags);
 
         iommu_enable_translation(drhd);
diff --git a/xen/drivers/passthrough/vtd/iommu.h b/xen/drivers/passthrough/vtd/iommu.h
index 216791b3d6..6f41068c09 100644
--- a/xen/drivers/passthrough/vtd/iommu.h
+++ b/xen/drivers/passthrough/vtd/iommu.h
@@ -499,8 +499,6 @@ struct qinval_entry {
 #define VTD_PAGE_TABLE_LEVEL_3  3
 #define VTD_PAGE_TABLE_LEVEL_4  4
 
-#define MAX_IOMMU_REGS 0xc0
-
 extern struct list_head acpi_drhd_units;
 extern struct list_head acpi_rmrr_units;
 extern struct list_head acpi_ioapic_units;
diff --git a/xen/include/asm-x86/iommu.h b/xen/include/asm-x86/iommu.h
index 970eb06ffa..65a0b02f60 100644
--- a/xen/include/asm-x86/iommu.h
+++ b/xen/include/asm-x86/iommu.h
@@ -22,7 +22,6 @@
 #include <asm/hvm/vmx/vmcs.h>
 
 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
-#define MAX_IOMMUS 32
 
 struct g2m_ioport {
     struct list_head list;
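
Why the resume path can drop the FEDATA/FEADDR/FEUADDR saves: the
fault-event MSI message is already shadowed in memory as iommu->msi.msg,
so only FECTL (which, per the commit message, can't easily be restored
from other in-memory state) still needs a snapshot.  A sketch of the
fields the patch relies on; the names match the hunks above, but the
exact struct msi_msg layout in Xen is assumed, not quoted:

    struct msi_msg {
        uint32_t address_lo;  /* restored into DMAR_FEADDR_REG */
        uint32_t address_hi;  /* DMAR_FEUADDR_REG; written only when
                                 x2apic_enabled, matching
                                 dma_msi_set_affinity() */
        uint32_t data;        /* restored into DMAR_FEDATA_REG */
        /* ... */
    };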
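
On the nr_iommus change: __initdata moves the variable into init-only
data, which Xen discards once boot completes, so this is valid only
because every remaining reader runs at init time (iommu_alloc() and
vtd_setup() are both __init).  A minimal sketch of the resulting usage
pattern; the iommu->index assignment shown is assumed, not part of the
hunks above:

    static unsigned int __initdata nr_iommus;

    int __init iommu_alloc(struct acpi_drhd_unit *drhd)
    {
        /* ... */
        iommu->index = nr_iommus++;  /* per-IOMMU slot, fixed at boot */
        /* ... */
    }

    /* Post-boot code (vtd_suspend()/vtd_resume()) indexes iommu_state[]
       by iommu->index and never reads nr_iommus itself. */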
--
generated by git-patchbot for /home/xen/git/xen.git#master