[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] VT-d: adjust IOMMU interrupt affinities when all CPUs are online


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-unstable <patchbot@xxxxxxx>
  • Date: Tue, 04 Dec 2012 00:22:10 +0000
  • Delivery-date: Tue, 04 Dec 2012 00:22:18 +0000
  • List-id: "Change log for Mercurial \(receive only\)" <xen-changelog.lists.xen.org>

# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxx>
# Date 1354093704 -3600
# Node ID 1fce7522daa6bab9fce93b95adf592193c904097
# Parent  ba90ecb0231fe7d38522db827515f1d8a4b02045
VT-d: adjust IOMMU interrupt affinities when all CPUs are online

Since these interrupts get setup before APs get brought online, their
affinities naturally could only ever point to CPU 0 alone so far.
Adjust this to include potentially multiple CPUs in the target mask
(when running in one of the cluster modes), and take into account NUMA
information (to handle the interrupts on a CPU on the node where the
respective IOMMU is).

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Keir Fraser <keir@xxxxxxx>
---


diff -r ba90ecb0231f -r 1fce7522daa6 xen/arch/x86/acpi/power.c
--- a/xen/arch/x86/acpi/power.c Wed Nov 28 10:07:11 2012 +0100
+++ b/xen/arch/x86/acpi/power.c Wed Nov 28 10:08:24 2012 +0100
@@ -219,6 +219,7 @@ static int enter_state(u32 state)
     mtrr_aps_sync_begin();
     enable_nonboot_cpus();
     mtrr_aps_sync_end();
+    adjust_vtd_irq_affinities();
     acpi_dmar_zap();
     thaw_domains();
     system_state = SYS_STATE_active;
diff -r ba90ecb0231f -r 1fce7522daa6 xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c       Wed Nov 28 10:07:11 2012 +0100
+++ b/xen/drivers/passthrough/vtd/iommu.c       Wed Nov 28 10:08:24 2012 +0100
@@ -1971,6 +1971,33 @@ void clear_fault_bits(struct iommu *iomm
     spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
+static void adjust_irq_affinity(struct acpi_drhd_unit *drhd)
+{
+    const struct acpi_rhsa_unit *rhsa = drhd_to_rhsa(drhd);
+    unsigned int node = rhsa ? pxm_to_node(rhsa->proximity_domain)
+                             : NUMA_NO_NODE;
+    const cpumask_t *cpumask = &cpu_online_map;
+
+    if ( node < MAX_NUMNODES && node_online(node) &&
+         cpumask_intersects(&node_to_cpumask(node), cpumask) )
+        cpumask = &node_to_cpumask(node);
+    dma_msi_set_affinity(irq_to_desc(drhd->iommu->msi.irq), cpumask);
+}
+
+int adjust_vtd_irq_affinities(void)
+{
+    struct acpi_drhd_unit *drhd;
+
+    if ( !iommu_enabled )
+        return 0;
+
+    for_each_drhd_unit ( drhd )
+        adjust_irq_affinity(drhd);
+
+    return 0;
+}
+__initcall(adjust_vtd_irq_affinities);
+
 static int init_vtd_hw(void)
 {
     struct acpi_drhd_unit *drhd;
@@ -1984,13 +2011,10 @@ static int init_vtd_hw(void)
      */
     for_each_drhd_unit ( drhd )
     {
-        struct irq_desc *desc;
+        adjust_irq_affinity(drhd);
 
         iommu = drhd->iommu;
 
-        desc = irq_to_desc(iommu->msi.irq);
-        dma_msi_set_affinity(desc, desc->arch.cpu_mask);
-
         clear_fault_bits(iommu);
 
         spin_lock_irqsave(&iommu->register_lock, flags);
diff -r ba90ecb0231f -r 1fce7522daa6 xen/include/xen/iommu.h
--- a/xen/include/xen/iommu.h   Wed Nov 28 10:07:11 2012 +0100
+++ b/xen/include/xen/iommu.h   Wed Nov 28 10:08:24 2012 +0100
@@ -137,6 +137,9 @@ int iommu_do_domctl(struct xen_domctl *,
 void iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int 
page_count);
 void iommu_iotlb_flush_all(struct domain *d);
 
+/* While VT-d specific, this must get declared in a generic header. */
+int adjust_vtd_irq_affinities(void);
+
 /*
  * The purpose of the iommu_dont_flush_iotlb optional cpu flag is to
  * avoid unecessary iotlb_flush in the low level IOMMU code.

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with Rackspace, monitoring our
servers 24x7x365 and backed by Rackspace's Fanatical Support®.