[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v4 11/12] AMD/IOMMU: don't needlessly log headers when dumping IRTs


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Jan Beulich <JBeulich@xxxxxxxx>
  • Date: Thu, 25 Jul 2019 13:33:24 +0000
  • Accept-language: en-US
  • Arc-authentication-results: i=1; mx.microsoft.com 1;spf=pass smtp.mailfrom=suse.com;dmarc=pass action=none header.from=suse.com;dkim=pass header.d=suse.com;arc=none
  • Arc-message-signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; s=arcselector9901; h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck; bh=DdwgO5w1/e/Oe8NbjsPQvBK0IQqvUiu8HRjOue0wvpQ=; b=VQxvuwNWha6Jdq5onlf76ygkIKsJVt9Lw7tnvGkjLLNEquG/rIyCq7edI3FadUrS6XIct7ZXrI4G+fUDwgyR6Wc6BeLBW+TOlVdGryGedVVpbGflNNDfzK6ZNBnVl3bo6insPB6YnsLq4mLzO2dr7Qmc2w+cfm60Pj1VOeEiOFqzZR9utRFB0/JY+HLy4zpuKXWN5tBVxiaiifVHIGg0pbQElbM1sroIa38LSKErU7olTn5LXeE+EeRPGJGnvTu+ZNfLfgT3LMh2PEei7IfcH8zcAl9MZ9OyBnZo4VrkOJn8XQZAdvfL7cZ9hAbPWpFM+u/B0rWaa748jxWr4WNNuA==
  • Arc-seal: i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none; b=UC7y2CReGaAsm5ZVUrobVHjF4NWK9N6ERsQIXVXXumn+BDEqvifAVlzoOzGaMEBGpIf8LxGbWPZQe7YqCtW2En+LzDEI/nXcSEEPuzK/FbBOnMjBiZrBh6ynq6kFHTbSbiDZzPFUE0Y5LcZUUSw9SMQdb7z/wH5KQzynCzjF3MrbQxlO3IEP0Nu4nLLpMa1wajk7cuIJMMkuBXruXVpPcf3PDRr3eS0fufdqRdCCq77kQQZ76BUaVSNcFUJx+Ses3zYzb9Sr84XCM5VE2PRtZ8jr4+7+tbinNBrbaLAAdtzBi47utJYOj8zaOQ5egQC9nds/otlb2IUJ+gvGJqp/+g==
  • Authentication-results: spf=none (sender IP is ) smtp.mailfrom=JBeulich@xxxxxxxx;
  • Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Brian Woods <brian.woods@xxxxxxx>, Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
  • Delivery-date: Thu, 25 Jul 2019 13:36:17 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
  • Thread-index: AQHVQu2IH0zPYIwhDkeWyRcCRQLjtg==
  • Thread-topic: [PATCH v4 11/12] AMD/IOMMU: don't needlessly log headers when dumping IRTs

Log SBDF headers only when there are actual IRTEs to log. This is
particularly important for the total volume of output when the ACPI
tables describe far more than just the existing devices. On my Rome
system, for example, there was previously one line for every function of
every device on all 256 buses of segment 0, with extremely few
exceptions (like the IOMMUs themselves).

Also only log one of the "per-device" or "shared" overall headers.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v4: New.

--- a/xen/drivers/passthrough/amd/iommu_intr.c
+++ b/xen/drivers/passthrough/amd/iommu_intr.c
@@ -883,7 +883,8 @@ int __init amd_setup_hpet_msi(struct msi
  }
  
  static void dump_intremap_table(const struct amd_iommu *iommu,
-                                union irte_cptr tbl)
+                                union irte_cptr tbl,
+                                const struct ivrs_mappings *ivrs_mapping)
  {
      unsigned int count;
  
@@ -892,19 +893,25 @@ static void dump_intremap_table(const st
  
      for ( count = 0; count < INTREMAP_ENTRIES; count++ )
      {
-        if ( iommu->ctrl.ga_en )
-        {
-            if ( !tbl.ptr128[count].raw[0] && !tbl.ptr128[count].raw[1] )
+        if ( iommu->ctrl.ga_en
+             ? !tbl.ptr128[count].raw[0] && !tbl.ptr128[count].raw[1]
+             : !tbl.ptr32[count].raw )
                  continue;
+
+        if ( ivrs_mapping )
+        {
+            printk("  %04x:%02x:%02x:%u:\n", iommu->seg,
+                   PCI_BUS(ivrs_mapping->dte_requestor_id),
+                   PCI_SLOT(ivrs_mapping->dte_requestor_id),
+                   PCI_FUNC(ivrs_mapping->dte_requestor_id));
+            ivrs_mapping = NULL;
+        }
+
+        if ( iommu->ctrl.ga_en )
              printk("    IRTE[%03x] %016lx_%016lx\n",
                     count, tbl.ptr128[count].raw[1], tbl.ptr128[count].raw[0]);
-        }
          else
-        {
-            if ( !tbl.ptr32[count].raw )
-                continue;
              printk("    IRTE[%03x] %08x\n", count, tbl.ptr32[count].raw);
-        }
      }
  }
  
@@ -916,13 +923,8 @@ static int dump_intremap_mapping(const s
      if ( !ivrs_mapping )
          return 0;
  
-    printk("  %04x:%02x:%02x:%u:\n", iommu->seg,
-           PCI_BUS(ivrs_mapping->dte_requestor_id),
-           PCI_SLOT(ivrs_mapping->dte_requestor_id),
-           PCI_FUNC(ivrs_mapping->dte_requestor_id));
-
      spin_lock_irqsave(&(ivrs_mapping->intremap_lock), flags);
-    dump_intremap_table(iommu, ivrs_mapping->intremap_table);
+    dump_intremap_table(iommu, ivrs_mapping->intremap_table, ivrs_mapping);
      spin_unlock_irqrestore(&(ivrs_mapping->intremap_lock), flags);
  
      process_pending_softirqs();
@@ -932,17 +934,22 @@ static int dump_intremap_mapping(const s
  
  static void dump_intremap_tables(unsigned char key)
  {
-    unsigned long flags;
-
-    printk("--- Dumping Per-dev IOMMU Interrupt Remapping Table ---\n");
+    if ( !shared_intremap_table )
+    {
+        printk("--- Dumping Per-dev IOMMU Interrupt Remapping Table ---\n");
  
-    iterate_ivrs_entries(dump_intremap_mapping);
+        iterate_ivrs_entries(dump_intremap_mapping);
+    }
+    else
+    {
+        unsigned long flags;
  
-    printk("--- Dumping Shared IOMMU Interrupt Remapping Table ---\n");
+        printk("--- Dumping Shared IOMMU Interrupt Remapping Table ---\n");
  
-    spin_lock_irqsave(&shared_intremap_lock, flags);
-    dump_intremap_table(list_first_entry(&amd_iommu_head, struct amd_iommu,
-                                         list),
-                        shared_intremap_table);
-    spin_unlock_irqrestore(&shared_intremap_lock, flags);
+        spin_lock_irqsave(&shared_intremap_lock, flags);
+        dump_intremap_table(list_first_entry(&amd_iommu_head, struct amd_iommu,
+                                             list),
+                            shared_intremap_table, NULL);
+        spin_unlock_irqrestore(&shared_intremap_lock, flags);
+    }
  }

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.