
[Xen-changelog] [xen-4.1-testing] AMD IOMMU: also spot missing IO-APIC entries in IVRS table


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-4.1-testing <patchbot@xxxxxxx>
  • Date: Tue, 19 Feb 2013 01:44:10 +0000
  • Delivery-date: Tue, 19 Feb 2013 01:44:20 +0000
  • List-id: "Change log for Mercurial \(receive only\)" <xen-changelog.lists.xen.org>

# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxx>
# Date 1360938638 -3600
# Node ID 4d522221fa778a0b1399b8da937b129fe56c7fe2
# Parent  8792a805cc9a7f1a6846d6464a84366b68d83f73
AMD IOMMU: also spot missing IO-APIC entries in IVRS table

Apart from dealing with duplicate conflicting entries, we also have to
handle firmware omitting IO-APIC entries from IVRS altogether. Not
doing so caused c/s 26517:601139e2b0db to crash such systems during
boot (whereas with the change here the IOMMU gets disabled, just as
is being done in the other cases, i.e. unless global tables are being
used).
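
In outline, the newly added check works as follows. The snippet below is
a simplified, standalone sketch for illustration only: the names it uses
(struct ioapic_info, ioapics[], nr_ioapics, perdev_intremap,
check_ioapic_coverage) are stand-ins, not the Xen-internal
nr_ioapic_registers[], ioapic_bdf[] and amd_iommu_perdev_intremap that the
actual hunk at the end of parse_ivrs_table() below operates on.

/*
 * Simplified model of the check added at the end of parse_ivrs_table().
 * All names here are illustrative stand-ins, not the Xen-internal ones.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_IOAPICS 8

struct ioapic_info {
    unsigned int id;            /* IO-APIC ID as reported by firmware */
    unsigned int nr_pins;       /* number of redirection table entries */
    unsigned long *pin_setup;   /* non-NULL once an IVRS entry covered it */
};

static struct ioapic_info ioapics[MAX_IOAPICS];
static unsigned int nr_ioapics;
static bool perdev_intremap = true;

/* Mirrors the added loop: every IO-APIC must have an IVRS special entry. */
static int check_ioapic_coverage(void)
{
    unsigned int apic;

    for ( apic = 0; apic < nr_ioapics; ++apic )
    {
        struct ioapic_info *io = &ioapics[apic];

        /* Skip pin-less IO-APICs and ones the IVRS table did describe. */
        if ( !io->nr_pins || io->pin_setup )
            continue;

        fprintf(stderr, "no IVRS information for IO-APIC %#x\n", io->id);

        /*
         * With per-device interrupt remapping the omission is fatal and
         * the caller disables the IOMMU; with a global table the per-pin
         * bookkeeping bitmap can still be allocated and boot continues.
         */
        if ( perdev_intremap )
            return -ENXIO;

        /* One bit per pin, rounded up to unsigned longs (assumes 64-bit). */
        io->pin_setup = calloc((io->nr_pins + 63) / 64, sizeof(unsigned long));
        if ( !io->pin_setup )
            return -ENOMEM;
    }

    return 0;
}

int main(void)
{
    /* One IO-APIC that firmware forgot to describe in IVRS. */
    ioapics[0] = (struct ioapic_info){ .id = 8, .nr_pins = 24 };
    nr_ioapics = 1;

    printf("per-device intremap: %d\n", check_ioapic_coverage());

    perdev_intremap = false;            /* global table fallback */
    printf("global intremap:     %d\n", check_ioapic_coverage());

    return 0;
}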

Debugging this issue also showed that the debug log output is rather
hard to read - consolidate the output, and add one extra item for the
IVHD special entries, so that future issues are easier to analyze.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Tested-by: Sander Eikelenboom <linux@xxxxxxxxxxxxxx>
Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
xen-unstable changeset: 26531:e68f14b9e739
xen-unstable date: Thu Feb 14 08:40:52 UTC 2013
---


diff -r 8792a805cc9a -r 4d522221fa77 xen/drivers/passthrough/amd/iommu_acpi.c
--- a/xen/drivers/passthrough/amd/iommu_acpi.c  Fri Feb 15 11:50:45 2013 +0000
+++ b/xen/drivers/passthrough/amd/iommu_acpi.c  Fri Feb 15 15:30:38 2013 +0100
@@ -346,9 +346,8 @@ static int __init parse_ivmd_block(struc
     base = start_addr & PAGE_MASK;
     limit = (start_addr + mem_length - 1) & PAGE_MASK;
 
-    AMD_IOMMU_DEBUG("IVMD Block: Type 0x%x\n",ivmd_block->header.type);
-    AMD_IOMMU_DEBUG(" Start_Addr_Phys 0x%lx\n", start_addr);
-    AMD_IOMMU_DEBUG(" Mem_Length 0x%lx\n", mem_length);
+    AMD_IOMMU_DEBUG("IVMD Block: type %#x phys %#lx len %#lx\n",
+                    ivmd_block->header.type, start_addr, mem_length);
 
     if ( get_field_from_byte(ivmd_block->header.flags,
                              AMD_IOMMU_ACPI_EXCLUSION_RANGE_MASK,
@@ -550,8 +549,8 @@ static u16 __init parse_ivhd_device_alia
         return 0;
     }
 
-    AMD_IOMMU_DEBUG(" Dev_Id Range: 0x%x -> 0x%x\n", first_bdf, last_bdf);
-    AMD_IOMMU_DEBUG(" Dev_Id Alias: 0x%x\n", alias_id);
+    AMD_IOMMU_DEBUG(" Dev_Id Range: %#x -> %#x alias %#x\n",
+                    first_bdf, last_bdf, alias_id);
 
     for ( bdf = first_bdf; bdf <= last_bdf; bdf++ )
         add_ivrs_mapping_entry(bdf, alias_id, ivhd_device->header.flags, iommu);
@@ -652,6 +651,9 @@ static u16 __init parse_ivhd_device_spec
         return 0;
     }
 
+    AMD_IOMMU_DEBUG("IVHD Special: %02x:%02x.%u variety %#x handle %#x\n",
+                    PCI_BUS(bdf), PCI_SLOT(bdf), PCI_FUNC(bdf),
+                    ivhd_device->special.variety, ivhd_device->special.handle);
     add_ivrs_mapping_entry(bdf, bdf, ivhd_device->header.flags, iommu);
 
     if ( ivhd_device->special.variety != 1 /* ACPI_IVHD_IOAPIC */ )
@@ -737,10 +739,9 @@ static int __init parse_ivhd_block(struc
         ivhd_device = (union acpi_ivhd_device *)
             ((u8 *)ivhd_block + block_length);
 
-        AMD_IOMMU_DEBUG( "IVHD Device Entry:\n");
-        AMD_IOMMU_DEBUG( " Type 0x%x\n", ivhd_device->header.type);
-        AMD_IOMMU_DEBUG( " Dev_Id 0x%x\n", ivhd_device->header.dev_id);
-        AMD_IOMMU_DEBUG( " Flags 0x%x\n", ivhd_device->header.flags);
+        AMD_IOMMU_DEBUG("IVHD Device Entry: type %#x id %#x flags %#x\n",
+                        ivhd_device->header.type, ivhd_device->header.dev_id,
+                        ivhd_device->header.flags);
 
         switch ( ivhd_device->header.type )
         {
@@ -867,6 +868,7 @@ static int __init parse_ivrs_table(struc
 {
     struct acpi_ivrs_block_header *ivrs_block;
     unsigned long length;
+    unsigned int apic;
     int error = 0;
     struct acpi_table_header *table = (struct acpi_table_header *)_table;
 
@@ -882,11 +884,9 @@ static int __init parse_ivrs_table(struc
         ivrs_block = (struct acpi_ivrs_block_header *)
             ((u8 *)table + length);
 
-        AMD_IOMMU_DEBUG("IVRS Block:\n");
-        AMD_IOMMU_DEBUG(" Type 0x%x\n", ivrs_block->type);
-        AMD_IOMMU_DEBUG(" Flags 0x%x\n", ivrs_block->flags);
-        AMD_IOMMU_DEBUG(" Length 0x%x\n", ivrs_block->length);
-        AMD_IOMMU_DEBUG(" Dev_Id 0x%x\n", ivrs_block->dev_id);
+        AMD_IOMMU_DEBUG("IVRS Block: type %#x flags %#x len %#x id %#x\n",
+                        ivrs_block->type, ivrs_block->flags,
+                        ivrs_block->length, ivrs_block->dev_id);
 
         if ( table->length < (length + ivrs_block->length) )
         {
@@ -901,6 +901,29 @@ static int __init parse_ivrs_table(struc
         length += ivrs_block->length;
     }
 
+    /* Each IO-APIC must have been mentioned in the table. */
+    for ( apic = 0; !error && apic < nr_ioapics; ++apic )
+    {
+        if ( !nr_ioapic_registers[apic] ||
+             ioapic_bdf[IO_APIC_ID(apic)].pin_setup )
+            continue;
+
+        printk(XENLOG_ERR "IVHD Error: no information for IO-APIC %#x\n",
+               IO_APIC_ID(apic));
+        if ( amd_iommu_perdev_intremap )
+            error = -ENXIO;
+        else
+        {
+            ioapic_bdf[IO_APIC_ID(apic)].pin_setup = xzalloc_array(
+                unsigned long, BITS_TO_LONGS(nr_ioapic_registers[apic]));
+            if ( !ioapic_bdf[IO_APIC_ID(apic)].pin_setup )
+            {
+                printk(XENLOG_ERR "IVHD Error: Out of memory\n");
+                error = -ENOMEM;
+            }
+        }
+    }
+
     return error;
 }
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

