
[Xen-devel] [PATCH v2 3/3] xen: arm: handle PCI DT node ranges and interrupt-map properties



These properties are defined in ePAPR (2.3.8 and 2.4.3.1 respectively)
and the OpenFirmware PCI Bus Binding Specification (IEEE Std 1275-1994).
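
For reference, the PCI child address used by both properties is three
cells, with the high cell encoding the address space code and the
bus/device/function per the OF PCI bus binding (npt000ss bbbbbbbb
dddddfff rrrrrrrr); the patch below only prints the raw cells and relies
on dt_translate_address for the CPU address. A minimal standalone
illustration of that encoding (the helper name is made up and is not
part of this patch):

    #include <stdint.h>
    #include <stdio.h>

    /* Decode the high cell of an OF PCI unit address (phys.hi). */
    static void decode_pci_phys_hi(uint32_t hi)
    {
        static const char *space[] = { "config", "I/O", "mem32", "mem64" };

        printf("space=%s bus=%u dev=%u fn=%u reg=0x%02x%s%s\n",
               space[(hi >> 24) & 0x3],   /* ss: address space code */
               (hi >> 16) & 0xff,         /* bbbbbbbb: bus number */
               (hi >> 11) & 0x1f,         /* ddddd: device number */
               (hi >> 8) & 0x7,           /* fff: function number */
               hi & 0xff,                 /* rrrrrrrr: register number */
               (hi & (1u << 30)) ? " prefetchable" : "",
               (hi & (1u << 31)) ? " non-relocatable" : "");
    }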

This replaces the xgene-specific mapping. Tested on Mustang and on a
model with a PCI virtio controller.

TODO: Use a helper iterator (e.g. dt_for_each_range) for the ranges
property, as is already done for interrupts using
dt_for_each_irq_map.
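
Purely as an illustration of that TODO (the helper does not exist and
the callback signature is an assumption), a sketch specialised to the
fixed PCI layout used below, reusing only the dt_* accessors and the
of_pci_ranges_entry struct that this patch itself introduces:

    typedef int (*dt_range_cb)(const struct dt_device_node *dev,
                               u64 addr, u64 size, void *data);

    static int dt_for_each_range(const struct dt_device_node *dev,
                                 dt_range_cb cb, void *data)
    {
        const struct of_pci_ranges_entry *ranges;
        u32 len;
        int i, nr, res;

        ranges = dt_get_property(dev, "ranges", &len);
        if ( !ranges )
            return 0; /* No ranges, nothing to do */

        nr = len / sizeof(*ranges);
        for ( i = 0; i < nr; i++ )
        {
            /* Translate the CPU address and hand one range to the callback */
            u64 addr = dt_translate_address(dev,
                                            (const __be32 *)&ranges[i].cpu_addr);

            res = cb(dev, addr, fdt64_to_cpu(ranges[i].length), data);
            if ( res )
                return res;
        }

        return 0;
    }

A generic dt_for_each_range would instead need to honour the
#address-cells/#size-cells of the nodes involved rather than assuming
the PCI 3+2+2 cell layout.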

TODO: Should we stop calling map_device for nodes beneath this one
(since we should already have mapped everything underneath)? I think
this is complicated by cases which map interrupts but not ranges, or
vice versa, which might mean we need to recurse on them separately.
Perhaps we can simply continue to descend and the mappings will just be
harmlessly instantiated twice.

TODO: Related to the above, there are also some buses for which we
cannot use dt_bus_default_{map,translate}. We might want to pull in
the of_bus_pci support from Linux, although I think that would be
orthogonal to this fix.

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
v2: This is essentially a complete reworking, which actually parses
things properly (obeying #{address,size,interrupt}-cells on the
appropriate nodes) and includes handling of interrupt-map too.
---
 xen/arch/arm/domain_build.c          |  156 ++++++++++++++++++++++++++++++++++
 xen/arch/arm/platforms/xgene-storm.c |  143 -------------------------------
 2 files changed, 156 insertions(+), 143 deletions(-)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index 2a2fc2b..704c2aa 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -23,6 +23,21 @@
 #include <xen/irq.h>
 #include "kernel.h"
 
+/*
+ * Definitions for implementing parts of the OpenFirmware PCI Bus Binding
+ * Specification (IEEE Std 1275-1994).
+ */
+
+struct of_pci_unit_address {
+       __be32 hi, mid, lo;
+} __attribute__((packed));
+
+struct of_pci_ranges_entry {
+       struct of_pci_unit_address      pci_addr;
+       __be64                          cpu_addr;
+       __be64                          length;
+} __attribute__((packed));
+
 static unsigned int __initdata opt_dom0_max_vcpus;
 integer_param("dom0_max_vcpus", opt_dom0_max_vcpus);
 
@@ -911,6 +926,139 @@ static int make_timer_node(const struct domain *d, void *fdt,
     return res;
 }
 
+static int map_pci_device_ranges(struct domain *d,
+                                 const struct dt_device_node *dev,
+                                 const struct of_pci_ranges_entry  *ranges,
+                                 const u32 len)
+{
+    int parent_size_cells, parent_addr_cells;
+    int i, nr, res;
+
+    parent_size_cells = dt_n_size_cells(dev);
+    parent_addr_cells = dt_n_addr_cells(dev);
+
+    /*
+     * Range is child address, host address (#address-cells), length
+     * (#size-cells), see ePAPR 2.3.8.
+     *
+     * PCI child address is u32 space + u64 address, see ePAPR 6.2.2.
+     *
+     */
+    nr = len / sizeof(*ranges);
+
+    for ( i = 0; i < nr ; i++ )
+    {
+        const struct of_pci_ranges_entry *range = &ranges[i];
+        u64 addr, size;
+
+        size = fdt64_to_cpu(range->length);
+
+        addr = dt_translate_address(dev, (const __be32 *)&range->cpu_addr);
+        DPRINT("PCI SPACE 0x%08x.%08x.%08x 0x%"PRIx64" size 0x%"PRIx64"\n",
+               fdt32_to_cpu(range->pci_addr.hi),
+               fdt32_to_cpu(range->pci_addr.mid),
+               fdt32_to_cpu(range->pci_addr.lo),
+               addr, size);
+
+        res = map_mmio_regions(d,
+                               paddr_to_pfn(addr & PAGE_MASK),
+                               DIV_ROUND_UP(size, PAGE_SIZE),
+                               paddr_to_pfn(addr & PAGE_MASK));
+        if ( res < 0 )
+        {
+            printk(XENLOG_ERR "Unable to map 0x%"PRIx64
+                   " - 0x%"PRIx64" in domain %d\n",
+                   addr & PAGE_MASK, PAGE_ALIGN(addr + size) - 1,
+                   d->domain_id);
+            return res;
+        }
+    }
+    return 0;
+}
+
+static int map_device_ranges(struct domain *d, const struct dt_device_node *dev)
+{
+    const void *ranges;
+    u32 len;
+
+    ranges = dt_get_property(dev, "ranges", &len);
+    /* No ranges, nothing to do */
+    if ( !ranges )
+        return 0;
+
+    if ( dt_device_type_is_equal(dev, "pci") )
+        return map_pci_device_ranges(d, dev, ranges, len);
+
+    printk("Cannot handle ranges for non-PCI device %s type %s\n",
+           dt_node_name(dev), dev->type);
+
+    /* Let's not worry for now... */
+    return 0;
+}
+
+static int map_interrupt_to_domain(const struct dt_device_node *dev,
+                                   const struct dt_raw_irq *dt_raw_irq,
+                                   void *data)
+{
+    struct domain *d = data;
+    struct dt_irq dt_irq;
+    int res;
+
+    res = dt_irq_translate(dt_raw_irq, &dt_irq);
+    if ( res < 0 )
+    {
+        printk(XENLOG_ERR "%s: Failed to translate IRQ: %d\n",
+               dt_node_name(dev), res);
+        return res;
+    }
+
+    if ( dt_irq.irq < NR_LOCAL_IRQS )
+    {
+        printk(XENLOG_ERR "%s: IRQ%"PRId32" is not a SPI\n",
+               dt_node_name(dev), dt_irq.irq);
+        return -EINVAL;
+    }
+
+    /* Setup the IRQ type */
+    res = irq_set_spi_type(dt_irq.irq, dt_irq.type);
+    if ( res )
+    {
+        printk(XENLOG_ERR
+               "%s: Unable to setup IRQ%"PRId32" to dom%d\n",
+               dt_node_name(dev), dt_irq.irq, d->domain_id);
+        return res;
+    }
+
+    res = route_irq_to_guest(d, dt_irq.irq, dt_node_name(dev));
+    if ( res < 0 )
+    {
+        printk(XENLOG_ERR "Unable to map IRQ%"PRId32" to dom%d\n",
+               dt_irq.irq, d->domain_id);
+        return res;
+    }
+
+    DPRINT("PCI IRQ %u mapped to guest", dt_irq.irq);
+
+    return 0;
+}
+
+static int map_device_interrupts(struct domain *d, const struct dt_device_node *dev)
+{
+
+    if ( !dt_property_read_bool(dev, "interrupt-map") )
+        return 0; /* No interrupt map to handle */
+
+    if ( dt_device_type_is_equal(dev, "pci") )
+        return dt_for_each_irq_map(dev, &map_interrupt_to_domain, d);
+
+    printk("Cannot handle interrupt-map for non-PCI device %s type %s\n",
+           dt_node_name(dev), dev->type);
+
+    /* Let's not worry for now... */
+    return 0;
+}
+
+
 /* Map the device in the domain */
 static int map_device(struct domain *d, struct dt_device_node *dev)
 {
@@ -1025,6 +1173,14 @@ static int map_device(struct domain *d, struct dt_device_node *dev)
         }
     }
 
+    res = map_device_ranges(d, dev);
+    if ( res )
+        return res;
+
+    res = map_device_interrupts(d, dev);
+    if ( res )
+        return res;
+
     return 0;
 }
 
diff --git a/xen/arch/arm/platforms/xgene-storm.c b/xen/arch/arm/platforms/xgene-storm.c
index eee650e..2355795 100644
--- a/xen/arch/arm/platforms/xgene-storm.c
+++ b/xen/arch/arm/platforms/xgene-storm.c
@@ -40,148 +40,6 @@ static uint32_t xgene_storm_quirks(void)
     return PLATFORM_QUIRK_GIC_64K_STRIDE|PLATFORM_QUIRK_GUEST_PIRQ_NEED_EOI;
 }
 
-static int map_one_mmio(struct domain *d, const char *what,
-                         unsigned long start, unsigned long end)
-{
-    int ret;
-
-    printk("Additional MMIO %lx-%lx (%s)\n",
-           start, end, what);
-    ret = map_mmio_regions(d, start, end - start, start);
-    if ( ret )
-        printk("Failed to map %s @ %lx to dom%d\n",
-               what, start, d->domain_id);
-    return ret;
-}
-
-static int map_one_spi(struct domain *d, const char *what,
-                       unsigned int spi, unsigned int type)
-{
-    unsigned int irq;
-    int ret;
-
-    irq = spi + 32; /* SPIs start at IRQ 32 */
-
-    ret = irq_set_spi_type(irq, type);
-    if ( ret )
-    {
-        printk("Failed to set the type for IRQ%u\n", irq);
-        return ret;
-    }
-
-    printk("Additional IRQ %u (%s)\n", irq, what);
-
-    if ( !vgic_reserve_virq(d, irq) )
-        printk("Failed to reserve vIRQ %u on dom%d\n",
-               irq, d->domain_id);
-
-    ret = route_irq_to_guest(d, irq, what);
-    if ( ret )
-        printk("Failed to route %s to dom%d\n", what, d->domain_id);
-
-    return ret;
-}
-
-/* Creates MMIO mappings base..end as well as 4 SPIs from the given base. */
-static int xgene_storm_pcie_specific_mapping(struct domain *d,
-                                             const struct dt_device_node *node,
-                                             paddr_t base, paddr_t end,
-                                             int base_spi)
-{
-    int ret;
-
-    printk("Mapping additional regions for PCIe device %s\n",
-           dt_node_full_name(node));
-
-    /* Map the PCIe bus resources */
-    ret = map_one_mmio(d, "PCI MEMORY", paddr_to_pfn(base), paddr_to_pfn(end));
-    if ( ret )
-        goto err;
-
-    ret = map_one_spi(d, "PCI#INTA", base_spi+0, DT_IRQ_TYPE_LEVEL_HIGH);
-    if ( ret )
-        goto err;
-
-    ret = map_one_spi(d, "PCI#INTB", base_spi+1, DT_IRQ_TYPE_LEVEL_HIGH);
-    if ( ret )
-        goto err;
-
-    ret = map_one_spi(d, "PCI#INTC", base_spi+2, DT_IRQ_TYPE_LEVEL_HIGH);
-    if ( ret )
-        goto err;
-
-    ret = map_one_spi(d, "PCI#INTD", base_spi+3, DT_IRQ_TYPE_LEVEL_HIGH);
-    if ( ret )
-        goto err;
-
-    ret = 0;
-err:
-    return ret;
-}
-
-/*
- * Xen does not currently support mapping MMIO regions and interrupt
- * for bus child devices (referenced via the "ranges" and
- * "interrupt-map" properties to domain 0). Instead for now map the
- * necessary resources manually.
- */
-static int xgene_storm_specific_mapping(struct domain *d)
-{
-    struct dt_device_node *node = NULL;
-    int ret;
-
-    while ( (node = dt_find_compatible_node(node, "pci", "apm,xgene-pcie")) )
-    {
-        u64 addr;
-
-        /* Identify the bus via it's control register address */
-        ret = dt_device_get_address(node, 0, &addr, NULL);
-        if ( ret < 0 )
-            return ret;
-
-        if ( !dt_device_is_available(node) )
-            continue;
-
-       switch ( addr )
-        {
-        case 0x1f2b0000: /* PCIe0 */
-            ret = xgene_storm_pcie_specific_mapping(d,
-                node,
-                0x0e000000000UL, 0x10000000000UL, 0xc2);
-            break;
-        case 0x1f2c0000: /* PCIe1 */
-            ret = xgene_storm_pcie_specific_mapping(d,
-                node,
-                0x0d000000000UL, 0x0e000000000UL, 0xc8);
-            break;
-        case 0x1f2d0000: /* PCIe2 */
-            ret = xgene_storm_pcie_specific_mapping(d,
-                node,
-                0x09000000000UL, 0x0a000000000UL, 0xce);
-            break;
-        case 0x1f500000: /* PCIe3 */
-            ret = xgene_storm_pcie_specific_mapping(d,
-                node,
-                0x0a000000000UL, 0x0c000000000UL, 0xd4);
-            break;
-        case 0x1f510000: /* PCIe4 */
-            ret = xgene_storm_pcie_specific_mapping(d,
-                node,
-                0x0c000000000UL, 0x0d000000000UL, 0xda);
-            break;
-
-        default:
-            printk("Ignoring unknown PCI bus %s\n", dt_node_full_name(node));
-            continue;
-        }
-
-        if ( ret < 0 )
-            return ret;
-    }
-
-    return 0;
-}
-
 static void xgene_storm_reset(void)
 {
     void __iomem *addr;
@@ -230,7 +88,6 @@ PLATFORM_START(xgene_storm, "APM X-GENE STORM")
     .init = xgene_storm_init,
     .reset = xgene_storm_reset,
     .quirks = xgene_storm_quirks,
-    .specific_mapping = xgene_storm_specific_mapping,
 
     .dom0_gnttab_start = 0x1f800000,
     .dom0_gnttab_size = 0x20000,
-- 
1.7.10.4

