
[Xen-devel] [RFC 03/19] xen/arm: follow-up to allow DOM0 manage IRQ and MMIO



The commit 33233c2 "arch/arm: domain build: let dom0 access I/O memory of
mapped devices" fills iomem_caps so that DOM0 is allowed to manage the MMIO
regions of mapped devices.

A device can be disabled (i.e. by adding the property status = "disabled" in
the device tree) when the user intends to pass the device through to a guest.
This avoids DOM0 loading the driver for the device, only to unload it a few
minutes later.
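
(Not part of this patch, just background on what "disabled" means here: the
check follows the usual device tree "status" convention, the same one behind
the dt_device_is_available() call used below. A minimal sketch of that
convention, written as standalone C rather than the actual Xen helper:)

    /* Sketch of the device tree "status" convention only, not Xen code:
     * a node with no "status" property, or with "okay"/"ok", is usable;
     * status = "disabled" tells the OS not to bind a driver to it. */
    #include <stdbool.h>
    #include <string.h>

    static bool status_means_available(const char *status)
    {
        if ( status == NULL )   /* property absent: device is usable */
            return true;
        return !strcmp(status, "okay") || !strcmp(status, "ok");
    }

So status_means_available("disabled") is false, which is why DOM0 never binds
a driver to such a node.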

Even though we don't want to let DOM0 use such a device, the domain still
needs to be able to manage its MMIO/IRQ ranges. Rework the function map_device
(renamed to handle_device) to do the following (a condensed sketch of the
result is given after the list):

* For a given device node:
    - Give permission to manage the IRQs/MMIOs of this device
    - Retrieve the IRQ configuration (i.e. edge/level) from the device
    tree
* For an available device (i.e. status != "disabled" in the DT):
    - Assign the device to the guest if it's protected by an IOMMU
    - Map the IRQ and MMIO regions to the guest
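
Condensed sketch of the resulting function, as a reading aid for the diff
below only (device tree lookups, DPRINTs and most error messages are elided,
so this is not the literal code):

    static int handle_device(struct domain *d, struct dt_device_node *dev,
                             bool_t map)
    {
        unsigned int i, irq;
        int res;

        /* Only an available device protected by an IOMMU is assigned. */
        if ( dt_device_is_protected(dev) && map )
        {
            res = iommu_assign_dt_device(d, dev);
            if ( res )
                return res;
        }

        for ( i = 0; i < dt_number_of_irq(dev); i++ )
        {
            /* ... look up and translate IRQ i into 'irq' as today ... */

            /* Permission to manage the IRQ is always granted... */
            res = irq_permit_access(d, irq);
            if ( res )
                return res;

            /* ...but the IRQ is only routed when the device is available. */
            if ( map )
            {
                res = route_irq_to_guest(d, irq, dt_node_name(dev));
                if ( res )
                    return res;
            }
        }

        for ( i = 0; i < dt_number_of_address(dev); i++ )
        {
            /* ... retrieve the region and give iomem permission as today ... */

            /* The region is only mapped when the device is available. */
            if ( map )
            {
                /* ... map_mmio_regions(...) as in the current code ... */
            }
        }

        return 0;
    }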

Signed-off-by: Julien Grall <julien.grall@xxxxxxxxxx>
---
 xen/arch/arm/domain_build.c |   66 ++++++++++++++++++++++++++++---------------
 1 file changed, 44 insertions(+), 22 deletions(-)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index c3783cf..6a711cc 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -680,8 +680,14 @@ static int make_timer_node(const struct domain *d, void *fdt,
     return res;
 }
 
-/* Map the device in the domain */
-static int map_device(struct domain *d, struct dt_device_node *dev)
+/* For a given device node:
+ *  - Give permission to the guest to manage IRQ and MMIO range
+ *  - Retrieve the IRQ configuration (i.e. edge/level) from device tree
+ * When the device is available:
+ *  - Assign the device to the guest if it's protected by an IOMMU
+ *  - Map the IRQs and iomem regions to DOM0
+ */
+static int handle_device(struct domain *d, struct dt_device_node *dev, bool_t map)
 {
     unsigned int nirq;
     unsigned int naddr;
@@ -694,9 +700,10 @@ static int map_device(struct domain *d, struct dt_device_node *dev)
     nirq = dt_number_of_irq(dev);
     naddr = dt_number_of_address(dev);
 
-    DPRINT("%s nirq = %d naddr = %u\n", dt_node_full_name(dev), nirq, naddr);
+    DPRINT("%s map = %d nirq = %d naddr = %u\n", dt_node_full_name(dev),
+           map, nirq, naddr);
 
-    if ( dt_device_is_protected(dev) )
+    if ( dt_device_is_protected(dev) && map )
     {
         DPRINT("%s setup iommu\n", dt_node_full_name(dev));
         res = iommu_assign_dt_device(d, dev);
@@ -708,7 +715,7 @@ static int map_device(struct domain *d, struct dt_device_node *dev)
         }
     }
 
-    /* Map IRQs */
+    /* Give permission and map IRQs */
     for ( i = 0; i < nirq; i++ )
     {
         res = dt_device_get_raw_irq(dev, i, &rirq);
@@ -741,16 +748,28 @@ static int map_device(struct domain *d, struct dt_device_node *dev)
         irq = res;
 
         DPRINT("irq %u = %u\n", i, irq);
-        res = route_irq_to_guest(d, irq, dt_node_name(dev));
+
+        res = irq_permit_access(d, irq);
         if ( res )
         {
-            printk(XENLOG_ERR "Unable to route IRQ %u to domain %u\n",
-                   irq, d->domain_id);
+            printk(XENLOG_ERR "Unable to permit to dom%u access to IRQ %u\n",
+                   d->domain_id, irq);
             return res;
         }
+
+        if ( map )
+        {
+            res = route_irq_to_guest(d, irq, dt_node_name(dev));
+            if ( res )
+            {
+                printk(XENLOG_ERR "Unable to route IRQ %u to domain %u\n",
+                       irq, d->domain_id);
+                return res;
+            }
+        }
     }
 
-    /* Map the address ranges */
+    /* Give permission and map MMIOs */
     for ( i = 0; i < naddr; i++ )
     {
         res = dt_device_get_address(dev, i, &addr, &size);
@@ -774,17 +793,21 @@ static int map_device(struct domain *d, struct dt_device_node *dev)
                    addr & PAGE_MASK, PAGE_ALIGN(addr + size) - 1);
             return res;
         }
-        res = map_mmio_regions(d,
-                               paddr_to_pfn(addr & PAGE_MASK),
-                               DIV_ROUND_UP(size, PAGE_SIZE),
-                               paddr_to_pfn(addr & PAGE_MASK));
-        if ( res )
+
+        if ( map )
         {
-            printk(XENLOG_ERR "Unable to map 0x%"PRIx64
-                   " - 0x%"PRIx64" in domain %d\n",
-                   addr & PAGE_MASK, PAGE_ALIGN(addr + size) - 1,
-                   d->domain_id);
-            return res;
+            res = map_mmio_regions(d,
+                                   paddr_to_pfn(addr & PAGE_MASK),
+                                   DIV_ROUND_UP(size, PAGE_SIZE),
+                                   paddr_to_pfn(addr & PAGE_MASK));
+            if ( res )
+            {
+                printk(XENLOG_ERR "Unable to map 0x%"PRIx64
+                       " - 0x%"PRIx64" in domain %d\n",
+                       addr & PAGE_MASK, PAGE_ALIGN(addr + size) - 1,
+                       d->domain_id);
+                return res;
+            }
         }
     }
 
@@ -865,10 +888,9 @@ static int handle_node(struct domain *d, struct kernel_info *kinfo,
      *  property. Therefore these device doesn't need to be mapped. This
      *  solution can be use later for pass through.
      */
-    if ( !dt_device_type_is_equal(node, "memory") &&
-         dt_device_is_available(node) )
+    if ( !dt_device_type_is_equal(node, "memory") )
     {
-        res = map_device(d, node);
+        res = handle_device(d, node, dt_device_is_available(node));
 
         if ( res )
             return res;
-- 
1.7.10.4

