
[PATCH V3 7/8] xen/grant-dma-ops: Retrieve the ID of backend's domain for DT devices



From: Oleksandr Tyshchenko <oleksandr_tyshchenko@xxxxxxxx>

Use the presence of an "iommus" property pointing to an IOMMU node with
the recently introduced "xen,grant-dma" compatible string as a clear
indicator that the Xen grant mappings scheme is enabled for that device,
and read the ID of the Xen domain where the corresponding backend is
running. The domid (domain ID) is used as an argument to the Xen grant
mapping APIs.
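
For example, with this scheme a virtio-mmio device could be described
in the guest device tree roughly as follows (an illustrative sketch:
the node names, unit address and backend domid value 1 are made up;
what matters is the "xen,grant-dma" compatible and the single
specifier cell carrying the backend domid):

    xen_iommu: iommu {
            compatible = "xen,grant-dma";
            #iommu-cells = <1>;
    };

    virtio@2000000 {
            compatible = "virtio,mmio";
            reg = <0x2000000 0x200>;
            /* the single specifier cell is the backend domid (here 1) */
            iommus = <&xen_iommu 1>;
    };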

To avoid the deferred probe timeout that occurs when reusing the generic
IOMMU device tree bindings (because the IOMMU device never becomes
available), enable the recently introduced stub IOMMU driver by
selecting XEN_GRANT_DMA_IOMMU.
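
For context, such a stub driver essentially just binds to the
"xen,grant-dma" node and registers a dummy iommu_device, so the IOMMU
core stops deferring consumers and lets them fall back to their
regular DMA path. A condensed sketch (details may differ from the
actual grant-dma-iommu driver added earlier in this series):

    #include <linux/err.h>
    #include <linux/iommu.h>
    #include <linux/of.h>
    #include <linux/platform_device.h>

    static struct iommu_device grant_dma_iommu;

    static struct iommu_device *
    grant_dma_iommu_probe_device(struct device *dev)
    {
            /* Never attach: consumers keep their regular DMA ops */
            return ERR_PTR(-ENODEV);
    }

    static const struct iommu_ops grant_dma_iommu_ops = {
            .probe_device = grant_dma_iommu_probe_device,
    };

    static const struct of_device_id grant_dma_iommu_of_match[] = {
            { .compatible = "xen,grant-dma" },
            { },
    };

    static int grant_dma_iommu_probe(struct platform_device *pdev)
    {
            /* Registering satisfies the IOMMU core; nothing else to do */
            return iommu_device_register(&grant_dma_iommu,
                                         &grant_dma_iommu_ops, &pdev->dev);
    }

    static struct platform_driver grant_dma_iommu_driver = {
            .driver = {
                    .name = "grant-dma-iommu",
                    .of_match_table = grant_dma_iommu_of_match,
            },
            .probe = grant_dma_iommu_probe,
    };
    builtin_platform_driver(grant_dma_iommu_driver);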

Also introduce xen_is_grant_dma_device() to check whether xen-grant DMA
ops need to be set for a given device.
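
The expected caller is the arch-level code that assigns DMA ops per
device; a minimal sketch of how that might look (the
xen_setup_dma_ops() wrapper and the swiotlb fallback are assumptions
here, not part of this patch):

    #include <xen/swiotlb-xen.h>
    #include <xen/xen-ops.h>

    static inline void xen_setup_dma_ops(struct device *dev)
    {
            if (xen_is_grant_dma_device(dev))
                    /* backend domid is taken from the "iommus" property */
                    xen_grant_setup_dma_ops(dev);
            else if (xen_swiotlb_detect())
                    dev->dma_ops = &xen_swiotlb_dma_ops;
    }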

Remove the hardcoded domid 0 in xen_grant_setup_dma_ops().

Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@xxxxxxxx>
---
Changes RFC -> V1:
   - new patch, split required changes from commit:
    "[PATCH 4/6] virtio: Various updates to xen-virtio DMA ops layer"
   - update checks in xen_virtio_setup_dma_ops() to only support
     DT devices for now
   - remove the "virtio,mmio" check from xen_is_virtio_device()
   - rename everything according to the new naming scheme:
     s/virtio/grant_dma

Changes V1 -> V2:
   - remove dev_is_pci() check in xen_grant_setup_dma_ops()
   - remove EXPORT_SYMBOL_GPL(xen_is_grant_dma_device);

Changes V2 -> V3:
   - Stefano already gave his Reviewed-by; I dropped it due to the
     significant changes
   - update commit description
   - reuse generic IOMMU device tree bindings, select XEN_GRANT_DMA_IOMMU
     to avoid the deferred probe timeout
---
 drivers/xen/Kconfig         |  1 +
 drivers/xen/grant-dma-ops.c | 48 ++++++++++++++++++++++++++++++++++++++-------
 include/xen/xen-ops.h       |  5 +++++
 3 files changed, 47 insertions(+), 7 deletions(-)

diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 35d20d9..bfd5f4f 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -347,6 +347,7 @@ config XEN_VIRTIO
        bool "Xen virtio support"
        depends on VIRTIO
        select XEN_GRANT_DMA_OPS
+       select XEN_GRANT_DMA_IOMMU if OF
        help
          Enable virtio support for running as Xen guest. Depending on the
          guest type this will require special support on the backend side
diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c
index 44659f4..6586152 100644
--- a/drivers/xen/grant-dma-ops.c
+++ b/drivers/xen/grant-dma-ops.c
@@ -55,11 +55,6 @@ static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev)
  * Such a DMA address is formed by using the grant reference as a frame
  * number and setting the highest address bit (this bit is for the backend
  * to be able to distinguish it from e.g. a mmio address).
- *
- * Note that for now we hard wire dom0 to be the backend domain. In order
- * to support any domain as backend we'd need to add a way to communicate
- * the domid of this backend, e.g. via Xenstore, via the PCI-device's
- * config space or DT/ACPI.
  */
 static void *xen_grant_dma_alloc(struct device *dev, size_t size,
                                 dma_addr_t *dma_handle, gfp_t gfp,
@@ -275,9 +270,26 @@ static const struct dma_map_ops xen_grant_dma_ops = {
        .dma_supported = xen_grant_dma_supported,
 };
 
+bool xen_is_grant_dma_device(struct device *dev)
+{
+       struct device_node *iommu_np;
+       bool has_iommu;
+
+       /* XXX Handle only DT devices for now */
+       if (!dev->of_node)
+               return false;
+
+       iommu_np = of_parse_phandle(dev->of_node, "iommus", 0);
+       has_iommu = iommu_np && of_device_is_compatible(iommu_np, "xen,grant-dma");
+       of_node_put(iommu_np);
+
+       return has_iommu;
+}
+
 void xen_grant_setup_dma_ops(struct device *dev)
 {
        struct xen_grant_dma_data *data;
+       struct of_phandle_args iommu_spec;
 
        data = find_xen_grant_dma_data(dev);
        if (data) {
@@ -285,12 +297,34 @@ void xen_grant_setup_dma_ops(struct device *dev)
                return;
        }
 
+       /* XXX ACPI device unsupported for now */
+       if (!dev->of_node)
+               goto err;
+
+       if (of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells",
+                       0, &iommu_spec)) {
+               dev_err(dev, "Cannot parse iommus property\n");
+               goto err;
+       }
+
+       if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") ||
+                       iommu_spec.args_count != 1) {
+               dev_err(dev, "Incompatible IOMMU node\n");
+               of_node_put(iommu_spec.np);
+               goto err;
+       }
+
+       of_node_put(iommu_spec.np);
+
        data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                goto err;
 
-       /* XXX The dom0 is hardcoded as the backend domain for now */
-       data->backend_domid = 0;
+       /*
+        * The endpoint ID here means the ID of the domain where the corresponding
+        * backend is running
+        */
+       data->backend_domid = iommu_spec.args[0];
 
        if (xa_err(xa_store(&xen_grant_dma_devices, (unsigned long)dev, data,
                        GFP_KERNEL))) {
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 4f9fad5..62be9dc 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -223,10 +223,15 @@ static inline void xen_preemptible_hcall_end(void) { }
 
 #ifdef CONFIG_XEN_GRANT_DMA_OPS
 void xen_grant_setup_dma_ops(struct device *dev);
+bool xen_is_grant_dma_device(struct device *dev);
 #else
 static inline void xen_grant_setup_dma_ops(struct device *dev)
 {
 }
+static inline bool xen_is_grant_dma_device(struct device *dev)
+{
+       return false;
+}
 #endif /* CONFIG_XEN_GRANT_DMA_OPS */
 
 #endif /* INCLUDE_XEN_OPS_H */
-- 
2.7.4
