[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v2] xen/passthrough: Support a single iommu_domain per xen domain per SMMU



If multiple devices are being passed through to the same domain and they
share a single SMMU, then they only require a single iommu_domain.

In arm_smmu_assign_dev, before a new iommu_domain is created, the
xen_domain->contexts list is checked for any iommu_domain that is already
assigned to a device that uses the same SMMU as the current device. If one
is found, attach the device to that iommu_domain. If none is found,
create a new iommu_domain just like before.

The arm_smmu_deassign_dev function assumed that there was a single
device per iommu_domain. This meant that when the first device was
deassigned, the iommu_domain was freed, and when another device was
deassigned a crash occurred in xen.

To fix this, a reference counter was added to the iommu_domain struct.
When an arm_smmu_xen_device references an iommu_domain, the
iommu_domain's ref is incremented. When that reference is removed, the
iommu_domain's ref is decremented. The iommu_domain will only be freed
when the ref is 0.

Signed-off-by: Robbie VanVossen <robert.vanvossen@xxxxxxxxxxxxxxx>

---
Changed since v1:
  * Fixed coding style for comments
  * Move increment/decrement outside of attach/detach functions
  * Expanded xen_domain->lock to protect more of the assign/deassign
    functions
  * Removed iommu_domain add/remove_device functions
---
 xen/drivers/passthrough/arm/smmu.c |   98 +++++++++++++++++++++++++++---------
 1 file changed, 74 insertions(+), 24 deletions(-)

diff --git a/xen/drivers/passthrough/arm/smmu.c 
b/xen/drivers/passthrough/arm/smmu.c
index a7a7da9..1a3de00 100644
--- a/xen/drivers/passthrough/arm/smmu.c
+++ b/xen/drivers/passthrough/arm/smmu.c
@@ -223,6 +223,7 @@ struct iommu_domain
        /* Runtime SMMU configuration for this iommu_domain */
        struct arm_smmu_domain          *priv;
 
+       atomic_t ref;
        /* Used to link iommu_domain contexts for a same domain.
         * There is at least one per-SMMU to used by the domain.
         * */
@@ -2569,7 +2570,9 @@ static int arm_smmu_assign_dev(struct domain *d, u8 devfn,
 {
        struct iommu_domain *domain;
        struct arm_smmu_xen_domain *xen_domain;
-       int ret;
+       struct arm_smmu_device *smmu;
+       int ret = 0;
+       int existing_ctxt_fnd = 0;
 
        xen_domain = domain_hvm_iommu(d)->arch.priv;
 
@@ -2585,36 +2588,77 @@ static int arm_smmu_assign_dev(struct domain *d, u8 
devfn,
                        return ret;
        }
 
+       spin_lock(&xen_domain->lock);
+
        /*
-        * TODO: Share the context bank (i.e iommu_domain) when the device is
-        * under the same SMMU as another device assigned to this domain.
-        * Would it useful for PCI
+        * Check to see if a context bank (iommu_domain) already exists for
+        * this xen domain under the same SMMU
         */
-       domain = xzalloc(struct iommu_domain);
-       if (!domain)
-               return -ENOMEM;
+       if (!list_empty(&xen_domain->contexts)) {
+               smmu = find_smmu_for_device(dev);
+               if (!smmu) {
+                       dev_err(dev, "cannot find SMMU\n");
+                       ret = -ENXIO;
+                       goto out;
+               }
 
-       ret = arm_smmu_domain_init(domain);
-       if (ret)
-               goto err_dom_init;
+               /*
+                * Loop through the &xen_domain->contexts to locate a context
+                * assigned to this SMMU
+                */
+               list_for_each_entry(domain, &xen_domain->contexts, list) {
+                       if(domain->priv->smmu == smmu)
+                       {
+                               /*
+                                * We have found a context already associated 
with the
+                                * same xen domain and SMMU
+                                */
+                               ret = arm_smmu_attach_dev(domain, dev);
+                               if (ret) {
+                                       dev_err(dev,
+                                               "cannot attach device to 
already existing iommu_domain\n");
+                                       goto out;
+                               }
+                               atomic_inc(&domain->ref);
+
+                               existing_ctxt_fnd = 1;
+                               break;
+                       }
+               }
+       }
 
-       domain->priv->cfg.domain = d;
+       if(!existing_ctxt_fnd){
 
-       ret = arm_smmu_attach_dev(domain, dev);
-       if (ret)
-               goto err_attach_dev;
+               domain = xzalloc(struct iommu_domain);
+               if (!domain) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
 
-       spin_lock(&xen_domain->lock);
-       /* Chain the new context to the domain */
-       list_add(&domain->list, &xen_domain->contexts);
-       spin_unlock(&xen_domain->lock);
+               ret = arm_smmu_domain_init(domain);
+               if (ret)
+                       goto err_dom_init;
 
-       return 0;
+               domain->priv->cfg.domain = d;
+
+               ret = arm_smmu_attach_dev(domain, dev);
+               if (ret)
+                       goto err_attach_dev;
+               atomic_inc(&domain->ref);
+
+               /* Chain the new context to the domain */
+               list_add(&domain->list, &xen_domain->contexts);
+               
+       }
+
+       goto out;
 
 err_attach_dev:
        arm_smmu_domain_destroy(domain);
 err_dom_init:
        xfree(domain);
+out:
+       spin_unlock(&xen_domain->lock);
 
        return ret;
 }
@@ -2631,14 +2675,20 @@ static int arm_smmu_deassign_dev(struct domain *d, 
struct device *dev)
                return -ESRCH;
        }
 
+       spin_lock(&xen_domain->lock);
+
        arm_smmu_detach_dev(domain, dev);
+       atomic_dec(&domain->ref);
 
-       spin_lock(&xen_domain->lock);
-       list_del(&domain->list);
-       spin_unlock(&xen_domain->lock);
+       if (domain->ref.counter == 0)
+       {
+               list_del(&domain->list);
 
-       arm_smmu_domain_destroy(domain);
-       xfree(domain);
+               arm_smmu_domain_destroy(domain);
+               xfree(domain);
+       }
+
+       spin_unlock(&xen_domain->lock);
 
        return 0;
 }
-- 
1.7.9.5


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.