[XEN][PATCH v8 09/19] xen/iommu: Move spin_lock from iommu_dt_device_is_assigned to caller


  • To: <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Vikram Garhwal <vikram.garhwal@xxxxxxx>
  • Date: Wed, 16 Aug 2023 17:39:37 -0700
  • Cc: <michal.orzel@xxxxxxx>, <sstabellini@xxxxxxxxxx>, <vikram.garhwal@xxxxxxx>, Julien Grall <julien@xxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, George Dunlap <george.dunlap@xxxxxxxxxx>, "Jan Beulich" <jbeulich@xxxxxxxx>, Wei Liu <wl@xxxxxxx>
  • Delivery-date: Thu, 17 Aug 2023 00:50:48 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Rename iommu_dt_device_is_assigned() to iommu_dt_device_is_assigned_locked().
Remove the static qualifier so the SMMU drivers can also use it to check
whether a device is in use before removing it.

The spin_lock is moved to the caller to prevent concurrent access to
iommu_dt_device_is_assigned_locked() while doing add/remove/assign/deassign
operations. Follow-up patches in this series introduce the node add/remove
feature.

Also, since the caller is required to hold the correct lock, move the function
prototype to a private header.
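
For illustration only (not part of the patch): a minimal sketch of the calling
pattern expected after this change, matching the domctl hunk below; "path" and
"size" are stand-ins for the domctl-provided arguments.

    int ret;
    struct dt_device_node *dev;

    /* The caller now owns dtdevs_lock around the lookup and the check. */
    spin_lock(&dtdevs_lock);

    ret = dt_find_node_by_gpath(path, size, &dev);
    if ( !ret && iommu_dt_device_is_assigned_locked(dev) )
        ret = -EINVAL;  /* device is already assigned */

    spin_unlock(&dtdevs_lock);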

Signed-off-by: Vikram Garhwal <vikram.garhwal@xxxxxxx>

---
Changes from v7:
    Update commit message.
    Add ASSERT().
---
 xen/drivers/passthrough/device_tree.c | 26 +++++++++++++++++++++----
 xen/include/xen/iommu-private.h       | 28 +++++++++++++++++++++++++++
 2 files changed, 50 insertions(+), 4 deletions(-)
 create mode 100644 xen/include/xen/iommu-private.h

diff --git a/xen/drivers/passthrough/device_tree.c b/xen/drivers/passthrough/device_tree.c
index 1c32d7b50c..5796ee1f93 100644
--- a/xen/drivers/passthrough/device_tree.c
+++ b/xen/drivers/passthrough/device_tree.c
@@ -18,6 +18,7 @@
 #include <xen/device_tree.h>
 #include <xen/guest_access.h>
 #include <xen/iommu.h>
+#include <xen/iommu-private.h>
 #include <xen/lib.h>
 #include <xen/sched.h>
 #include <xsm/xsm.h>
@@ -83,16 +84,16 @@ fail:
     return rc;
 }
 
-static bool_t iommu_dt_device_is_assigned(const struct dt_device_node *dev)
+bool_t iommu_dt_device_is_assigned_locked(const struct dt_device_node *dev)
 {
     bool_t assigned = 0;
 
+    ASSERT(spin_is_locked(&dtdevs_lock));
+
     if ( !dt_device_is_protected(dev) )
         return 0;
 
-    spin_lock(&dtdevs_lock);
     assigned = !list_empty(&dev->domain_list);
-    spin_unlock(&dtdevs_lock);
 
     return assigned;
 }
@@ -213,27 +214,44 @@ int iommu_do_dt_domctl(struct xen_domctl *domctl, struct domain *d,
         if ( (d && d->is_dying) || domctl->u.assign_device.flags )
             break;
 
+        /*
+         * Hold dtdevs_lock so that the dev cannot disappear between looking
+         * it up with dt_find_node_by_gpath() and checking its assignment below.
+         */
+        spin_lock(&dtdevs_lock);
+
         ret = dt_find_node_by_gpath(domctl->u.assign_device.u.dt.path,
                                     domctl->u.assign_device.u.dt.size,
                                     &dev);
         if ( ret )
+        {
+            spin_unlock(&dtdevs_lock);
             break;
+        }
 
         ret = xsm_assign_dtdevice(XSM_HOOK, d, dt_node_full_name(dev));
         if ( ret )
+        {
+            spin_unlock(&dtdevs_lock);
             break;
+        }
 
         if ( domctl->cmd == XEN_DOMCTL_test_assign_device )
         {
-            if ( iommu_dt_device_is_assigned(dev) )
+
+            if ( iommu_dt_device_is_assigned_locked(dev) )
             {
                 printk(XENLOG_G_ERR "%s already assigned.\n",
                        dt_node_full_name(dev));
                 ret = -EINVAL;
             }
+
+            spin_unlock(&dtdevs_lock);
             break;
         }
 
+        spin_unlock(&dtdevs_lock);
+
         if ( d == dom_io )
             return -EINVAL;
 
diff --git a/xen/include/xen/iommu-private.h b/xen/include/xen/iommu-private.h
new file mode 100644
index 0000000000..bb5c94e7a6
--- /dev/null
+++ b/xen/include/xen/iommu-private.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * xen/iommu-private.h
+ */
+#ifndef __XEN_IOMMU_PRIVATE_H__
+#define __XEN_IOMMU_PRIVATE_H__
+
+#ifdef CONFIG_HAS_DEVICE_TREE
+#include <xen/device_tree.h>
+
+/*
+ * Checks if dt_device_node is assigned to a domain or not. This function
+ * expects to be called with dtdevs_lock acquired by caller.
+ */
+bool_t iommu_dt_device_is_assigned_locked(const struct dt_device_node *dev);
+#endif
+
+#endif /* __XEN_IOMMU_PRIVATE_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
-- 
2.17.1