
[Xen-devel] [PATCH v14 7/9] vtd: add lookup_page method to iommu_ops



This patch adds a new method to the VT-d IOMMU implementation to find the
MFN currently mapped by a specified DFN, along with a wrapper function in
generic IOMMU code that calls the implementation if it exists.

NOTE: This patch only adds a Xen-internal interface. This will be used by
      a subsequent patch.
      Another subsequent patch will add similar functionality for AMD
      IOMMUs.
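
For illustration only (not part of the diff below): a caller might use the
new wrapper roughly as follows. The helper name and the checks applied to
the result are hypothetical; the actual consumer is the subsequent patch
mentioned above.

/*
 * Hypothetical caller sketch: query what, if anything, is currently
 * mapped at 'dfn' and compare it with an expected frame.
 */
static int check_iommu_mapping(struct domain *d, dfn_t dfn, mfn_t new_mfn)
{
    mfn_t mfn;
    unsigned int flags;
    int rc = iommu_lookup_page(d, dfn, &mfn, &flags);

    if ( rc == -ENOENT )
        return 0;      /* nothing currently mapped at this DFN */

    if ( rc )
        return rc;     /* e.g. -EOPNOTSUPP for shared EPT / passthrough */

    /* Something is mapped: check it is the expected, writable frame. */
    if ( !mfn_eq(mfn, new_mfn) || !(flags & IOMMUF_writable) )
        return -EBUSY;

    return 1;
}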

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Kevin Tian <kevin.tian@xxxxxxxxx>
---
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: George Dunlap <george.dunlap@xxxxxxxxxx>

v11:
 - Fold in patch to change failure semantics (already ack-ed by Kevin).

v10:
 - Adjust the locking comment.

v9:
 - Add comment about locking in xen/iommu.h.

v8:
 - Remove clean-up as this is now done by a prior patch.
 - Make intel_iommu_lookup_page() return dfn value if using shared EPT
   or iommu_passthrough is set, as requested by Kevin.

v7:
 - Re-base and re-name BFN -> DFN.
 - Add missing checks for shared EPT and iommu_passthrough.
 - Remove unnecessary initializers and use array-style dereference.
 - Drop Wei's R-b because of code churn.

v3:
 - Addressed comments from George.

v2:
 - Addressed some comments from Jan.
---
 xen/drivers/passthrough/iommu.c     | 11 ++++++++++
 xen/drivers/passthrough/vtd/iommu.c | 41 +++++++++++++++++++++++++++++++++++++
 xen/drivers/passthrough/vtd/iommu.h |  3 +++
 xen/include/xen/iommu.h             | 10 +++++++++
 4 files changed, 65 insertions(+)
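
For illustration only (not part of the diff below): the flag translation
in intel_iommu_lookup_page() amounts to decoding the VT-d PTE read/write
bits, exposed via the new dma_pte_read()/dma_pte_write() macros, into the
generic IOMMUF_* flags. A minimal equivalent sketch, with a hypothetical
helper name:

/* Hypothetical helper mirroring the flag decode in the hunk below. */
static unsigned int dma_pte_to_iommuf_flags(struct dma_pte pte)
{
    unsigned int flags = 0;

    if ( dma_pte_read(pte) )      /* DMA_PTE_READ set in the PTE */
        flags |= IOMMUF_readable;
    if ( dma_pte_write(pte) )     /* DMA_PTE_WRITE set in the PTE */
        flags |= IOMMUF_writable;

    return flags;
}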

diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index c0dd8e494f..d51fffae40 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -327,6 +327,17 @@ int iommu_unmap_page(struct domain *d, dfn_t dfn)
     return rc;
 }
 
+int iommu_lookup_page(struct domain *d, dfn_t dfn, mfn_t *mfn,
+                      unsigned int *flags)
+{
+    const struct domain_iommu *hd = dom_iommu(d);
+
+    if ( !iommu_enabled || !hd->platform_ops )
+        return -EOPNOTSUPP;
+
+    return hd->platform_ops->lookup_page(d, dfn, mfn, flags);
+}
+
 static void iommu_free_pagetables(unsigned long unused)
 {
     do {
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 9187d50730..f94b522c73 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -1833,6 +1833,46 @@ static int __must_check intel_iommu_unmap_page(struct domain *d,
     return dma_pte_clear_one(d, dfn_to_daddr(dfn));
 }
 
+static int intel_iommu_lookup_page(struct domain *d, dfn_t dfn, mfn_t *mfn,
+                                   unsigned int *flags)
+{
+    struct domain_iommu *hd = dom_iommu(d);
+    struct dma_pte *page, val;
+    u64 pg_maddr;
+
+    /*
+     * If VT-d shares the EPT page table, or if iommu_hwdom_passthrough is
+     * set for the hardware domain, there is no VT-d mapping to look up.
+     */
+    if ( iommu_use_hap_pt(d) ||
+         (iommu_hwdom_passthrough && is_hardware_domain(d)) )
+        return -EOPNOTSUPP;
+
+    spin_lock(&hd->arch.mapping_lock);
+
+    pg_maddr = addr_to_dma_page_maddr(d, dfn_to_daddr(dfn), 0);
+    if ( !pg_maddr )
+    {
+        spin_unlock(&hd->arch.mapping_lock);
+        return -ENOMEM;
+    }
+
+    page = map_vtd_domain_page(pg_maddr);
+    val = page[dfn_x(dfn) & LEVEL_MASK];
+
+    unmap_vtd_domain_page(page);
+    spin_unlock(&hd->arch.mapping_lock);
+
+    if ( !dma_pte_present(val) )
+        return -ENOENT;
+
+    *mfn = maddr_to_mfn(dma_pte_addr(val));
+    *flags = dma_pte_read(val) ? IOMMUF_readable : 0;
+    *flags |= dma_pte_write(val) ? IOMMUF_writable : 0;
+
+    return 0;
+}
+
 int iommu_pte_flush(struct domain *d, uint64_t dfn, uint64_t *pte,
                     int order, int present)
 {
@@ -2658,6 +2698,7 @@ const struct iommu_ops intel_iommu_ops = {
     .teardown = iommu_domain_teardown,
     .map_page = intel_iommu_map_page,
     .unmap_page = intel_iommu_unmap_page,
+    .lookup_page = intel_iommu_lookup_page,
     .free_page_table = iommu_free_page_table,
     .reassign_device = reassign_device_ownership,
     .get_device_group_id = intel_iommu_group_id,
diff --git a/xen/drivers/passthrough/vtd/iommu.h b/xen/drivers/passthrough/vtd/iommu.h
index 72c1a2e3cd..47bdfcb5ea 100644
--- a/xen/drivers/passthrough/vtd/iommu.h
+++ b/xen/drivers/passthrough/vtd/iommu.h
@@ -272,6 +272,9 @@ struct dma_pte {
 #define dma_set_pte_prot(p, prot) do { \
         (p).val = ((p).val & ~DMA_PTE_PROT) | ((prot) & DMA_PTE_PROT); \
     } while (0)
+#define dma_pte_prot(p) ((p).val & DMA_PTE_PROT)
+#define dma_pte_read(p) (dma_pte_prot(p) & DMA_PTE_READ)
+#define dma_pte_write(p) (dma_pte_prot(p) & DMA_PTE_WRITE)
 #define dma_pte_addr(p) ((p).val & PADDR_MASK & PAGE_MASK_4K)
 #define dma_set_pte_addr(p, addr) do {\
             (p).val |= ((addr) & PAGE_MASK_4K); } while (0)
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index 7313957c81..9ae8321bb4 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -92,6 +92,8 @@ void iommu_teardown(struct domain *d);
 int __must_check iommu_map_page(struct domain *d, dfn_t dfn,
                                 mfn_t mfn, unsigned int flags);
 int __must_check iommu_unmap_page(struct domain *d, dfn_t dfn);
+int __must_check iommu_lookup_page(struct domain *d, dfn_t dfn, mfn_t *mfn,
+                                   unsigned int *flags);
 
 enum iommu_feature
 {
@@ -179,9 +181,17 @@ struct iommu_ops {
 #endif /* HAS_PCI */
 
     void (*teardown)(struct domain *d);
+
+    /*
+     * This block of operations must be appropriately locked against each
+     * other by the caller in order to have meaningful results.
+     */
     int __must_check (*map_page)(struct domain *d, dfn_t dfn, mfn_t mfn,
                                  unsigned int flags);
     int __must_check (*unmap_page)(struct domain *d, dfn_t dfn);
+    int __must_check (*lookup_page)(struct domain *d, dfn_t dfn, mfn_t *mfn,
+                                    unsigned int *flags);
+
     void (*free_page_table)(struct page_info *);
 #ifdef CONFIG_X86
     void (*update_ire_from_apic)(unsigned int apic, unsigned int reg, unsigned int value);
-- 
2.11.0


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel