[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[xen staging] AMD/IOMMU: provide function backing XENMEM_reserved_device_memory_map



commit 2209c36007ec209ab1189ac0bfd5144a2db80090
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Wed Sep 22 16:16:28 2021 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Wed Sep 22 16:16:28 2021 +0200

    AMD/IOMMU: provide function backing XENMEM_reserved_device_memory_map
    
    Just like for VT-d, exclusion / unity map ranges would better be
    reflected in e.g. the guest's E820 map. The reporting infrastructure
    was put in place still pretty tailored to VT-d's needs; extend
    get_reserved_device_memory() to allow vendor specific code to probe
    whether a particular (seg,bus,dev,func) tuple would get its data
    actually recorded. I admit the de-duplication of entries is quite
    limited for now, but considering our trouble to find a system
    surfacing _any_ IVMD this is likely not a critical issue for this
    initial implementation.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Paul Durrant <paul@xxxxxxx>
---
 xen/common/memory.c                         |  3 ++
 xen/drivers/passthrough/amd/iommu.h         |  2 +
 xen/drivers/passthrough/amd/iommu_acpi.c    | 20 +++++---
 xen/drivers/passthrough/amd/iommu_map.c     | 75 +++++++++++++++++++++++++++++
 xen/drivers/passthrough/amd/pci_amd_iommu.c |  1 +
 5 files changed, 95 insertions(+), 6 deletions(-)

diff --git a/xen/common/memory.c b/xen/common/memory.c
index 63642278fd..f333c994c8 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -1042,6 +1042,9 @@ static int get_reserved_device_memory(xen_pfn_t start, xen_ulong_t nr,
     if ( !(grdm->map.flags & XENMEM_RDM_ALL) && (sbdf != id) )
         return 0;
 
+    if ( !nr )
+        return 1;
+
     if ( grdm->used_entries < grdm->map.nr_entries )
     {
         struct xen_reserved_device_memory rdm = {
diff --git a/xen/drivers/passthrough/amd/iommu.h b/xen/drivers/passthrough/amd/iommu.h
index 721d0c395b..b0e6322906 100644
--- a/xen/drivers/passthrough/amd/iommu.h
+++ b/xen/drivers/passthrough/amd/iommu.h
@@ -110,6 +110,7 @@ struct amd_iommu {
 struct ivrs_unity_map {
     bool read:1;
     bool write:1;
+    bool global:1;
     paddr_t addr;
     unsigned long length;
     struct ivrs_unity_map *next;
@@ -236,6 +237,7 @@ int amd_iommu_reserve_domain_unity_map(struct domain *domain,
                                        unsigned int flag);
 int amd_iommu_reserve_domain_unity_unmap(struct domain *d,
                                          const struct ivrs_unity_map *map);
+int amd_iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt);
 int __must_check amd_iommu_flush_iotlb_pages(struct domain *d, dfn_t dfn,
                                              unsigned long page_count,
                                              unsigned int flush_flags);
diff --git a/xen/drivers/passthrough/amd/iommu_acpi.c b/xen/drivers/passthrough/amd/iommu_acpi.c
index bc3c946fe5..0860b23c88 100644
--- a/xen/drivers/passthrough/amd/iommu_acpi.c
+++ b/xen/drivers/passthrough/amd/iommu_acpi.c
@@ -143,7 +143,7 @@ static int __init reserve_iommu_exclusion_range(
 
 static int __init reserve_unity_map_for_device(
     uint16_t seg, uint16_t bdf, unsigned long base,
-    unsigned long length, bool iw, bool ir)
+    unsigned long length, bool iw, bool ir, bool global)
 {
     struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(seg);
     struct ivrs_unity_map *unity_map = ivrs_mappings[bdf].unity_map;
@@ -162,7 +162,11 @@ static int __init reserve_unity_map_for_device(
          */
         if ( base == unity_map->addr && length == unity_map->length &&
              ir == unity_map->read && iw == unity_map->write )
+        {
+            if ( global )
+                unity_map->global = true;
             return 0;
+        }
 
         if ( unity_map->addr + unity_map->length > base &&
              base + length > unity_map->addr )
@@ -181,6 +185,7 @@ static int __init reserve_unity_map_for_device(
 
     unity_map->read = ir;
     unity_map->write = iw;
+    unity_map->global = global;
     unity_map->addr = base;
     unity_map->length = length;
     unity_map->next = ivrs_mappings[bdf].unity_map;
@@ -220,7 +225,8 @@ static int __init register_range_for_all_devices(
 
         /* reserve r/w unity-mapped page entries for devices */
         for ( bdf = rc = 0; !rc && bdf < ivrs_bdf_entries; bdf++ )
-            rc = reserve_unity_map_for_device(seg, bdf, base, length, iw, ir);
+            rc = reserve_unity_map_for_device(seg, bdf, base, length, iw, ir,
+                                              true);
     }
 
     return rc;
@@ -253,8 +259,10 @@ static int __init register_range_for_device(
         paddr_t length = limit + PAGE_SIZE - base;
 
         /* reserve unity-mapped page entries for device */
-        rc = reserve_unity_map_for_device(seg, bdf, base, length, iw, ir) ?:
-             reserve_unity_map_for_device(seg, req, base, length, iw, ir);
+        rc = reserve_unity_map_for_device(seg, bdf, base, length, iw, ir,
+                                          false) ?:
+             reserve_unity_map_for_device(seg, req, base, length, iw, ir,
+                                          false);
     }
     else
     {
@@ -290,9 +298,9 @@ static int __init register_range_for_iommu_devices(
 
         req = get_ivrs_mappings(iommu->seg)[bdf].dte_requestor_id;
         rc = reserve_unity_map_for_device(iommu->seg, bdf, base, length,
-                                          iw, ir) ?:
+                                          iw, ir, false) ?:
              reserve_unity_map_for_device(iommu->seg, req, base, length,
-                                          iw, ir);
+                                          iw, ir, false);
     }
 
     return rc;
diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index 10fda5519c..93501ee2c5 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -462,6 +462,81 @@ int amd_iommu_reserve_domain_unity_unmap(struct domain *d,
     return rc;
 }
 
+int amd_iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt)
+{
+    unsigned int seg = 0 /* XXX */, bdf;
+    const struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(seg);
+    /* At least for global entries, avoid reporting them multiple times. */
+    enum { pending, processing, done } global = pending;
+
+    for ( bdf = 0; bdf < ivrs_bdf_entries; ++bdf )
+    {
+        pci_sbdf_t sbdf = PCI_SBDF2(seg, bdf);
+        const struct ivrs_unity_map *um = ivrs_mappings[bdf].unity_map;
+        unsigned int req = ivrs_mappings[bdf].dte_requestor_id;
+        const struct amd_iommu *iommu = ivrs_mappings[bdf].iommu;
+        int rc;
+
+        if ( !iommu )
+        {
+            /* May need to trigger the workaround in find_iommu_for_device(). */
+            const struct pci_dev *pdev;
+
+            pcidevs_lock();
+            pdev = pci_get_pdev(seg, sbdf.bus, sbdf.devfn);
+            pcidevs_unlock();
+
+            if ( pdev )
+                iommu = find_iommu_for_device(seg, bdf);
+            if ( !iommu )
+                continue;
+        }
+
+        if ( func(0, 0, sbdf.sbdf, ctxt) )
+        {
+            /*
+             * When the caller processes a XENMEM_RDM_ALL request, don't report
+             * multiple times the same range(s) for perhaps many devices with
+             * the same alias ID.
+             */
+            if ( bdf != req && ivrs_mappings[req].iommu &&
+                 func(0, 0, PCI_SBDF2(seg, req).sbdf, ctxt) )
+                continue;
+
+            if ( global == pending )
+                global = processing;
+        }
+
+        if ( iommu->exclusion_enable &&
+             (iommu->exclusion_allow_all ?
+              global == processing :
+              ivrs_mappings[bdf].dte_allow_exclusion) )
+        {
+            rc = func(PFN_DOWN(iommu->exclusion_base),
+                      PFN_UP(iommu->exclusion_limit | 1) -
+                      PFN_DOWN(iommu->exclusion_base), sbdf.sbdf, ctxt);
+            if ( unlikely(rc < 0) )
+                return rc;
+        }
+
+        for ( ; um; um = um->next )
+        {
+            if ( um->global && global != processing )
+                continue;
+
+            rc = func(PFN_DOWN(um->addr), PFN_DOWN(um->length),
+                      sbdf.sbdf, ctxt);
+            if ( unlikely(rc < 0) )
+                return rc;
+        }
+
+        if ( global == processing )
+            global = done;
+    }
+
+    return 0;
+}
+
 int __init amd_iommu_quarantine_init(struct domain *d)
 {
     struct domain_iommu *hd = dom_iommu(d);
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index d2678b365a..86e4864e5d 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -652,6 +652,7 @@ static const struct iommu_ops __initconstrel _iommu_ops = {
     .suspend = amd_iommu_suspend,
     .resume = amd_iommu_resume,
     .crash_shutdown = amd_iommu_crash_shutdown,
+    .get_reserved_device_memory = amd_iommu_get_reserved_device_memory,
     .dump_page_tables = amd_dump_page_tables,
 };
 
--
generated by git-patchbot for /home/xen/git/xen.git#staging



 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.