[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 5/7] amd-iommu: introduce new get/set_iommu_pde_info() functions...



...and use set_iommu_pde_info() in set_iommu_pde_present().

set_iommu_pde_info() only sets the address and read/write flags in the PDE,
leaving the (PTE-only) FC bit, level value and presence bit to be
subsequently set by set_iommu_pde_present(). A memory barrier is added to
ensure that the presence bit is last to be set.

A subsequent patch will make further use of get_iommu_pde_info().

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
Cc: Brian Woods <brian.woods@xxxxxxx>
---
 xen/drivers/passthrough/amd/iommu_map.c | 88 +++++++++++++++++++++------------
 1 file changed, 56 insertions(+), 32 deletions(-)

diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index a186c8d28b..fecde9d645 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -45,15 +45,10 @@ void clear_iommu_pte_present(unsigned long l1_mfn, unsigned long dfn)
     unmap_domain_page(table);
 }
 
-static bool set_iommu_pde_present(uint32_t *pde, unsigned long next_mfn,
-                                  unsigned int next_level,
-                                  bool iw, bool ir)
+static void get_iommu_pde_info(uint32_t *pde, uint64_t *maddr, bool *iw,
+                               bool *ir)
 {
-    uint64_t addr_lo, addr_hi, maddr_old, maddr_next;
-    uint32_t entry;
-    bool need_flush = false;
-
-    maddr_next = (uint64_t)next_mfn << PAGE_SHIFT;
+    uint64_t addr_lo, addr_hi;
 
     addr_hi = get_field_from_reg_u32(pde[1],
                                      IOMMU_PTE_ADDR_HIGH_MASK,
@@ -61,45 +56,74 @@ static bool set_iommu_pde_present(uint32_t *pde, unsigned long next_mfn,
     addr_lo = get_field_from_reg_u32(pde[0],
                                      IOMMU_PTE_ADDR_LOW_MASK,
                                      IOMMU_PTE_ADDR_LOW_SHIFT);
+    *maddr = (addr_hi << 32) | (addr_lo << PAGE_SHIFT);
 
-    maddr_old = (addr_hi << 32) | (addr_lo << PAGE_SHIFT);
+    if ( iw )
+        *iw = !!get_field_from_reg_u32(pde[1],
+                                       IOMMU_PDE_IO_WRITE_PERMISSION_MASK,
+                                       IOMMU_PDE_IO_WRITE_PERMISSION_SHIFT);
+
+    if ( ir )
+        *ir = !!get_field_from_reg_u32(pde[1],
+                                       IOMMU_PDE_IO_READ_PERMISSION_MASK,
+                                       IOMMU_PDE_IO_READ_PERMISSION_SHIFT);
+}
+
+static bool set_iommu_pde_info(uint32_t *pde, uint64_t maddr, bool iw,
+                               bool ir)
+{
+    uint64_t addr_lo, addr_hi, maddr_old;
 
-    if ( maddr_old != maddr_next )
-        need_flush = 1;
+    get_iommu_pde_info(pde, &maddr_old, NULL, NULL);
 
-    addr_lo = maddr_next & DMA_32BIT_MASK;
-    addr_hi = maddr_next >> 32;
+    addr_lo = (maddr & DMA_32BIT_MASK) >> PAGE_SHIFT;
+    addr_hi = maddr >> 32;
 
-    /* enable read/write permissions,which will be enforced at the PTE */
     set_field_in_reg_u32((uint32_t)addr_hi, 0,
                          IOMMU_PDE_ADDR_HIGH_MASK,
-                         IOMMU_PDE_ADDR_HIGH_SHIFT, &entry);
-    set_field_in_reg_u32(iw, entry,
+                         IOMMU_PDE_ADDR_HIGH_SHIFT, &pde[1]);
+    set_field_in_reg_u32(iw, pde[1],
                          IOMMU_PDE_IO_WRITE_PERMISSION_MASK,
-                         IOMMU_PDE_IO_WRITE_PERMISSION_SHIFT, &entry);
-    set_field_in_reg_u32(ir, entry,
+                         IOMMU_PDE_IO_WRITE_PERMISSION_SHIFT, &pde[1]);
+    set_field_in_reg_u32(ir, pde[1],
                          IOMMU_PDE_IO_READ_PERMISSION_MASK,
-                         IOMMU_PDE_IO_READ_PERMISSION_SHIFT, &entry);
+                         IOMMU_PDE_IO_READ_PERMISSION_SHIFT, &pde[1]);
+    set_field_in_reg_u32((uint32_t)addr_lo, 0,
+                         IOMMU_PDE_ADDR_LOW_MASK,
+                         IOMMU_PDE_ADDR_LOW_SHIFT, &pde[0]);
+
+    return maddr != maddr_old;
+}
+
+static bool set_iommu_pde_present(uint32_t *pde, unsigned long next_mfn,
+                                  unsigned int next_level,
+                                  bool_t iw, bool_t ir)
+{
+    bool need_flush = set_iommu_pde_info(pde, next_mfn << PAGE_SHIFT, iw,
+                                         ir);
 
-    /* FC bit should be enabled in PTE, this helps to solve potential
-     * issues with ATS devices
+    /*
+     * FC bit should be enabled in PTE, this helps to solve potential
+     * issues with ATS devices.
      */
     if ( next_level == IOMMU_PAGING_MODE_LEVEL_0 )
-        set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
-                             IOMMU_PTE_FC_MASK, IOMMU_PTE_FC_SHIFT, &entry);
-    pde[1] = entry;
+        set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, pde[1],
+                             IOMMU_PTE_FC_MASK, IOMMU_PTE_FC_SHIFT,
+                             &pde[1]);
 
     /* mark next level as 'present' */
-    set_field_in_reg_u32((uint32_t)addr_lo >> PAGE_SHIFT, 0,
-                         IOMMU_PDE_ADDR_LOW_MASK,
-                         IOMMU_PDE_ADDR_LOW_SHIFT, &entry);
-    set_field_in_reg_u32(next_level, entry,
+    set_field_in_reg_u32(next_level, pde[0],
                          IOMMU_PDE_NEXT_LEVEL_MASK,
-                         IOMMU_PDE_NEXT_LEVEL_SHIFT, &entry);
-    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
+                         IOMMU_PDE_NEXT_LEVEL_SHIFT, &pde[0]);
+
+    /*
+     * Make sure all other bits are written before the entry is made
+     * present.
+     */
+    smp_mb();
+    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, pde[0],
                          IOMMU_PDE_PRESENT_MASK,
-                         IOMMU_PDE_PRESENT_SHIFT, &entry);
-    pde[0] = entry;
+                         IOMMU_PDE_PRESENT_SHIFT, &pde[0]);
 
     return need_flush;
 }
-- 
2.11.0


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.