
[Xen-devel] [PATCH v4 11/15] mm / iommu: split need_iommu() into has_iommu_pt() and sync_iommu_pt()



The name 'need_iommu' is misleading: it suggests that a domain needs to use
the IOMMU but that something may not yet be set up, when in fact the flag
does not become true until IOMMU mappings for the domain have been created.

Moreover, callers in various places in the code infer two different
meanings from it:

- Some callers want to test whether a domain has IOMMU mappings
- Some callers want to test whether they need to synchronize the domain's
  P2M and IOMMU mappings

This patch therefore replaces the flag with two new boolean flags,
'has_iommu_pt' and 'sync_iommu_pt', to convey those meanings separately,
and adds the corresponding macros. All callers of need_iommu() are then
modified to use whichever macro matches what they are actually trying to
test.
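
To illustrate the distinction, here is a rough sketch of the two kinds of
call site (the authoritative macro definitions are in the sched.h hunk
below; the calls shown are modelled on hunks in this patch):

    /* Test whether the domain has IOMMU mappings at all, e.g. when
     * refusing to enable mem paging for a domain with passed-through
     * devices (cf. the vm_event.c hunk). */
    if ( unlikely(has_iommu_pt(d)) )
        rc = -EMLINK;

    /* Test whether a P2M update must also be mirrored into the IOMMU
     * mappings (cf. the p2m.c hunks). */
    if ( sync_iommu_pt(d) )
        rc = iommu_map_page(d, _bfn(gfn_l), _mfn(gfn_l),
                            IOMMUF_readable | IOMMUF_writable);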

NOTE: The test of need_iommu(d) in the AMD IOMMU initialization code has
      been replaced with a test of iommu_dom0_strict since this more
      accurately reflects the meaning of the test and brings it into
      line with a similar test in the Intel VT-d code.
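
      For reference, the change in that hunk is:

          -    if ( !iommu_passthrough && !need_iommu(d) )
          +    if ( !iommu_passthrough && !iommu_dom0_strict )

      This should be value-preserving: need_iommu(d) for the hardware
      domain was set from iommu_dom0_strict earlier, in iommu_hwdom_init()
      (see the iommu.c hunk below), so the two tests evaluate identically;
      the new form merely states the intent directly.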

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Julien Grall <julien.grall@xxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
Cc: Tim Deegan <tim@xxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Tamas K Lengyel <tamas@xxxxxxxxxxxxx>
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Cc: Jun Nakajima <jun.nakajima@xxxxxxxxx>
Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
Cc: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
Cc: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
Cc: Brian Woods <brian.woods@xxxxxxx>

v4:
 - New in v4.
---
 xen/arch/arm/p2m.c                          |  2 +-
 xen/arch/x86/hvm/mtrr.c                     |  5 +++--
 xen/arch/x86/mm.c                           |  2 +-
 xen/arch/x86/mm/mem_sharing.c               |  2 +-
 xen/arch/x86/mm/p2m-ept.c                   |  2 +-
 xen/arch/x86/mm/p2m-pt.c                    |  2 +-
 xen/arch/x86/mm/p2m.c                       |  8 ++++----
 xen/arch/x86/mm/paging.c                    |  2 +-
 xen/arch/x86/x86_64/mm.c                    |  3 ++-
 xen/common/memory.c                         |  6 +++---
 xen/common/vm_event.c                       |  2 +-
 xen/drivers/passthrough/amd/pci_amd_iommu.c |  2 +-
 xen/drivers/passthrough/device_tree.c       |  2 +-
 xen/drivers/passthrough/iommu.c             | 17 ++++++++++-------
 xen/drivers/passthrough/pci.c               |  6 +++---
 xen/include/asm-arm/grant_table.h           |  2 +-
 xen/include/asm-arm/iommu.h                 |  2 +-
 xen/include/asm-x86/grant_table.h           |  2 +-
 xen/include/asm-x86/iommu.h                 |  2 +-
 xen/include/xen/sched.h                     | 12 ++++++++----
 20 files changed, 46 insertions(+), 37 deletions(-)

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index eb39861b73..c972b72baf 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -951,7 +951,7 @@ static int __p2m_set_entry(struct p2m_domain *p2m,
     if ( lpae_valid(orig_pte) && entry->p2m.base != orig_pte.p2m.base )
         p2m_free_entry(p2m, orig_pte, level);
 
-    if ( need_iommu(p2m->domain) &&
+    if ( sync_iommu_pt(p2m->domain) &&
          (lpae_valid(orig_pte) || lpae_valid(*entry)) )
     {
         rc = iommu_iotlb_flush(p2m->domain, _bfn(gfn_x(sgfn)),
diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
index c502dda693..61131d4b61 100644
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -824,7 +824,8 @@ HVM_REGISTER_SAVE_RESTORE(MTRR, hvm_save_mtrr_msr, hvm_load_mtrr_msr,
 
 void memory_type_changed(struct domain *d)
 {
-    if ( need_iommu(d) && d->vcpu && d->vcpu[0] )
+    if ( (sync_iommu_pt(d) || iommu_use_hap_pt(d)) &&
+         d->vcpu && d->vcpu[0] )
     {
         p2m_memory_type_changed(d);
         flush_all(FLUSH_CACHE);
@@ -872,7 +873,7 @@ int epte_get_entry_emt(struct domain *d, unsigned long gfn, mfn_t mfn,
         return MTRR_TYPE_UNCACHABLE;
     }
 
-    if ( !need_iommu(d) && !cache_flush_permitted(d) )
+    if ( !has_iommu_pt(d) && !cache_flush_permitted(d) )
     {
         *ipat = 1;
         return MTRR_TYPE_WRBACK;
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 08878574f3..462398096f 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2727,7 +2727,7 @@ static int _get_page_type(struct page_info *page, unsigned long type,
     {
         /* Special pages should not be accessible from devices. */
         struct domain *d = page_get_owner(page);
-        if ( d && is_pv_domain(d) && unlikely(need_iommu(d)) )
+        if ( d && is_pv_domain(d) && unlikely(sync_iommu_pt(d)) )
         {
             mfn_t mfn = page_to_mfn(page);
 
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index fad8a9df13..c54845275f 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -1610,7 +1610,7 @@ int mem_sharing_domctl(struct domain *d, struct xen_domctl_mem_sharing_op *mec)
         case XEN_DOMCTL_MEM_SHARING_CONTROL:
         {
             rc = 0;
-            if ( unlikely(need_iommu(d) && mec->u.enable) )
+            if ( unlikely(has_iommu_pt(d) && mec->u.enable) )
                 rc = -EXDEV;
             else
                 d->arch.hvm_domain.mem_sharing_enabled = mec->u.enable;
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index e52dbb6203..d90424a9a3 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -868,7 +868,7 @@ out:
     {
         if ( iommu_use_hap_pt(d) )
             rc = iommu_pte_flush(d, gfn, &ept_entry->epte, order, vtd_pte_present);
-        else if ( need_iommu(d) )
+        else if ( sync_iommu_pt(d) )
         {
             bfn_t bfn = _bfn(gfn);
 
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 9255194070..e0ae6bf20f 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -687,7 +687,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
             if ( iommu_old_flags )
                 amd_iommu_flush_pages(p2m->domain, gfn, page_order);
         }
-        else if ( need_iommu(p2m->domain) )
+        else if ( sync_iommu_pt(p2m->domain) )
         {
             bfn_t bfn = _bfn(gfn);
 
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 7465489074..4ffce37c45 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -713,7 +713,7 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn_l, unsigned long mfn,
     {
         int rc = 0;
 
-        if ( need_iommu(p2m->domain) )
+        if ( sync_iommu_pt(p2m->domain) )
         {
             bfn_t bfn = _bfn(mfn);
 
@@ -777,7 +777,7 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
 
     if ( !paging_mode_translate(d) )
     {
-        if ( need_iommu(d) && t == p2m_ram_rw )
+        if ( sync_iommu_pt(d) && t == p2m_ram_rw )
         {
             bfn_t bfn = _bfn(mfn_x(mfn));
 
@@ -1165,7 +1165,7 @@ int set_identity_p2m_entry(struct domain *d, unsigned long gfn_l,
 
     if ( !paging_mode_translate(d) )
     {
-        if ( !need_iommu(d) )
+        if ( !sync_iommu_pt(d) )
             return 0;
 
         ret = iommu_map_page(d, _bfn(gfn_l), _mfn(gfn_l),
@@ -1261,7 +1261,7 @@ int clear_identity_p2m_entry(struct domain *d, unsigned long gfn_l)
 
     if ( !paging_mode_translate(d) )
     {
-        if ( !need_iommu(d) )
+        if ( !sync_iommu_pt(d) )
             return 0;
 
         ret = iommu_unmap_page(d, _bfn(gfn_l));
diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index 2b0445ffe9..d54f6dd496 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -213,7 +213,7 @@ int paging_log_dirty_enable(struct domain *d, bool_t log_global)
 {
     int ret;
 
-    if ( need_iommu(d) && log_global )
+    if ( has_iommu_pt(d) && log_global )
     {
         /*
          * Refuse to turn on global log-dirty mode
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index cc58e4cef4..29c3050a7f 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -1426,7 +1426,8 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
     if ( ret )
         goto destroy_m2p;
 
-    if ( iommu_enabled && !iommu_passthrough && !need_iommu(hardware_domain) )
+    if ( iommu_enabled && !iommu_passthrough &&
+         !sync_iommu_pt(hardware_domain) )
     {
         for ( i = spfn; i < epfn; i++ )
             if ( iommu_map_page(hardware_domain, _bfn(i), _mfn(i),
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 0a34677cc3..ad7aa09a5c 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -804,8 +804,8 @@ int xenmem_add_to_physmap(struct domain *d, struct xen_add_to_physmap *xatp,
     xatp->size -= start;
 
 #ifdef CONFIG_HAS_PASSTHROUGH
-    if ( need_iommu(d) )
-        this_cpu(iommu_dont_flush_iotlb) = 1;
+    if ( sync_iommu_pt(d) || iommu_use_hap_pt(d) )
+        this_cpu(iommu_dont_flush_iotlb) = 1;
 #endif
 
     while ( xatp->size > done )
@@ -827,7 +827,7 @@ int xenmem_add_to_physmap(struct domain *d, struct xen_add_to_physmap *xatp,
     }
 
 #ifdef CONFIG_HAS_PASSTHROUGH
-    if ( need_iommu(d) )
+    if ( sync_iommu_pt(d) || iommu_use_hap_pt(d) )
     {
         int ret;
 
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index 144ab81c86..ec3dfd1dae 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -644,7 +644,7 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec,
 
             /* No paging if iommu is used */
             rc = -EMLINK;
-            if ( unlikely(need_iommu(d)) )
+            if ( unlikely(has_iommu_pt(d)) )
                 break;
 
             rc = -EXDEV;
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index eea22c3d0d..752751b103 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -256,7 +256,7 @@ static void __hwdom_init amd_iommu_hwdom_init(struct domain *d)
     if ( allocate_domain_resources(dom_iommu(d)) )
         BUG();
 
-    if ( !iommu_passthrough && !need_iommu(d) )
+    if ( !iommu_passthrough && !iommu_dom0_strict )
     {
         int rc = 0;
 
diff --git a/xen/drivers/passthrough/device_tree.c b/xen/drivers/passthrough/device_tree.c
index 671c8db1d0..1aae1a1f93 100644
--- a/xen/drivers/passthrough/device_tree.c
+++ b/xen/drivers/passthrough/device_tree.c
@@ -44,7 +44,7 @@ int iommu_assign_dt_device(struct domain *d, struct dt_device_node *dev)
      * The hwdom is forced to use IOMMU for protecting assigned
      * device. Therefore the IOMMU data is already set up.
      */
-    ASSERT(!is_hardware_domain(d) || need_iommu(d));
+    ASSERT(!is_hardware_domain(d) || has_iommu_pt(d));
     rc = iommu_construct(d);
     if ( rc )
         goto fail;
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index ad8ffcfcb2..caf3d125ae 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -179,8 +179,9 @@ void __hwdom_init iommu_hwdom_init(struct domain *d)
         return;
 
     register_keyhandler('o', &iommu_dump_p2m_table, "dump iommu p2m table", 0);
-    d->need_iommu = !!iommu_dom0_strict;
-    if ( need_iommu(d) && !iommu_use_hap_pt(d) )
+    d->has_iommu_pt = true;
+    d->sync_iommu_pt = !!iommu_dom0_strict && !iommu_use_hap_pt(d);
+    if ( sync_iommu_pt(d) )
     {
         struct page_info *page;
         unsigned int i = 0;
@@ -219,14 +220,14 @@ void iommu_teardown(struct domain *d)
 {
     const struct domain_iommu *hd = dom_iommu(d);
 
-    d->need_iommu = false;
+    d->has_iommu_pt = false;
     hd->platform_ops->teardown(d);
     tasklet_schedule(&iommu_pt_cleanup_tasklet);
 }
 
 int iommu_construct(struct domain *d)
 {
-    if ( need_iommu(d) )
+    if ( has_iommu_pt(d) )
         return 0;
 
     if ( !iommu_use_hap_pt(d) )
@@ -236,12 +237,14 @@ int iommu_construct(struct domain *d)
         rc = arch_iommu_populate_page_table(d);
         if ( rc )
             return rc;
+
+        d->sync_iommu_pt = true;
     }
 
-    d->need_iommu = true;
+    d->has_iommu_pt = true;
     /*
      * There may be dirty cache lines when a device is assigned
-     * and before need_iommu(d) becoming true, this will cause
+     * and before has_iommu_pt(d) becoming true, this will cause
      * memory_type_changed lose effect if memory type changes.
      * Call memory_type_changed here to amend this.
      */
@@ -500,7 +503,7 @@ static void iommu_dump_p2m_table(unsigned char key)
     ops = iommu_get_ops();
     for_each_domain(d)
     {
-        if ( is_hardware_domain(d) || !need_iommu(d) )
+        if ( is_hardware_domain(d) || !has_iommu_pt(d) )
             continue;
 
         if ( iommu_use_hap_pt(d) )
diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
index c4890a4295..3d3ad484e7 100644
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -1416,7 +1416,7 @@ static int assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn, u32 flag)
 
     /* Prevent device assign if mem paging or mem sharing have been 
      * enabled for this domain */
-    if ( unlikely(!need_iommu(d) &&
+    if ( unlikely(!has_iommu_pt(d) &&
             (d->arch.hvm_domain.mem_sharing_enabled ||
              vm_event_check_ring(d->vm_event_paging) ||
              p2m_get_hostp2m(d)->global_logdirty)) )
@@ -1460,7 +1460,7 @@ static int assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn, u32 flag)
     }
 
  done:
-    if ( !has_arch_pdevs(d) && need_iommu(d) )
+    if ( !has_arch_pdevs(d) && has_iommu_pt(d) )
         iommu_teardown(d);
     pcidevs_unlock();
 
@@ -1510,7 +1510,7 @@ int deassign_device(struct domain *d, u16 seg, u8 bus, u8 devfn)
 
     pdev->fault.count = 0;
 
-    if ( !has_arch_pdevs(d) && need_iommu(d) )
+    if ( !has_arch_pdevs(d) && has_iommu_pt(d) )
         iommu_teardown(d);
 
     return ret;
diff --git a/xen/include/asm-arm/grant_table.h b/xen/include/asm-arm/grant_table.h
index 9c2c815526..f01c5e36c9 100644
--- a/xen/include/asm-arm/grant_table.h
+++ b/xen/include/asm-arm/grant_table.h
@@ -86,7 +86,7 @@ static inline unsigned int gnttab_dom0_max(void)
     gfn_x(((i) >= nr_status_frames(t)) ? INVALID_GFN : (t)->arch.status_gfn[i])
 
 #define gnttab_need_iommu_mapping(d)                    \
-    (is_domain_direct_mapped(d) && need_iommu(d))
+    (is_domain_direct_mapped(d) && sync_iommu_pt(d))
 
 #endif /* __ASM_GRANT_TABLE_H__ */
 /*
diff --git a/xen/include/asm-arm/iommu.h b/xen/include/asm-arm/iommu.h
index 8d1506c6f7..f6df32f860 100644
--- a/xen/include/asm-arm/iommu.h
+++ b/xen/include/asm-arm/iommu.h
@@ -21,7 +21,7 @@ struct arch_iommu
 };
 
 /* Always share P2M Table between the CPU and the IOMMU */
-#define iommu_use_hap_pt(d) (need_iommu(d))
+#define iommu_use_hap_pt(d) (has_iommu_pt(d))
 
 const struct iommu_ops *iommu_get_ops(void);
 void __init iommu_set_ops(const struct iommu_ops *ops);
diff --git a/xen/include/asm-x86/grant_table.h b/xen/include/asm-x86/grant_table.h
index 76ec5dda2c..80e64b74b7 100644
--- a/xen/include/asm-x86/grant_table.h
+++ b/xen/include/asm-x86/grant_table.h
@@ -99,6 +99,6 @@ static inline void gnttab_clear_flag(unsigned int nr, uint16_t *st)
 #define gnttab_release_host_mappings(domain) ( paging_mode_external(domain) )
 
 #define gnttab_need_iommu_mapping(d)                \
-    (!paging_mode_translate(d) && need_iommu(d))
+    (!paging_mode_translate(d) && sync_iommu_pt(d))
 
 #endif /* __ASM_GRANT_TABLE_H__ */
diff --git a/xen/include/asm-x86/iommu.h b/xen/include/asm-x86/iommu.h
index 9216c09162..963829a5a2 100644
--- a/xen/include/asm-x86/iommu.h
+++ b/xen/include/asm-x86/iommu.h
@@ -80,7 +80,7 @@ static inline int iommu_hardware_setup(void)
 
 /* Are we using the domain P2M table as its IOMMU pagetable? */
 #define iommu_use_hap_pt(d) \
-    (hap_enabled(d) && need_iommu(d) && iommu_hap_pt_share)
+    (hap_enabled(d) && has_iommu_pt(d) && iommu_hap_pt_share)
 
 void iommu_update_ire_from_apic(unsigned int apic, unsigned int reg, unsigned int value);
 unsigned int iommu_read_apic_from_ire(unsigned int apic, unsigned int reg);
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 5b9820e4e1..66da6d6b23 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -372,8 +372,10 @@ struct domain
 #ifdef CONFIG_HAS_PASSTHROUGH
     struct domain_iommu iommu;
 
-    /* Does this guest need iommu mappings? */
-    bool             need_iommu;
+    /* Does this guest have iommu mappings? */
+    bool             has_iommu_pt;
+    /* Does this guest need synchronization of iommu mappings? */
+    bool             sync_iommu_pt;
 #endif
     /* is node-affinity automatically computed? */
     bool             auto_node_affinity;
@@ -894,9 +896,11 @@ void watchdog_domain_destroy(struct domain *d);
 #define is_pinned_vcpu(v) ((v)->domain->is_pinned || \
                            cpumask_weight((v)->cpu_hard_affinity) == 1)
 #ifdef CONFIG_HAS_PASSTHROUGH
-#define need_iommu(d)    ((d)->need_iommu)
+#define has_iommu_pt(d) ((d)->has_iommu_pt)
+#define sync_iommu_pt(d) ((d)->sync_iommu_pt)
 #else
-#define need_iommu(d)    (0)
+#define has_iommu_pt(d) (0)
+#define sync_iommu_pt(d) (0)
 #endif
 
 static inline bool is_vcpu_online(const struct vcpu *v)
-- 
2.11.0

