
[Xen-devel] [PATCH v13 9/9] mm / iommu: split need_iommu() into has_iommu_pt() and need_iommu_pt_sync()



The name 'need_iommu()' is a little confusing as it suggests a domain
needs to use the IOMMU, but something might not be set up yet; in fact
it represents a tri-state value (not a boolean as might be expected)
where -1 means 'IOMMU mappings are being set up' and 1 means 'IOMMU
mappings have been fully set up'.

Two different meanings are also inferred from the macro in various
places in the code:

- Some callers want to test whether a domain has IOMMU mappings at all
- Some callers want to test whether they need to synchronize the domain's
  P2M and IOMMU mappings

This patch replaces the 'need_iommu' tri-state value with a defined
enumeration, adds a boolean flag 'need_sync' to separate these meanings,
and places both of these in struct domain_iommu rather than directly in
struct domain.
This patch also creates two new boolean macros (see the sketch below):

- 'has_iommu_pt()' evaluates to true if a domain has IOMMU mappings, even
  if they are still under construction.
- 'need_iommu_pt_sync()' evaluates to true if a domain requires explicit
  synchronization of the P2M and IOMMU mappings.

All callers of need_iommu() are then modified to use the macro appropriate
to what they are trying to test, except for the instance in
xen/drivers/passthrough/pci.c:assign_device(), which has simply been
removed since it appears to be unnecessary.

NOTE: There are some callers of need_iommu() that strictly operate on
      the hardware domain. In some of these cases a more global flag
      (e.g. iommu_hwdom_strict) is used instead, as illustrated below.
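
For example, amd_iommu_hwdom_init() switches from the per-domain test
to the global flag (from the pci_amd_iommu.c hunk below):

    -    if ( !iommu_hwdom_passthrough && !need_iommu(d) )
    +    if ( !iommu_hwdom_passthrough && !iommu_hwdom_strict )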

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Acked-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
---
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Julien Grall <julien.grall@xxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
Cc: Tim Deegan <tim@xxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Tamas K Lengyel <tamas@xxxxxxxxxxxxx>
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Cc: Jun Nakajima <jun.nakajima@xxxxxxxxx>
Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
Cc: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
Cc: Brian Woods <brian.woods@xxxxxxx>

v13:
 - Re-work checks in memory_add().
 - Extend commit comment to note removal of need_iommu() check in
   assign_device().

v12:
 - Fix two mis-uses of iommu_hap_pt_share().
 - Remove one use of has_iommu_pt() in passthrough/pci.c.

v11:
 - Pulled in from v6 of the full PV-IOMMU series.
 - Changed the condition being tested in memory_add() to be more self-
   explanatory but also added a comment to explain the circumstances
   under which iommu_map_page() needs to be called.
 - Get rid of #ifdef CONFIG_HAS_PASSTHROUGH in memory.c since the if
   clauses within will never be executed unless the option is defined
   (since has_iommu_pt() will always be false otherwise). Also, flushing
   should be done regardless of whether the IOMMU is sharing page tables
   or not.

v6:
 - Deal with need_iommu being tri-state.
 - Change the name of 'sync_iommu_pt' to 'need_iommu_pt_sync' and make
   sure it is set as soon as the mappings are under construction.
 - Not adding Razvan's A-b because of this change.

v4:
 - New in v4.
---
 xen/arch/arm/p2m.c                          |  2 +-
 xen/arch/x86/hvm/mtrr.c                     |  4 ++--
 xen/arch/x86/mm.c                           |  2 +-
 xen/arch/x86/mm/mem_sharing.c               |  2 +-
 xen/arch/x86/mm/p2m-ept.c                   |  2 +-
 xen/arch/x86/mm/p2m-pt.c                    |  2 +-
 xen/arch/x86/mm/p2m.c                       |  8 +++----
 xen/arch/x86/mm/paging.c                    |  2 +-
 xen/arch/x86/x86_64/mm.c                    | 10 ++++++--
 xen/common/memory.c                         | 10 +++-----
 xen/common/vm_event.c                       |  2 +-
 xen/drivers/passthrough/amd/pci_amd_iommu.c |  2 +-
 xen/drivers/passthrough/device_tree.c       | 21 ++++++++--------
 xen/drivers/passthrough/iommu.c             | 37 ++++++++++++++++++++++-------
 xen/drivers/passthrough/pci.c               | 11 ++++-----
 xen/drivers/passthrough/x86/iommu.c         |  2 --
 xen/include/asm-arm/grant_table.h           |  2 +-
 xen/include/asm-arm/iommu.h                 |  2 +-
 xen/include/asm-x86/grant_table.h           |  2 +-
 xen/include/asm-x86/iommu.h                 |  2 +-
 xen/include/xen/iommu.h                     | 17 +++++++++++++
 xen/include/xen/sched.h                     |  9 ++++---
 22 files changed, 93 insertions(+), 60 deletions(-)

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 1c79ff7ade..a0bec7c95c 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -955,7 +955,7 @@ static int __p2m_set_entry(struct p2m_domain *p2m,
     if ( lpae_is_valid(orig_pte) && entry->p2m.base != orig_pte.p2m.base )
         p2m_free_entry(p2m, orig_pte, level);
 
-    if ( need_iommu(p2m->domain) &&
+    if ( need_iommu_pt_sync(p2m->domain) &&
          (lpae_is_valid(orig_pte) || lpae_is_valid(*entry)) )
     {
         rc = iommu_iotlb_flush(p2m->domain, _dfn(gfn_x(sgfn)),
diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
index 4f2f195f7d..b8fa340d5a 100644
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -783,7 +783,7 @@ HVM_REGISTER_SAVE_RESTORE(MTRR, hvm_save_mtrr_msr, hvm_load_mtrr_msr, 1,
 
 void memory_type_changed(struct domain *d)
 {
-    if ( need_iommu(d) && d->vcpu && d->vcpu[0] )
+    if ( has_iommu_pt(d) && d->vcpu && d->vcpu[0] )
     {
         p2m_memory_type_changed(d);
         flush_all(FLUSH_CACHE);
@@ -831,7 +831,7 @@ int epte_get_entry_emt(struct domain *d, unsigned long gfn, mfn_t mfn,
         return MTRR_TYPE_UNCACHABLE;
     }
 
-    if ( !need_iommu(d) && !cache_flush_permitted(d) )
+    if ( !has_iommu_pt(d) && !cache_flush_permitted(d) )
     {
         *ipat = 1;
         return MTRR_TYPE_WRBACK;
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 5a7c5335a2..48acdadc8c 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2787,7 +2787,7 @@ static int _get_page_type(struct page_info *page, unsigned long type,
     {
         /* Special pages should not be accessible from devices. */
         struct domain *d = page_get_owner(page);
-        if ( d && is_pv_domain(d) && unlikely(need_iommu(d)) )
+        if ( d && is_pv_domain(d) && unlikely(need_iommu_pt_sync(d)) )
         {
             mfn_t mfn = page_to_mfn(page);
 
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index 349e6fd2cf..1dab2c8cc3 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -1612,7 +1612,7 @@ int mem_sharing_domctl(struct domain *d, struct xen_domctl_mem_sharing_op *mec)
         case XEN_DOMCTL_MEM_SHARING_CONTROL:
         {
             rc = 0;
-            if ( unlikely(need_iommu(d) && mec->u.enable) )
+            if ( unlikely(has_iommu_pt(d) && mec->u.enable) )
                 rc = -EXDEV;
             else
                 d->arch.hvm.mem_sharing_enabled = mec->u.enable;
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index f32df3eb7a..a524ac86fa 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -879,7 +879,7 @@ out:
     {
         if ( iommu_use_hap_pt(d) )
             rc = iommu_pte_flush(d, gfn, &ept_entry->epte, order, vtd_pte_present);
-        else if ( need_iommu(d) )
+        else if ( need_iommu_pt_sync(d) )
         {
             dfn_t dfn = _dfn(gfn);
 
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index bf36160993..cc834dc95f 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -688,7 +688,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
             if ( iommu_old_flags )
                 amd_iommu_flush_pages(p2m->domain, gfn, page_order);
         }
-        else if ( need_iommu(p2m->domain) )
+        else if ( need_iommu_pt_sync(p2m->domain) )
         {
             dfn_t dfn = _dfn(gfn);
 
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 4a98198004..7b63e39912 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -721,7 +721,7 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn_l, unsigned long mfn,
     {
         int rc = 0;
 
-        if ( need_iommu(p2m->domain) )
+        if ( need_iommu_pt_sync(p2m->domain) )
         {
             dfn_t dfn = _dfn(mfn);
 
@@ -785,7 +785,7 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
 
     if ( !paging_mode_translate(d) )
     {
-        if ( need_iommu(d) && t == p2m_ram_rw )
+        if ( need_iommu_pt_sync(d) && t == p2m_ram_rw )
         {
             dfn_t dfn = _dfn(mfn_x(mfn));
 
@@ -1175,7 +1175,7 @@ int set_identity_p2m_entry(struct domain *d, unsigned long gfn_l,
 
     if ( !paging_mode_translate(d) )
     {
-        if ( !need_iommu(d) )
+        if ( !need_iommu_pt_sync(d) )
             return 0;
 
         ret = iommu_map_page(d, _dfn(gfn_l), _mfn(gfn_l),
@@ -1271,7 +1271,7 @@ int clear_identity_p2m_entry(struct domain *d, unsigned long gfn_l)
 
     if ( !paging_mode_translate(d) )
     {
-        if ( !need_iommu(d) )
+        if ( !need_iommu_pt_sync(d) )
             return 0;
 
         ret = iommu_unmap_page(d, _dfn(gfn_l));
diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index 7f460bd321..f32a60188a 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -213,7 +213,7 @@ int paging_log_dirty_enable(struct domain *d, bool_t log_global)
 {
     int ret;
 
-    if ( need_iommu(d) && log_global )
+    if ( has_iommu_pt(d) && log_global )
     {
         /*
          * Refuse to turn on global log-dirty mode
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index d1fce57432..543ea030e3 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -1426,8 +1426,14 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
     if ( ret )
         goto destroy_m2p;
 
-    if ( iommu_enabled && !iommu_hwdom_passthrough &&
-         !need_iommu(hardware_domain) )
+    /*
+     * If hardware domain has IOMMU mappings but page tables are not
+     * shared or being kept in sync then newly added memory needs to be
+     * mapped here.
+     */
+    if ( has_iommu_pt(hardware_domain) &&
+         !iommu_use_hap_pt(hardware_domain) &&
+         !need_iommu_pt_sync(hardware_domain) )
     {
         for ( i = spfn; i < epfn; i++ )
             if ( iommu_map_page(hardware_domain, _dfn(i), _mfn(i),
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 52d55dfed7..db3334bae6 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -805,10 +805,8 @@ int xenmem_add_to_physmap(struct domain *d, struct xen_add_to_physmap *xatp,
     xatp->gpfn += start;
     xatp->size -= start;
 
-#ifdef CONFIG_HAS_PASSTHROUGH
-    if ( need_iommu(d) )
-        this_cpu(iommu_dont_flush_iotlb) = 1;
-#endif
+    if ( has_iommu_pt(d) )
+        this_cpu(iommu_dont_flush_iotlb) = 1;
 
     while ( xatp->size > done )
     {
@@ -828,8 +826,7 @@ int xenmem_add_to_physmap(struct domain *d, struct xen_add_to_physmap *xatp,
         }
     }
 
-#ifdef CONFIG_HAS_PASSTHROUGH
-    if ( need_iommu(d) )
+    if ( has_iommu_pt(d) )
     {
         int ret;
 
@@ -846,7 +843,6 @@ int xenmem_add_to_physmap(struct domain *d, struct xen_add_to_physmap *xatp,
         if ( unlikely(rc < 0) )
             domu_crash(d);
     }
-#endif
 
     return rc;
 }
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index 100da8048c..6ffd18a448 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -642,7 +642,7 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec,
 
             /* No paging if iommu is used */
             rc = -EMLINK;
-            if ( unlikely(need_iommu(d)) )
+            if ( unlikely(has_iommu_pt(d)) )
                 break;
 
             rc = -EXDEV;
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index 5e99b6988e..688cf14c91 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -263,7 +263,7 @@ static void __hwdom_init amd_iommu_hwdom_init(struct domain *d)
     if ( allocate_domain_resources(dom_iommu(d)) )
         BUG();
 
-    if ( !iommu_hwdom_passthrough && !need_iommu(d) )
+    if ( !iommu_hwdom_passthrough && !iommu_hwdom_strict )
     {
         int rc = 0;
 
diff --git a/xen/drivers/passthrough/device_tree.c b/xen/drivers/passthrough/device_tree.c
index 421f003438..b6eaae7283 100644
--- a/xen/drivers/passthrough/device_tree.c
+++ b/xen/drivers/passthrough/device_tree.c
@@ -40,17 +40,16 @@ int iommu_assign_dt_device(struct domain *d, struct dt_device_node *dev)
     if ( !list_empty(&dev->domain_list) )
         goto fail;
 
-    if ( need_iommu(d) <= 0 )
-    {
-        /*
-         * The hwdom is forced to use IOMMU for protecting assigned
-         * device. Therefore the IOMMU data is already set up.
-         */
-        ASSERT(!is_hardware_domain(d));
-        rc = iommu_construct(d);
-        if ( rc )
-            goto fail;
-    }
+    /*
+     * The hwdom is forced to use IOMMU for protecting assigned
+     * device. Therefore the IOMMU data is already set up.
+     */
+    ASSERT(!is_hardware_domain(d) ||
+           hd->status == IOMMU_STATUS_initialized);
+
+    rc = iommu_construct(d);
+    if ( rc )
+        goto fail;
 
     /* The flag field doesn't matter to DT device. */
     rc = hd->platform_ops->assign_device(d, 0, dt_to_dev(dev), 0);
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index ea7ccbace7..922e508a45 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -197,7 +197,7 @@ static void __hwdom_init check_hwdom_reqs(struct domain *d)
 
 void __hwdom_init iommu_hwdom_init(struct domain *d)
 {
-    const struct domain_iommu *hd = dom_iommu(d);
+    struct domain_iommu *hd = dom_iommu(d);
 
     check_hwdom_reqs(d);
 
@@ -205,8 +205,10 @@ void __hwdom_init iommu_hwdom_init(struct domain *d)
         return;
 
     register_keyhandler('o', &iommu_dump_p2m_table, "dump iommu p2m table", 0);
-    d->need_iommu = iommu_hwdom_strict;
-    if ( need_iommu(d) && !iommu_use_hap_pt(d) )
+
+    hd->status = IOMMU_STATUS_initializing;
+    hd->need_sync = iommu_hwdom_strict && !iommu_use_hap_pt(d);
+    if ( need_iommu_pt_sync(d) )
     {
         struct page_info *page;
         unsigned int i = 0;
@@ -239,35 +241,51 @@ void __hwdom_init iommu_hwdom_init(struct domain *d)
     }
 
     hd->platform_ops->hwdom_init(d);
+
+    hd->status = IOMMU_STATUS_initialized;
 }
 
 void iommu_teardown(struct domain *d)
 {
-    const struct domain_iommu *hd = dom_iommu(d);
+    struct domain_iommu *hd = dom_iommu(d);
 
-    d->need_iommu = 0;
+    hd->status = IOMMU_STATUS_disabled;
     hd->platform_ops->teardown(d);
     tasklet_schedule(&iommu_pt_cleanup_tasklet);
 }
 
 int iommu_construct(struct domain *d)
 {
-    if ( need_iommu(d) > 0 )
+    struct domain_iommu *hd = dom_iommu(d);
+
+    if ( hd->status == IOMMU_STATUS_initialized )
         return 0;
 
     if ( !iommu_use_hap_pt(d) )
     {
         int rc;
 
+        hd->status = IOMMU_STATUS_initializing;
+        hd->need_sync = true;
+
         rc = arch_iommu_populate_page_table(d);
         if ( rc )
+        {
+            if ( rc != -ERESTART )
+            {
+                hd->need_sync = false;
+                hd->status = IOMMU_STATUS_disabled;
+            }
+
             return rc;
+        }
     }
 
-    d->need_iommu = 1;
+    hd->status = IOMMU_STATUS_initialized;
+
     /*
      * There may be dirty cache lines when a device is assigned
-     * and before need_iommu(d) becoming true, this will cause
+     * and before has_iommu_pt(d) becomes true, this will cause
      * memory_type_changed lose effect if memory type changes.
      * Call memory_type_changed here to amend this.
      */
@@ -522,7 +540,8 @@ static void iommu_dump_p2m_table(unsigned char key)
     ops = iommu_get_ops();
     for_each_domain(d)
     {
-        if ( is_hardware_domain(d) || need_iommu(d) <= 0 )
+        if ( is_hardware_domain(d) ||
+             dom_iommu(d)->status < IOMMU_STATUS_initialized )
             continue;
 
         if ( iommu_use_hap_pt(d) )
diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
index 9695cf566d..e5b9602762 100644
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -1416,10 +1416,9 @@ static int assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn, u32 flag)
 
     /* Prevent device assign if mem paging or mem sharing have been 
      * enabled for this domain */
-    if ( unlikely(!need_iommu(d) &&
-            (d->arch.hvm.mem_sharing_enabled ||
-             vm_event_check_ring(d->vm_event_paging) ||
-             p2m_get_hostp2m(d)->global_logdirty)) )
+    if ( unlikely(d->arch.hvm.mem_sharing_enabled ||
+                  vm_event_check_ring(d->vm_event_paging) ||
+                  p2m_get_hostp2m(d)->global_logdirty) )
         return -EXDEV;
 
     if ( !pcidevs_trylock() )
@@ -1460,7 +1459,7 @@ static int assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn, u32 flag)
     }
 
  done:
-    if ( !has_arch_pdevs(d) && need_iommu(d) )
+    if ( !has_arch_pdevs(d) && has_iommu_pt(d) )
         iommu_teardown(d);
     pcidevs_unlock();
 
@@ -1510,7 +1509,7 @@ int deassign_device(struct domain *d, u16 seg, u8 bus, u8 devfn)
 
     pdev->fault.count = 0;
 
-    if ( !has_arch_pdevs(d) && need_iommu(d) )
+    if ( !has_arch_pdevs(d) && has_iommu_pt(d) )
         iommu_teardown(d);
 
     return ret;
diff --git a/xen/drivers/passthrough/x86/iommu.c b/xen/drivers/passthrough/x86/iommu.c
index b6acfdfe95..1f1c6b0fa6 100644
--- a/xen/drivers/passthrough/x86/iommu.c
+++ b/xen/drivers/passthrough/x86/iommu.c
@@ -48,8 +48,6 @@ int arch_iommu_populate_page_table(struct domain *d)
     struct page_info *page;
     int rc = 0, n = 0;
 
-    d->need_iommu = -1;
-
     this_cpu(iommu_dont_flush_iotlb) = 1;
     spin_lock(&d->page_alloc_lock);
 
diff --git a/xen/include/asm-arm/grant_table.h b/xen/include/asm-arm/grant_table.h
index d8fde01651..37415b7821 100644
--- a/xen/include/asm-arm/grant_table.h
+++ b/xen/include/asm-arm/grant_table.h
@@ -90,7 +90,7 @@ void gnttab_mark_dirty(struct domain *d, mfn_t mfn);
     gfn_x(((i) >= nr_status_frames(t)) ? INVALID_GFN : (t)->arch.status_gfn[i])
 
 #define gnttab_need_iommu_mapping(d)                    \
-    (is_domain_direct_mapped(d) && need_iommu(d))
+    (is_domain_direct_mapped(d) && need_iommu_pt_sync(d))
 
 #endif /* __ASM_GRANT_TABLE_H__ */
 /*
diff --git a/xen/include/asm-arm/iommu.h b/xen/include/asm-arm/iommu.h
index 8d1506c6f7..f6df32f860 100644
--- a/xen/include/asm-arm/iommu.h
+++ b/xen/include/asm-arm/iommu.h
@@ -21,7 +21,7 @@ struct arch_iommu
 };
 
 /* Always share P2M Table between the CPU and the IOMMU */
-#define iommu_use_hap_pt(d) (need_iommu(d))
+#define iommu_use_hap_pt(d) (has_iommu_pt(d))
 
 const struct iommu_ops *iommu_get_ops(void);
 void __init iommu_set_ops(const struct iommu_ops *ops);
diff --git a/xen/include/asm-x86/grant_table.h b/xen/include/asm-x86/grant_table.h
index 761a8c33a5..1e6a98813d 100644
--- a/xen/include/asm-x86/grant_table.h
+++ b/xen/include/asm-x86/grant_table.h
@@ -94,6 +94,6 @@ static inline void gnttab_clear_flag(unsigned int nr, uint16_t *st)
 #define gnttab_release_host_mappings(domain) ( paging_mode_external(domain) )
 
 #define gnttab_need_iommu_mapping(d)                \
-    (!paging_mode_translate(d) && need_iommu(d))
+    (!paging_mode_translate(d) && need_iommu_pt_sync(d))
 
 #endif /* __ASM_GRANT_TABLE_H__ */
diff --git a/xen/include/asm-x86/iommu.h b/xen/include/asm-x86/iommu.h
index 7c3187c8ec..fa37b0539b 100644
--- a/xen/include/asm-x86/iommu.h
+++ b/xen/include/asm-x86/iommu.h
@@ -91,7 +91,7 @@ static inline int iommu_hardware_setup(void)
 
 /* Are we using the domain P2M table as its IOMMU pagetable? */
 #define iommu_use_hap_pt(d) \
-    (hap_enabled(d) && need_iommu(d) && iommu_hap_pt_share)
+    (hap_enabled(d) && has_iommu_pt(d) && iommu_hap_pt_share)
 
 void iommu_update_ire_from_apic(unsigned int apic, unsigned int reg, unsigned int value);
 unsigned int iommu_read_apic_from_ire(unsigned int apic, unsigned int reg);
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index 9ae8321bb4..7ebaca5ca4 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -103,6 +103,13 @@ enum iommu_feature
 
 bool_t iommu_has_feature(struct domain *d, enum iommu_feature feature);
 
+enum iommu_status
+{
+    IOMMU_STATUS_disabled,
+    IOMMU_STATUS_initializing,
+    IOMMU_STATUS_initialized
+};
+
 struct domain_iommu {
     struct arch_iommu arch;
 
@@ -116,6 +123,16 @@ struct domain_iommu {
 
     /* Features supported by the IOMMU */
     DECLARE_BITMAP(features, IOMMU_FEAT_count);
+
+    /* Status of guest IOMMU mappings */
+    enum iommu_status status;
+
+    /*
+     * Does the guest require mappings to be synchronized, to maintain
+     * the default dfn == pfn map? (See comment on dfn at the top of
+     * include/xen/mm.h.)
+     */
+    bool need_sync;
 };
 
 #define dom_iommu(d)              (&(d)->iommu)
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index f2c594d197..63fb70bcdc 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -371,9 +371,6 @@ struct domain
 
 #ifdef CONFIG_HAS_PASSTHROUGH
     struct domain_iommu iommu;
-
-    /* Does this guest need iommu mappings (-1 meaning "being set up")? */
-    s8               need_iommu;
 #endif
     /* is node-affinity automatically computed? */
     bool             auto_node_affinity;
@@ -898,9 +895,11 @@ static inline bool is_hvm_vcpu(const struct vcpu *v)
 #define is_pinned_vcpu(v) ((v)->domain->is_pinned || \
                            cpumask_weight((v)->cpu_hard_affinity) == 1)
 #ifdef CONFIG_HAS_PASSTHROUGH
-#define need_iommu(d)    ((d)->need_iommu)
+#define has_iommu_pt(d) (dom_iommu(d)->status != IOMMU_STATUS_disabled)
+#define need_iommu_pt_sync(d) (dom_iommu(d)->need_sync)
 #else
-#define need_iommu(d)    (0)
+#define has_iommu_pt(d) false
+#define need_iommu_pt_sync(d) false
 #endif
 
 static inline bool is_vcpu_online(const struct vcpu *v)
-- 
2.11.0

