[Xen-devel] [PATCH v3] x86/HVM: p2m_ram_ro is incompatible with device pass-through

The write-discard property of the p2m_ram_ro type can't be represented
in IOMMU page table entries. Make sure the respective checks / tracking
can't race, by utilizing the domain lock. The other sides of the
sharing/paging/log-dirty exclusion checks should perhaps subsequently
also be put under that lock.
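
The mutual exclusion thus works out as follows (a minimal sketch of the
pattern, condensed from the hunks below, with error handling and the
unrelated exclusion checks trimmed):

    /* dm.c side: claim p2m_ram_ro use, unless a device is assigned. */
    domain_lock(d);
    if ( has_arch_pdevs(d) )
        rc = -EXDEV;
    else
        d->arch.hvm.p2m_ram_ro_used = true;
    domain_unlock(d);

    /* pci.c side: refuse assignment once p2m_ram_ro is in use. */
    domain_lock(d);
    if ( is_hvm_domain(d) && d->arch.hvm.p2m_ram_ro_used )
        rc = -EXDEV;
    domain_unlock(d);

With both paths holding the domain lock across their check-and-update,
a racing set_mem_type() and assign_device() can't both succeed.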

This also fixes an unguarded d->arch.hvm access.

Take the opportunity to also convert the neighboring bool_t fields in
struct hvm_domain to bool.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
v3: Re-base.
v2: Don't set p2m_ram_ro_used when failing the request.

--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -255,16 +255,33 @@ static int set_mem_type(struct domain *d
 
     mem_type = array_index_nospec(data->mem_type, ARRAY_SIZE(memtype));
 
-    if ( mem_type == HVMMEM_ioreq_server )
+    switch ( mem_type )
     {
         unsigned int flags;
 
+    case HVMMEM_ioreq_server:
         if ( !hap_enabled(d) )
             return -EOPNOTSUPP;
 
         /* Do not change to HVMMEM_ioreq_server if no ioreq server mapped. */
         if ( !p2m_get_ioreq_server(d, &flags) )
             return -EINVAL;
+
+        break;
+
+    case HVMMEM_ram_ro:
+        /* p2m_ram_ro can't be represented in IOMMU mappings. */
+        domain_lock(d);
+        if ( has_arch_pdevs(d) )
+            rc = -EXDEV;
+        else
+            d->arch.hvm.p2m_ram_ro_used = true;
+        domain_unlock(d);
+
+        if ( rc )
+            return rc;
+
+        break;
     }
 
     while ( iter < data->nr )
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -1486,15 +1486,33 @@ static int assign_device(struct domain *
     if ( !is_iommu_enabled(d) )
         return 0;
 
-    /* Prevent device assign if mem paging or mem sharing have been 
-     * enabled for this domain */
-    if ( unlikely(d->arch.hvm.mem_sharing_enabled ||
-                  vm_event_check_ring(d->vm_event_paging) ||
+    domain_lock(d);
+
+    /*
+     * Prevent device assignment if any of
+     * - mem paging
+     * - mem sharing
+     * - the p2m_ram_ro type
+     * - global log-dirty mode
+     * are in use by this domain.
+     */
+    if ( unlikely(vm_event_check_ring(d->vm_event_paging) ||
+#ifdef CONFIG_HVM
+                  (is_hvm_domain(d) &&
+                   (d->arch.hvm.mem_sharing_enabled ||
+                    d->arch.hvm.p2m_ram_ro_used)) ||
+#endif
                   p2m_get_hostp2m(d)->global_logdirty) )
+    {
+        domain_unlock(d);
         return -EXDEV;
+    }
 
     if ( !pcidevs_trylock() )
+    {
+        domain_unlock(d);
         return -ERESTART;
+    }
 
     pdev = pci_get_pdev_by_domain(hardware_domain, seg, bus, devfn);
     if ( !pdev )
@@ -1525,6 +1543,7 @@ static int assign_device(struct domain *
 
  done:
     pcidevs_unlock();
+    domain_unlock(d);
 
     return rc;
 }
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -156,9 +156,10 @@ struct hvm_domain {
 
     struct viridian_domain *viridian;
 
-    bool_t                 mem_sharing_enabled;
-    bool_t                 qemu_mapcache_invalidate;
-    bool_t                 is_s3_suspended;
+    bool                   mem_sharing_enabled;
+    bool                   p2m_ram_ro_used;
+    bool                   qemu_mapcache_invalidate;
+    bool                   is_s3_suspended;
 
     /*
      * TSC value that VCPUs use to calculate their tsc_offset value.
