
Re: [Xen-devel] [v8][PATCH 03/17] introduce XENMEM_reserved_device_memory_map



On 2014/12/3 3:47, Konrad Rzeszutek Wilk wrote:
On Mon, Dec 01, 2014 at 05:24:21PM +0800, Tiejun Chen wrote:
From: Jan Beulich <jbeulich@xxxxxxxx>

This is a prerequisite for punching holes into HVM and PVH guests' P2M
to allow passing through devices that are associated with (on VT-d)
RMRRs.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>
---

[snip]

@@ -1101,6 +1129,29 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
          break;
      }

+#ifdef HAS_PASSTHROUGH
+    case XENMEM_reserved_device_memory_map:
+    {
+        struct get_reserved_device_memory grdm;
+
+        if ( copy_from_guest(&grdm.map, arg, 1) ||
+             !guest_handle_okay(grdm.map.buffer, grdm.map.nr_entries) )
+            return -EFAULT;
+

Shouldn't there be an XSM check here?

I'm not sure, and Jan is the author of this patch, so he can give you a definitive reply.
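
(For illustration of what such a check might look like, a sketch only, assuming xsm_resource_setup_misc() were judged a suitable hook, which is exactly the open question here:)

    /* Hypothetical sketch, not part of this patch: gate the subop on an
     * XSM hook before reading the guest's request.  Whether
     * xsm_resource_setup_misc() is the right hook is an assumption. */
    rc = xsm_resource_setup_misc(XSM_PRIV);
    if ( rc )
        return rc;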


+        grdm.used_entries = 0;
+        rc = iommu_get_reserved_device_memory(get_reserved_device_memory,
+                                              &grdm);
+

Also, since we're iterating over possibly many nr_entries, should
we think about returning -EAGAIN to user-space so that it can retry?

Yes,

(As in, have preemption baked into this hypercall)

+        if ( !rc && grdm.map.nr_entries < grdm.used_entries )
+            rc = -ENOBUFS;

we have this return value.
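
(Note for readers: -ENOBUFS covers a too-small buffer rather than a long-running iteration. Baking preemption in, as Konrad suggests, would look roughly like the sketch below; purely illustrative, assuming the iteration can simply be restarted and that the usual continuation machinery in do_memory_op is reused:)

    /* Hypothetical sketch, not part of this patch: a preemptible wrapper
     * around the callback.  Assumes restarting the walk from scratch (or
     * carrying a resume index in ctxt) is acceptable. */
    static int grdm_preemptible(xen_pfn_t start, xen_ulong_t nr, void *ctxt)
    {
        if ( hypercall_preempt_check() )
            return -ERESTART;
        return get_reserved_device_memory(start, nr, ctxt);
    }

    /* ... and in do_memory_op(): */
    if ( rc == -ERESTART )
        rc = hypercall_create_continuation(__HYPERVISOR_memory_op,
                                           "lh", cmd, arg);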

Thanks
Tiejun

+        grdm.map.nr_entries = grdm.used_entries;
+        if ( __copy_to_guest(arg, &grdm.map, 1) )
+            rc = -EFAULT;
+
+        break;
+    }
+#endif
+
      default:
          rc = arch_memory_op(cmd, arg);
          break;
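
(The struct and callback used by the hunk above were snipped from the quote. To follow the flow: the callback must count every range reported by the IOMMU code, but only copy entries while the guest buffer has room, so that nr_entries can be reported back even on -ENOBUFS. A reconstructed sketch for illustration, not verbatim from the patch:)

    struct get_reserved_device_memory {
        struct xen_reserved_device_memory_map map;
        unsigned int used_entries;
    };

    static int get_reserved_device_memory(xen_pfn_t start,
                                          xen_ulong_t nr, void *ctxt)
    {
        struct get_reserved_device_memory *grdm = ctxt;

        if ( grdm->used_entries < grdm->map.nr_entries )
        {
            struct xen_reserved_device_memory rdm = {
                .start_pfn = start, .nr_pages = nr
            };

            /* Copy one entry into the guest-supplied buffer. */
            if ( __copy_to_guest_offset(grdm->map.buffer, grdm->used_entries,
                                        &rdm, 1) )
                return -EFAULT;
        }

        ++grdm->used_entries;   /* count even when the buffer is full */

        return 0;
    }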
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index cc12735..7c17e8d 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -344,6 +344,16 @@ void iommu_crash_shutdown(void)
      iommu_enabled = iommu_intremap = 0;
  }

+int iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt)
+{
+    const struct iommu_ops *ops = iommu_get_ops();
+
+    if ( !iommu_enabled || !ops->get_reserved_device_memory )
+        return 0;
+
+    return ops->get_reserved_device_memory(func, ctxt);
+}
+
  bool_t iommu_has_feature(struct domain *d, enum iommu_feature feature)
  {
      const struct hvm_iommu *hd = domain_hvm_iommu(d);
diff --git a/xen/drivers/passthrough/vtd/dmar.c b/xen/drivers/passthrough/vtd/dmar.c
index 5e41e7a..86cfad3 100644
--- a/xen/drivers/passthrough/vtd/dmar.c
+++ b/xen/drivers/passthrough/vtd/dmar.c
@@ -901,3 +901,20 @@ int platform_supports_x2apic(void)
      unsigned int mask = ACPI_DMAR_INTR_REMAP | ACPI_DMAR_X2APIC_OPT_OUT;
      return cpu_has_x2apic && ((dmar_flags & mask) == ACPI_DMAR_INTR_REMAP);
  }
+
+int intel_iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt)
+{
+    struct acpi_rmrr_unit *rmrr;
+    int rc = 0;
+
+    list_for_each_entry(rmrr, &acpi_rmrr_units, list)
+    {
+        rc = func(PFN_DOWN(rmrr->base_address),
+                  PFN_UP(rmrr->end_address) - PFN_DOWN(rmrr->base_address),
+                  ctxt);
+        if ( rc )
+            break;
+    }
+
+    return rc;
+}
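
(Note the rounding above: PFN_DOWN() on the base and PFN_UP() on the end make the page count cover the whole RMRR even when it isn't page-aligned. For example, base 0xE8000800 and end 0xE80017FF yield pfns 0xE8000 through 0xE8001, i.e. nr = 2.)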
diff --git a/xen/drivers/passthrough/vtd/extern.h b/xen/drivers/passthrough/vtd/extern.h
index 5524dba..f9ee9b0 100644
--- a/xen/drivers/passthrough/vtd/extern.h
+++ b/xen/drivers/passthrough/vtd/extern.h
@@ -75,6 +75,7 @@ int domain_context_mapping_one(struct domain *domain, struct iommu *iommu,
                                 u8 bus, u8 devfn, const struct pci_dev *);
  int domain_context_unmap_one(struct domain *domain, struct iommu *iommu,
                               u8 bus, u8 devfn);
+int intel_iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt);

  unsigned int io_apic_read_remap_rte(unsigned int apic, unsigned int reg);
  void io_apic_write_remap_rte(unsigned int apic,
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 19d8165..a38f201 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -2491,6 +2491,7 @@ const struct iommu_ops intel_iommu_ops = {
      .crash_shutdown = vtd_crash_shutdown,
      .iotlb_flush = intel_iommu_iotlb_flush,
      .iotlb_flush_all = intel_iommu_iotlb_flush_all,
+    .get_reserved_device_memory = intel_iommu_get_reserved_device_memory,
      .dump_p2m_table = vtd_dump_p2m_table,
  };

diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index 595f953..cee4535 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -572,7 +572,29 @@ struct xen_vnuma_topology_info {
  typedef struct xen_vnuma_topology_info xen_vnuma_topology_info_t;
  DEFINE_XEN_GUEST_HANDLE(xen_vnuma_topology_info_t);

-/* Next available subop number is 27 */
+/*
+ * For legacy reasons, some devices must be configured with special memory
+ * regions to function correctly.  The guest must avoid using any of these
+ * regions.
+ */
+#define XENMEM_reserved_device_memory_map   27
+struct xen_reserved_device_memory {
+    xen_pfn_t start_pfn;
+    xen_ulong_t nr_pages;
+};
+typedef struct xen_reserved_device_memory xen_reserved_device_memory_t;
+DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_t);
+
+struct xen_reserved_device_memory_map {
+    /* IN/OUT */
+    unsigned int nr_entries;
+    /* OUT */
+    XEN_GUEST_HANDLE(xen_reserved_device_memory_t) buffer;
+};
+typedef struct xen_reserved_device_memory_map xen_reserved_device_memory_map_t;
+DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_map_t);
+
+/* Next available subop number is 28 */

  #endif /* __XEN_PUBLIC_MEMORY_H__ */
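
(To illustrate the IN/OUT contract of nr_entries together with the -ENOBUFS path above: a caller probes first, then retries with a large enough buffer. Hypothetical sketch; hypercall_memory_op() is a stand-in for whatever wrapper the caller uses, and error handling is elided:)

    struct xen_reserved_device_memory_map rdmmap = { .nr_entries = 0 };
    struct xen_reserved_device_memory *entries;
    int rc;

    /* Probe: with a zero-sized buffer the hypervisor reports the total
     * entry count in nr_entries and returns -ENOBUFS (or 0 if none). */
    set_xen_guest_handle(rdmmap.buffer, NULL);
    rc = hypercall_memory_op(XENMEM_reserved_device_memory_map, &rdmmap);
    if ( rc == -ENOBUFS )
    {
        entries = calloc(rdmmap.nr_entries, sizeof(*entries));
        set_xen_guest_handle(rdmmap.buffer, entries);
        rc = hypercall_memory_op(XENMEM_reserved_device_memory_map, &rdmmap);
    }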

diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index 8eb764a..409f6f8 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -120,6 +120,8 @@ void iommu_dt_domain_destroy(struct domain *d);

  struct page_info;

+typedef int iommu_grdm_t(xen_pfn_t start, xen_ulong_t nr, void *ctxt);
+
  struct iommu_ops {
      int (*init)(struct domain *d);
      void (*hwdom_init)(struct domain *d);
@@ -156,12 +158,14 @@ struct iommu_ops {
      void (*crash_shutdown)(void);
      void (*iotlb_flush)(struct domain *d, unsigned long gfn, unsigned int page_count);
      void (*iotlb_flush_all)(struct domain *d);
+    int (*get_reserved_device_memory)(iommu_grdm_t *, void *);
      void (*dump_p2m_table)(struct domain *d);
  };

  void iommu_suspend(void);
  void iommu_resume(void);
  void iommu_crash_shutdown(void);
+int iommu_get_reserved_device_memory(iommu_grdm_t *, void *);

  void iommu_share_p2m_table(struct domain *d);

diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst
index 41b3e35..42229fd 100644
--- a/xen/include/xlat.lst
+++ b/xen/include/xlat.lst
@@ -61,9 +61,10 @@
  !     memory_exchange                 memory.h
  !     memory_map                      memory.h
  !     memory_reservation              memory.h
-?      mem_access_op           memory.h
+?      mem_access_op                   memory.h
  !     pod_target                      memory.h
  !     remove_from_physmap             memory.h
+!      reserved_device_memory_map      memory.h
  ?     physdev_eoi                     physdev.h
  ?     physdev_get_free_pirq           physdev.h
  ?     physdev_irq                     physdev.h
--
1.9.1


