
Re: [Xen-devel] [v5][PATCH 03/10] xen:x86: define a new hypercall to get RMRR mappings



On 2014/9/3 9:45, Chen, Tiejun wrote:
On 2014/9/2 21:15, Jan Beulich wrote:
On 02.09.14 at 13:10, <tiejun.chen@xxxxxxxxx> wrote:
On 2014/9/2 18:15, Jan Beulich wrote:
On 02.09.14 at 11:59, <tiejun.chen@xxxxxxxxx> wrote:
+    case XENMEM_reserved_device_memory_map:
+    {
+        struct xen_mem_reserved_device_memory *xmrdm = NULL;
+        struct xen_mem_reserved_device_memory_map xmrdmm;
+        XEN_GUEST_HANDLE(xen_mem_reserved_device_memory_t) buffer;
+        XEN_GUEST_HANDLE_PARAM(xen_mem_reserved_device_memory_t) buffer_param;
+        const struct iommu_ops *ops = iommu_get_ops();
+        unsigned int nr_entries = 0;
+        unsigned int i = 0;
+
+        xmrdm = ops->get_device_reserved_memory(&nr_entries);

Do we still need this iommu_ops somewhere else?

Not this one, but another one (as I had described further down).

+        if ( !nr_entries )

Do we still need this 'nr_entries' here?

Doesn't look like it. But it's you coding this up - you ought to be able to
see clearly what is needed and what isn't.


I mean we need to get 'nr_entries' before doing any real work, since if
that is zero everything that follows is pointless. And even when it is
nonzero, the caller doesn't know how large a buffer to pass, so typically
we have to report the required size on the first call and the caller then
calls again with an appropriately sized buffer. Right?
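
To make that two-call flow concrete, here is a rough caller-side sketch.
It is purely illustrative: do_xenmem_rdm_map() stands in for whatever
issues the XENMEM_reserved_device_memory_map hypercall (e.g. some libxc
wrapper), struct rdm_entry is a simplified stand-in for
xen_mem_reserved_device_memory_t, and it assumes the handler reports the
required size via -ENOBUFS, as the patch below does.

#include <errno.h>
#include <stdlib.h>

/* Simplified stand-in for xen_mem_reserved_device_memory_t. */
struct rdm_entry {
    unsigned long start_pfn;
    unsigned long nr_pages;
};

/*
 * Assumed wrapper around the hypercall: fills 'buf' with up to *nr
 * entries, updates *nr to the total number of entries the hypervisor
 * knows about, and returns 0, -ENOBUFS, -ENOENT or -EFAULT.
 */
int do_xenmem_rdm_map(struct rdm_entry *buf, unsigned int *nr);

static struct rdm_entry *get_reserved_device_memory(unsigned int *nr_out)
{
    struct rdm_entry *buf;
    unsigned int nr = 0;
    int rc;

    /* First call with no buffer: learn how many entries there are. */
    rc = do_xenmem_rdm_map(NULL, &nr);
    if ( rc != -ENOBUFS || nr == 0 )
        return NULL;    /* nothing reported, or a real error */

    buf = calloc(nr, sizeof(*buf));
    if ( !buf )
        return NULL;

    /* Second call with an appropriately sized buffer. */
    rc = do_xenmem_rdm_map(buf, &nr);
    if ( rc )
    {
        free(buf);
        return NULL;
    }

    *nr_out = nr;
    return buf;
}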

So how do we get this 'nr_entries'? It seems we have to go into the VT-d
code to walk that list, or take some similar approach. If so, why would we
then go back into the VT-d code a second time just to fetch the actual
mapping entries?

Shouldn't we figure out an approach that needs just one call?


I update this patch based on this point:

xen/x86: define a hypercall to expose device reserved memory maps

We need such a new hypercall to get all device reserved memory
maps, so that we can check whether these maps overlap MMIO/RAM.
Note that currently this just addresses the RMRR issue.

Signed-off-by: Tiejun Chen <tiejun.chen@xxxxxxxxx>
---
 xen/arch/x86/mm.c                  | 23 +++++++++++++++++++++
 xen/drivers/passthrough/vtd/dmar.c | 41 ++++++++++++++++++++++++++++++++++++++
 xen/include/public/memory.h        | 37 +++++++++++++++++++++++++++++++++-
 xen/include/xen/mm.h               |  3 +++
 4 files changed, 103 insertions(+), 1 deletion(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index d23cb3f..db3345b 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -180,6 +180,14 @@ static uint32_t base_disallow_mask;
       is_pv_domain(d)) ?                                        \
      L1_DISALLOW_MASK : (L1_DISALLOW_MASK & ~PAGE_CACHE_ATTRS))

+extern int
+do_copy_device_reserved_memory(struct xen_mem_reserved_device_memory_map *xmrdmmap);
+static int
+copy_drmmap_to_guest(drmmap_callback_t f, struct xen_mem_reserved_device_memory_map *xmrdmmap)
+{
+    return f(xmrdmmap);
+}
+
 static void __init init_frametable_chunk(void *start, void *end)
 {
     unsigned long s = (unsigned long)start;
@@ -4842,6 +4850,21 @@ long arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         return rc;
     }

+    case XENMEM_reserved_device_memory_map:
+    {
+        struct xen_mem_reserved_device_memory_map xmrdmmap;
+
+        if ( copy_from_guest(&xmrdmmap, arg, 1) )
+            return -EFAULT;
+
+        rc = copy_drmmap_to_guest(do_copy_device_reserved_memory, &xmrdmmap);
+
+        if ( __copy_to_guest(arg, &xmrdmmap, 1) )
+            rc = -EFAULT;
+
+        return rc;
+    }
+
     default:
         return subarch_memory_op(cmd, arg);
     }
diff --git a/xen/drivers/passthrough/vtd/dmar.c b/xen/drivers/passthrough/vtd/dmar.c
index 1152c3a..6bc5def 100644
--- a/xen/drivers/passthrough/vtd/dmar.c
+++ b/xen/drivers/passthrough/vtd/dmar.c
@@ -28,6 +28,7 @@
 #include <xen/xmalloc.h>
 #include <xen/pci.h>
 #include <xen/pci_regs.h>
+#include <xen/guest_access.h>
 #include <asm/string.h>
 #include "dmar.h"
 #include "iommu.h"
@@ -681,6 +682,46 @@ acpi_parse_one_rmrr(struct acpi_dmar_header *header)
     return ret;
 }

+int
+do_copy_device_reserved_memory(struct xen_mem_reserved_device_memory_map *xmrdmmap)
+{
+    struct acpi_rmrr_unit *rmrru;
+    struct xen_mem_reserved_device_memory xmrdm;
+    int i = 0, rc = 0;
+    XEN_GUEST_HANDLE_PARAM(xen_mem_reserved_device_memory_t) buffer_param;
+    XEN_GUEST_HANDLE(xen_mem_reserved_device_memory_t) buffer;
+
+    if ( list_empty(&acpi_rmrr_units) )
+        return -ENOENT;
+
+    buffer_param = guest_handle_cast(xmrdmmap->buffer,
+                                     xen_mem_reserved_device_memory_t);
+    buffer = guest_handle_from_param(buffer_param,
+                                     xen_mem_reserved_device_memory_t);
+    if ( !guest_handle_okay(buffer, xmrdmmap->nr_entries) )
+        return -EFAULT;
+
+    list_for_each_entry(rmrru, &acpi_rmrr_units, list)
+    {
+        if ( i < xmrdmmap->nr_entries )
+        {
+            xmrdm.start_pfn = rmrru->base_address >> PAGE_SHIFT;
+            xmrdm.nr_pages = PAGE_ALIGN(rmrru->end_address -
+                                        rmrru->base_address) /
+                                        PAGE_SIZE;
+            if ( __copy_to_guest_offset(buffer, i, &xmrdm, 1) )
+                return -EFAULT;
+        }
+        else
+            rc = -ENOBUFS;
+        i++;
+    }
+
+    xmrdmmap->nr_entries = i;
+
+    return rc;
+}
+
 static int __init
 acpi_parse_one_atsr(struct acpi_dmar_header *header)
 {
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index 2c57aa0..628c99c 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -523,7 +523,42 @@ DEFINE_XEN_GUEST_HANDLE(xen_mem_sharing_op_t);

 #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */

-/* Next available subop number is 26 */
+/*
+ * For legacy reasons, some devices must be configured with special memory
+ * regions to function correctly. The guest must avoid using any of these
+ * regions.
+ *
+ * Currently this just covers the Reserved Memory Region Reporting (RMRR)
+ * structures, i.e. it returns the device reserved memory map as it was
+ * when the domain was started.
+ */
+#define XENMEM_reserved_device_memory_map   26
+struct xen_mem_reserved_device_memory {
+    /* Start PFN of the reserved device memory region. */
+    xen_pfn_t start_pfn;
+    /* Number of pages in the reserved region. */
+    xen_ulong_t nr_pages;
+};
+typedef struct xen_mem_reserved_device_memory xen_mem_reserved_device_memory_t;
+DEFINE_XEN_GUEST_HANDLE(xen_mem_reserved_device_memory_t);
+
+struct xen_mem_reserved_device_memory_map {
+    /*
+     * On call the number of entries which can be stored in buffer. On
+     * return the number of entries which have been stored in
+     * buffer.
+     */
+    unsigned int nr_entries;
+
+    /*
+     * xen_mem_reserved_device_memory entries in the buffer.
+     */
+    XEN_GUEST_HANDLE(xen_mem_reserved_device_memory_t) buffer;
+};
+typedef struct xen_mem_reserved_device_memory_map xen_mem_reserved_device_memory_map_t;
+DEFINE_XEN_GUEST_HANDLE(xen_mem_reserved_device_memory_map_t);
+
+/* Next available subop number is 27 */

 #endif /* __XEN_PUBLIC_MEMORY_H__ */

diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index b183189..4fceafd 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -31,6 +31,7 @@
 #include <xen/types.h>
 #include <xen/list.h>
 #include <xen/spinlock.h>
+#include <public/memory.h>

 struct domain;
 struct page_info;
@@ -371,4 +372,6 @@ int guest_remove_page(struct domain *d, unsigned long gmfn);
 /* TRUE if the whole page at @mfn is of the requested RAM type(s) above. */
 int page_is_ram_type(unsigned long mfn, unsigned long mem_type);

+typedef int (*drmmap_callback_t)(struct xen_mem_reserved_device_memory_map *xmrdmmap);
+
 #endif /* __XEN_MM_H__ */
--
1.9.1
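
As for the check the commit message mentions (whether these reserved
regions overlap MMIO/RAM), a minimal sketch of what a caller might do
with the returned entries could look like the following. The names,
the simplified structs and the page shift are assumptions for
illustration only; none of this is part of the patch.

#include <stdbool.h>
#include <stdint.h>

#define SKETCH_PAGE_SHIFT 12

/* Simplified mirror of xen_mem_reserved_device_memory. */
struct rdm_entry {
    uint64_t start_pfn;
    uint64_t nr_pages;
};

/* Half-open byte range [start, end). */
struct addr_range {
    uint64_t start, end;
};

static bool ranges_overlap(struct addr_range a, struct addr_range b)
{
    return a.start < b.end && b.start < a.end;
}

/*
 * Return true if any reserved device memory entry overlaps any of the
 * given MMIO/RAM ranges; the caller would then have to adjust its
 * memory layout before placing RAM or MMIO in the conflicting areas.
 */
static bool rdm_conflicts(const struct rdm_entry *rdm, unsigned int nr_rdm,
                          const struct addr_range *map, unsigned int nr_map)
{
    for ( unsigned int i = 0; i < nr_rdm; i++ )
    {
        struct addr_range r = {
            .start = rdm[i].start_pfn << SKETCH_PAGE_SHIFT,
            .end   = (rdm[i].start_pfn + rdm[i].nr_pages) << SKETCH_PAGE_SHIFT,
        };

        for ( unsigned int j = 0; j < nr_map; j++ )
            if ( ranges_overlap(r, map[j]) )
                return true;
    }

    return false;
}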

Thanks
Tiejun

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

