[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 6/7] x86: add iommu_op to query reserved ranges



Certain areas of memory, such as RMRRs, must be mapped 1:1
(i.e. BFN == MFN) through the IOMMU.

This patch adds an iommu_op to allow these ranges to be queried.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Tim Deegan <tim@xxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
---
 xen/arch/x86/iommu_op.c       | 121 ++++++++++++++++++++++++++++++++++++++++++
 xen/include/public/iommu_op.h |  35 ++++++++++++
 xen/include/xlat.lst          |   2 +
 3 files changed, 158 insertions(+)

diff --git a/xen/arch/x86/iommu_op.c b/xen/arch/x86/iommu_op.c
index edd8a384b3..ac81b98b7a 100644
--- a/xen/arch/x86/iommu_op.c
+++ b/xen/arch/x86/iommu_op.c
@@ -22,6 +22,58 @@
 #include <xen/event.h>
 #include <xen/guest_access.h>
 #include <xen/hypercall.h>
+#include <xen/iommu.h>
+
+struct get_rdm_ctxt {
+    unsigned int max_entries;
+    unsigned int nr_entries;
+    XEN_GUEST_HANDLE(xen_iommu_reserved_region_t) regions;
+};
+
+/*
+ * Callback for iommu_get_reserved_device_memory(): copies one reserved
+ * range into the guest-supplied regions array while space remains, and
+ * counts the total number of ranges enumerated regardless of space.
+ */
+static int get_rdm(xen_pfn_t start, xen_ulong_t nr, u32 id, void *arg)
+{
+    struct get_rdm_ctxt *ctxt = arg;
+
+    if ( ctxt->nr_entries < ctxt->max_entries )
+    {
+        xen_iommu_reserved_region_t region = {
+            .start_bfn = start,
+            .nr_frames = nr,
+        };
+
+        if ( copy_to_guest_offset(ctxt->regions, ctxt->nr_entries, &region,
+                                  1) )
+            return -EFAULT;
+    }
+
+    /*
+     * Count every range, even once the guest array is full, so the caller
+     * can report the total needed (and -ENOBUFS) to the guest.
+     */
+    ctxt->nr_entries++;
+
+    /*
+     * NOTE(review): positive return presumably means "continue
+     * enumeration" — confirm against iommu_get_reserved_device_memory().
+     */
+    return 1;
+}
+
+/*
+ * Handle XEN_IOMMUOP_query_reserved: enumerate reserved (1:1 mapped)
+ * ranges into op->regions and pass back the total count found.
+ * Returns -ENOBUFS if the guest's array was too small to hold them all.
+ */
+static int iommuop_query_reserved(struct xen_iommu_op_query_reserved *op)
+{
+    struct get_rdm_ctxt ctxt = {
+        .max_entries = op->nr_entries,
+        .regions = op->regions,
+    };
+    int rc;
+
+    if ( op->pad )
+        return -EINVAL;
+
+    rc = iommu_get_reserved_device_memory(get_rdm, &ctxt);
+    if ( rc )
+        return rc;
+
+    /* Pass back the actual number of reserved regions */
+    op->nr_entries = ctxt.nr_entries;
+
+    if ( ctxt.nr_entries > ctxt.max_entries )
+        return -ENOBUFS;
+
+    return 0;
+}
 
 static bool can_control_iommu(void)
 {
@@ -45,6 +97,10 @@ static void iommu_op(xen_iommu_op_t *op)
 {
     switch ( op->op )
     {
+    case XEN_IOMMUOP_query_reserved:
+        op->status = iommuop_query_reserved(&op->u.query_reserved);
+        break;
+
     default:
         op->status = -EOPNOTSUPP;
         break;
@@ -119,6 +175,8 @@ int 
compat_iommu_op(XEN_GUEST_HANDLE_PARAM(compat_iommu_op_t) uops,
     {
         compat_iommu_op_t cmp;
         xen_iommu_op_t nat;
+        unsigned int u;
+        int32_t status;
 
         if ( ((i & 0xff) == 0xff) && hypercall_preempt_check() )
         {
@@ -132,12 +190,75 @@ int 
compat_iommu_op(XEN_GUEST_HANDLE_PARAM(compat_iommu_op_t) uops,
             break;
         }
 
+        /*
+         * The xlat magic doesn't quite know how to handle the union so
+         * we need to fix things up here.
+         */
+#define XLAT_iommu_op_u_query_reserved XEN_IOMMUOP_query_reserved
+        u = cmp.op;
+
+#define XLAT_iommu_op_query_reserved_HNDL_regions(_d_, _s_) \
+        do \
+        { \
+            if ( !compat_handle_is_null((_s_)->regions) ) \
+            { \
+                unsigned int *nr_entries = COMPAT_ARG_XLAT_VIRT_BASE; \
+                xen_iommu_reserved_region_t *regions = \
+                    (void *)(nr_entries + 1); \
+                \
+                if ( sizeof(*nr_entries) + \
+                     (sizeof(*regions) * (_s_)->nr_entries) > \
+                     COMPAT_ARG_XLAT_SIZE ) \
+                    return -E2BIG; \
+                \
+                *nr_entries = (_s_)->nr_entries; \
+                set_xen_guest_handle((_d_)->regions, regions); \
+            } \
+            else \
+                set_xen_guest_handle((_d_)->regions, NULL); \
+        } while (false)
+
         XLAT_iommu_op(&nat, &cmp);
 
+#undef XLAT_iommu_op_query_reserved_HNDL_regions
+
         iommu_op(&nat);
 
+        status = nat.status;
+
+#define XLAT_iommu_op_query_reserved_HNDL_regions(_d_, _s_) \
+        do \
+        { \
+            if ( !compat_handle_is_null((_d_)->regions) ) \
+            { \
+                unsigned int *nr_entries = COMPAT_ARG_XLAT_VIRT_BASE; \
+                xen_iommu_reserved_region_t *regions = \
+                    (void *)(nr_entries + 1); \
+                unsigned int j; \
+                \
+                for ( j = 0; \
+                      j < min_t(unsigned int, (_d_)->nr_entries, \
+                                *nr_entries); \
+                      j++ ) \
+                { \
+                    compat_iommu_reserved_region_t region; \
+                    \
+                    XLAT_iommu_reserved_region(&region, &regions[j]); \
+                    \
+                    if ( __copy_to_compat_offset((_d_)->regions, j, \
+                                                 &region, 1) ) \
+                        status = -EFAULT; \
+                } \
+            } \
+        } while (false)
+
         XLAT_iommu_op(&cmp, &nat);
 
+        /* The status will have been modified if __copy_to_compat_offset() failed */
+        cmp.status = status;
+
+#undef XLAT_iommu_op_query_reserved_HNDL_regions
+
         if ( copy_to_guest_offset(uops, i, &cmp, 1) )
         {
             rc = -EFAULT;
diff --git a/xen/include/public/iommu_op.h b/xen/include/public/iommu_op.h
index 202cb63fb5..24b8b9e0cc 100644
--- a/xen/include/public/iommu_op.h
+++ b/xen/include/public/iommu_op.h
@@ -25,11 +25,46 @@
 
 #include "xen.h"
 
+/*
+ * NOTE(review): public-ABI types should normally be fixed width;
+ * 'unsigned long' differs between 32- and 64-bit guests (which is why
+ * the xlat entries below are needed) — confirm this is intentional
+ * rather than using uint64_t here.
+ */
+typedef unsigned long xen_bfn_t;
+
+/* Structure describing a single region reserved in the IOMMU */
+struct xen_iommu_reserved_region {
+    xen_bfn_t start_bfn;
+    unsigned int nr_frames;
+    unsigned int pad;
+};
+typedef struct xen_iommu_reserved_region xen_iommu_reserved_region_t;
+DEFINE_XEN_GUEST_HANDLE(xen_iommu_reserved_region_t);
+
+/*
+ * XEN_IOMMUOP_query_reserved: Query ranges reserved in the IOMMU.
+ */
+#define XEN_IOMMUOP_query_reserved 1
+
+struct xen_iommu_op_query_reserved {
+    /*
+     * IN/OUT - On entry this is the number of entries available
+     *          in the regions array below.
+     *          On exit this is the actual number of reserved regions.
+     */
+    unsigned int nr_entries;
+    unsigned int pad;
+    /*
+     * OUT - This array is populated with reserved regions. If it is
+     *       not sufficiently large then available entries are populated,
+     *       but the op status code will be set to -ENOBUFS.
+     */
+    XEN_GUEST_HANDLE(xen_iommu_reserved_region_t) regions;
+};
+
 struct xen_iommu_op {
     uint16_t op;
     uint16_t flags; /* op specific flags */
     int32_t status; /* op completion status: */
                     /* 0 for success otherwise, negative errno */
+    union {
+        struct xen_iommu_op_query_reserved query_reserved;
+    } u;
 };
 typedef struct xen_iommu_op xen_iommu_op_t;
 DEFINE_XEN_GUEST_HANDLE(xen_iommu_op_t);
diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst
index 7409759084..a2070b6d7d 100644
--- a/xen/include/xlat.lst
+++ b/xen/include/xlat.lst
@@ -76,6 +76,8 @@
 ?      vcpu_hvm_context                hvm/hvm_vcpu.h
 ?      vcpu_hvm_x86_32                 hvm/hvm_vcpu.h
 ?      vcpu_hvm_x86_64                 hvm/hvm_vcpu.h
+!      iommu_reserved_region           iommu_op.h
+!      iommu_op_query_reserved         iommu_op.h
 !      iommu_op                        iommu_op.h
 ?      kexec_exec                      kexec.h
 !      kexec_image                     kexec.h
-- 
2.11.0


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.