[Xen-devel] [RFC XEN PATCH v4 29/41] xen: add hypercall XENMEM_populate_pmem_map



This hypercall will be used by device models to map host PMEM pages to
the guest.
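
For illustration, a device model could invoke the new libxc wrapper
roughly as sketched below. This is a minimal sketch, not part of the
patch: the MFN, GFN and page count are placeholder values, and a real
device model would derive them from the host PMEM region layout and
the guest memory map.

    #include <xenctrl.h>

    /*
     * Illustrative only: map 256 host PMEM pages starting at MFN
     * 0x480000 into the guest at GFN 0x100000.
     */
    static int map_pmem_pages(uint32_t domid)
    {
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);
        int rc;

        if ( !xch )
            return -1;

        rc = xc_domain_populate_pmem_map(xch, domid,
                                         0x480000UL /* mfn */,
                                         0x100000UL /* gfn */,
                                         256        /* nr_mfns */);

        xc_interface_close(xch);
        return rc;
    }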

Signed-off-by: Haozhong Zhang <haozhong.zhang@xxxxxxxxx>
---
Cc: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Tim Deegan <tim@xxxxxxx>
---
 tools/flask/policy/modules/xen.if   |  3 ++-
 tools/libxc/include/xenctrl.h       | 17 ++++++++++++++
 tools/libxc/xc_domain.c             | 15 +++++++++++++
 xen/common/compat/memory.c          |  1 +
 xen/common/memory.c                 | 44 +++++++++++++++++++++++++++++++++++++
 xen/include/public/memory.h         | 14 +++++++++++-
 xen/include/xsm/dummy.h             | 11 ++++++++++
 xen/include/xsm/xsm.h               | 12 ++++++++++
 xen/xsm/dummy.c                     |  4 ++++
 xen/xsm/flask/hooks.c               | 13 +++++++++++
 xen/xsm/flask/policy/access_vectors |  2 ++
 11 files changed, 134 insertions(+), 2 deletions(-)

diff --git a/tools/flask/policy/modules/xen.if b/tools/flask/policy/modules/xen.if
index 55437496f6..8c2d6776f4 100644
--- a/tools/flask/policy/modules/xen.if
+++ b/tools/flask/policy/modules/xen.if
@@ -55,7 +55,8 @@ define(`create_domain_common', `
                        psr_cmt_op psr_cat_op soft_reset set_gnttab_limits };
        allow $1 $2:security check_context;
        allow $1 $2:shadow enable;
-       allow $1 $2:mmu { map_read map_write adjust memorymap physmap pinpage mmuext_op updatemp };
+       allow $1 $2:mmu { map_read map_write adjust memorymap physmap pinpage mmuext_op updatemp
+                       populate_pmem_map };
        allow $1 $2:grant setup;
        allow $1 $2:hvm { cacheattr getparam hvmctl sethvmc
                        setparam nested altp2mhvm altp2mhvm_op dm };
diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index 5194d3ff5e..4d66cbed0b 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -2678,6 +2678,23 @@ int xc_nvdimm_pmem_setup_data(xc_interface *xch,
                               unsigned long smfn, unsigned long emfn,
                               unsigned long mgmt_smfn, unsigned long mgmt_emfn);
 
+/*
+ * Map the specified host PMEM pages to the specified guest physical address.
+ *
+ * Parameters:
+ *  xch:     xc interface handle
+ *  domid:   the target domain id
+ *  mfn:     the start MFN of the PMEM pages
+ *  gfn:     the start GFN of the target guest physical pages
+ *  nr_mfns: the number of PMEM pages to be mapped
+ *
+ * Return:
+ *  On success, return 0. Otherwise, return a non-zero error code.
+ */
+int xc_domain_populate_pmem_map(xc_interface *xch, uint32_t domid,
+                                unsigned long mfn, unsigned long gfn,
+                                unsigned long nr_mfns);
+
 /* Compat shims */
 #include "xenctrl_compat.h"
 
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 3ccd27f101..a62470e6d8 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -2435,6 +2435,21 @@ int xc_domain_soft_reset(xc_interface *xch,
     domctl.domain = domid;
     return do_domctl(xch, &domctl);
 }
+
+int xc_domain_populate_pmem_map(xc_interface *xch, uint32_t domid,
+                                unsigned long mfn, unsigned long gfn,
+                                unsigned long nr_mfns)
+{
+    struct xen_pmem_map args = {
+        .domid   = domid,
+        .mfn     = mfn,
+        .gfn     = gfn,
+        .nr_mfns = nr_mfns,
+    };
+
+    return do_memory_op(xch, XENMEM_populate_pmem_map, &args, sizeof(args));
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/common/compat/memory.c b/xen/common/compat/memory.c
index 35bb259808..51bec835b9 100644
--- a/xen/common/compat/memory.c
+++ b/xen/common/compat/memory.c
@@ -525,6 +525,7 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
         case XENMEM_add_to_physmap:
         case XENMEM_remove_from_physmap:
         case XENMEM_access_op:
+        case XENMEM_populate_pmem_map:
             break;
 
         case XENMEM_get_vnumainfo:
diff --git a/xen/common/memory.c b/xen/common/memory.c
index a6ba33fdcb..2f870ad2b6 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -23,6 +23,7 @@
 #include <xen/numa.h>
 #include <xen/mem_access.h>
 #include <xen/trace.h>
+#include <xen/pmem.h>
 #include <asm/current.h>
 #include <asm/hardirq.h>
 #include <asm/p2m.h>
@@ -1408,6 +1409,49 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
     }
 #endif
 
+#ifdef CONFIG_NVDIMM_PMEM
+    case XENMEM_populate_pmem_map:
+    {
+        struct xen_pmem_map map;
+        struct xen_pmem_map_args args;
+
+        if ( copy_from_guest(&map, arg, 1) )
+            return -EFAULT;
+
+        if ( map.domid == DOMID_SELF )
+            return -EINVAL;
+
+        d = rcu_lock_domain_by_any_id(map.domid);
+        if ( !d )
+            return -EINVAL;
+
+        rc = xsm_populate_pmem_map(XSM_TARGET, curr_d, d);
+        if ( rc )
+        {
+            rcu_unlock_domain(d);
+            return rc;
+        }
+
+        args.domain = d;
+        args.mfn = map.mfn;
+        args.gfn = map.gfn;
+        args.nr_mfns = map.nr_mfns;
+        args.nr_done = start_extent;
+        args.preempted = 0;
+
+        rc = pmem_populate(&args);
+
+        rcu_unlock_domain(d);
+
+        if ( rc == -ERESTART && args.preempted )
+            return hypercall_create_continuation(
+                __HYPERVISOR_memory_op, "lh",
+                op | (args.nr_done << MEMOP_EXTENT_SHIFT), arg);
+
+        break;
+    }
+#endif /* CONFIG_NVDIMM_PMEM */
+
     default:
         rc = arch_memory_op(cmd, arg);
         break;
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index 29386df98b..d74436e4b0 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -650,7 +650,19 @@ struct xen_vnuma_topology_info {
 typedef struct xen_vnuma_topology_info xen_vnuma_topology_info_t;
 DEFINE_XEN_GUEST_HANDLE(xen_vnuma_topology_info_t);
 
-/* Next available subop number is 28 */
+#define XENMEM_populate_pmem_map 28
+
+struct xen_pmem_map {
+    /* IN */
+    domid_t domid;
+    unsigned long mfn;
+    unsigned long gfn;
+    unsigned long nr_mfns;
+};
+typedef struct xen_pmem_map xen_pmem_map_t;
+DEFINE_XEN_GUEST_HANDLE(xen_pmem_map_t);
+
+/* Next available subop number is 29 */
 
 #endif /* __XEN_PUBLIC_MEMORY_H__ */
 
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index b2cd56cdc5..1eb6595cfa 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -724,3 +724,14 @@ static XSM_INLINE int xsm_xen_version (XSM_DEFAULT_ARG uint32_t op)
         return xsm_default_action(XSM_PRIV, current->domain, NULL);
     }
 }
+
+#ifdef CONFIG_NVDIMM_PMEM
+
+static XSM_INLINE int xsm_populate_pmem_map(XSM_DEFAULT_ARG
+                                            struct domain *d1, struct domain *d2)
+{
+    XSM_ASSERT_ACTION(XSM_TARGET);
+    return xsm_default_action(action, d1, d2);
+}
+
+#endif /* CONFIG_NVDIMM_PMEM */
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index 7f7feffc68..e43e79f719 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -180,6 +180,10 @@ struct xsm_operations {
     int (*dm_op) (struct domain *d);
 #endif
     int (*xen_version) (uint32_t cmd);
+
+#ifdef CONFIG_NVDIMM_PMEM
+    int (*populate_pmem_map) (struct domain *d1, struct domain *d2);
+#endif
 };
 
 #ifdef CONFIG_XSM
@@ -692,6 +696,14 @@ static inline int xsm_xen_version (xsm_default_t def, uint32_t op)
     return xsm_ops->xen_version(op);
 }
 
+#ifdef CONFIG_NVDIMM_PMEM
+static inline int xsm_populate_pmem_map(xsm_default_t def,
+                                        struct domain *d1, struct domain *d2)
+{
+    return xsm_ops->populate_pmem_map(d1, d2);
+}
+#endif /* CONFIG_NVDIMM_PMEM */
+
 #endif /* XSM_NO_WRAPPERS */
 
 #ifdef CONFIG_MULTIBOOT
diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
index 479b103614..4d65eaca61 100644
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -157,4 +157,8 @@ void __init xsm_fixup_ops (struct xsm_operations *ops)
     set_to_dummy_if_null(ops, dm_op);
 #endif
     set_to_dummy_if_null(ops, xen_version);
+
+#ifdef CONFIG_NVDIMM_PMEM
+    set_to_dummy_if_null(ops, populate_pmem_map);
+#endif
 }
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index f677755512..47cfb81d64 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1722,6 +1722,15 @@ static int flask_xen_version (uint32_t op)
     }
 }
 
+#ifdef CONFIG_NVDIMM_PMEM
+
+static int flask_populate_pmem_map(struct domain *d1, struct domain *d2)
+{
+    return domain_has_perm(d1, d2, SECCLASS_MMU, MMU__POPULATE_PMEM_MAP);
+}
+
+#endif /* CONFIG_NVDIMM_PMEM */
+
 long do_flask_op(XEN_GUEST_HANDLE_PARAM(xsm_op_t) u_flask_op);
 int compat_flask_op(XEN_GUEST_HANDLE_PARAM(xsm_op_t) u_flask_op);
 
@@ -1855,6 +1864,10 @@ static struct xsm_operations flask_ops = {
     .dm_op = flask_dm_op,
 #endif
     .xen_version = flask_xen_version,
+
+#ifdef CONFIG_NVDIMM_PMEM
+    .populate_pmem_map = flask_populate_pmem_map,
+#endif /* CONFIG_NVDIMM_PMEM */
 };
 
 void __init flask_init(const void *policy_buffer, size_t policy_size)
diff --git a/xen/xsm/flask/policy/access_vectors b/xen/xsm/flask/policy/access_vectors
index 3bfbb892c7..daa6937c22 100644
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -389,6 +389,8 @@ class mmu
 # Allow a privileged domain to install a map of a page it does not own.  Used
 # for stub domain device models with the PV framebuffer.
     target_hack
+# XENMEM_populate_pmem_map
+    populate_pmem_map
 }
 
 # control of the paging_domctl split by subop
-- 
2.15.1

