
[Xen-devel] [RFC XEN PATCH v3 11/39] xen/pmem: add XEN_SYSCTL_nvdimm_pmem_get_regions



XEN_SYSCTL_nvdimm_pmem_get_regions, a command of the hypercall
XEN_SYSCTL_nvdimm_op, returns a list of PMEM regions of the specified
type (see PMEM_REGION_TYPE_*).

Signed-off-by: Haozhong Zhang <haozhong.zhang@xxxxxxxxx>
---
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
---
 tools/libxc/include/xenctrl.h | 18 ++++++++++++
 tools/libxc/xc_misc.c         | 63 ++++++++++++++++++++++++++++++++++++++++
 xen/common/pmem.c             | 67 +++++++++++++++++++++++++++++++++++++++++++
 xen/include/public/sysctl.h   | 27 +++++++++++++++++
 4 files changed, 175 insertions(+)
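
Not part of the patch, but for illustration: a toolstack caller could
combine the new call with xc_nvdimm_pmem_get_regions_nr() along these
lines (function and variable names in this sketch are illustrative, and
error handling is minimal):

    #include <errno.h>
    #include <inttypes.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <xenctrl.h>

    /* Sketch: query the number of raw PMEM regions, then fetch them. */
    static int dump_raw_pmem_regions(xc_interface *xch)
    {
        xen_sysctl_nvdimm_pmem_raw_region_t *buf;
        uint32_t nr = 0, i;
        int rc = xc_nvdimm_pmem_get_regions_nr(xch, PMEM_REGION_TYPE_RAW, &nr);

        if ( rc || !nr )
            return rc;

        buf = calloc(nr, sizeof(*buf));
        if ( !buf )
            return -ENOMEM;

        /* On return, nr holds the number of entries actually written. */
        rc = xc_nvdimm_pmem_get_regions(xch, PMEM_REGION_TYPE_RAW, buf, &nr);
        if ( !rc )
            for ( i = 0; i < nr; i++ )
                printf("pmem: mfn 0x%"PRIx64" - 0x%"PRIx64", pxm %"PRIu32"\n",
                       buf[i].smfn, buf[i].emfn, buf[i].pxm);

        free(buf);
        return rc;
    }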

diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index e4d26967ba..d750e67460 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -2587,6 +2587,24 @@ int xc_domain_cacheflush(xc_interface *xch, uint32_t domid,
 int xc_nvdimm_pmem_get_regions_nr(xc_interface *xch,
                                   uint8_t type, uint32_t *nr);
 
+/*
+ * Get an array of information records for PMEM regions of the specified type.
+ *
+ * Parameters:
+ *  xch:    xc interface handle
+ *  type:   the type of PMEM regions, must be one of PMEM_REGION_TYPE_*
+ *  buffer: the buffer where the information of PMEM regions is returned;
+ *          the caller must allocate enough memory for it
+ *  nr:     IN: the maximum number of PMEM regions that can be returned
+ *              in @buffer
+ *          OUT: the actual number of PMEM regions returned in @buffer
+ *
+ * Return:
+ *  On success, return 0. Otherwise, return a non-zero error code.
+ */
+int xc_nvdimm_pmem_get_regions(xc_interface *xch, uint8_t type,
+                               void *buffer, uint32_t *nr);
+
 /* Compat shims */
 #include "xenctrl_compat.h"
 
diff --git a/tools/libxc/xc_misc.c b/tools/libxc/xc_misc.c
index fa66410869..f9ce802eda 100644
--- a/tools/libxc/xc_misc.c
+++ b/tools/libxc/xc_misc.c
@@ -912,6 +912,69 @@ int xc_nvdimm_pmem_get_regions_nr(xc_interface *xch, uint8_t type, uint32_t *nr)
     return rc;
 }
 
+int xc_nvdimm_pmem_get_regions(xc_interface *xch, uint8_t type,
+                               void *buffer, uint32_t *nr)
+{
+    DECLARE_SYSCTL;
+    DECLARE_HYPERCALL_BOUNCE(buffer, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+
+    xen_sysctl_nvdimm_op_t *nvdimm = &sysctl.u.nvdimm;
+    xen_sysctl_nvdimm_pmem_regions_t *regions = &nvdimm->u.pmem_regions;
+    unsigned int max;
+    unsigned long size;
+    int rc;
+
+    if ( !buffer || !nr )
+        return -EINVAL;
+
+    max = *nr;
+    if ( !max )
+        return 0;
+
+    switch ( type )
+    {
+    case PMEM_REGION_TYPE_RAW:
+        size = sizeof(xen_sysctl_nvdimm_pmem_raw_region_t) * max;
+        break;
+
+    default:
+        return -EINVAL;
+    }
+
+    HYPERCALL_BOUNCE_SET_SIZE(buffer, size);
+    if ( xc_hypercall_bounce_pre(xch, buffer) )
+        return -EFAULT;
+
+    sysctl.cmd = XEN_SYSCTL_nvdimm_op;
+    nvdimm->cmd = XEN_SYSCTL_nvdimm_pmem_get_regions;
+    nvdimm->pad = 0;
+    nvdimm->err = 0;
+    regions->type = type;
+    regions->num_regions = max;
+
+    switch ( type )
+    {
+    case PMEM_REGION_TYPE_RAW:
+        set_xen_guest_handle(regions->u_buffer.raw_regions, buffer);
+        break;
+
+    default:
+        rc = -EINVAL;
+        goto out;
+    }
+
+    rc = do_sysctl(xch, &sysctl);
+    if ( !rc )
+        *nr = regions->num_regions;
+    else if ( nvdimm->err )
+        rc = -nvdimm->err;
+
+out:
+    xc_hypercall_bounce_post(xch, buffer);
+
+    return rc;
+}
+
 /*
  * Local variables:
  * mode: C
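
(Background, not part of the patch: the bounce machinery above is how
libxc passes a caller-supplied buffer to Xen. The general shape, with
'field' standing for whichever guest-handle field the sysctl consumes,
is roughly:

    DECLARE_HYPERCALL_BOUNCE(buffer, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);

    HYPERCALL_BOUNCE_SET_SIZE(buffer, size);     /* size only known at runtime */
    if ( xc_hypercall_bounce_pre(xch, buffer) )  /* get hypercall-safe memory */
        return -EFAULT;
    set_xen_guest_handle(field, buffer);         /* point the sysctl at it */
    rc = do_sysctl(xch, &sysctl);
    xc_hypercall_bounce_post(xch, buffer);       /* copy back to caller, free */

XC_HYPERCALL_BUFFER_BOUNCE_OUT means data is copied back from the bounce
buffer to the caller's buffer after the hypercall, not into it before.)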
diff --git a/xen/common/pmem.c b/xen/common/pmem.c
index 995dfcb867..a737e7dc71 100644
--- a/xen/common/pmem.c
+++ b/xen/common/pmem.c
@@ -22,6 +22,8 @@
 #include <xen/paging.h>
 #include <xen/pmem.h>
 
+#include <asm/guest_access.h>
+
 /*
  * All PMEM regions presenting in NFIT SPA range structures are linked
  * in this list.
@@ -122,6 +124,67 @@ static int pmem_get_regions_nr(xen_sysctl_nvdimm_pmem_regions_nr_t *regions_nr)
     return rc;
 }
 
+static int pmem_get_raw_regions(
+    XEN_GUEST_HANDLE_64(xen_sysctl_nvdimm_pmem_raw_region_t) regions,
+    unsigned int *num_regions)
+{
+    struct list_head *cur;
+    unsigned int nr = 0, max = *num_regions;
+    xen_sysctl_nvdimm_pmem_raw_region_t region;
+    int rc = 0;
+
+    if ( !guest_handle_okay(regions, max) )
+        return -EFAULT;
+
+    list_for_each(cur, &pmem_raw_regions)
+    {
+        struct pmem *pmem = list_entry(cur, struct pmem, link);
+
+        if ( nr >= max )
+            break;
+
+        region.smfn = pmem->smfn;
+        region.emfn = pmem->emfn;
+        region.pxm = pmem->u.raw.pxm;
+
+        if ( copy_to_guest_offset(regions, nr, &region, 1) )
+        {
+            rc = -EFAULT;
+            break;
+        }
+
+        nr++;
+    }
+
+    *num_regions = nr;
+
+    return rc;
+}
+
+static int pmem_get_regions(xen_sysctl_nvdimm_pmem_regions_t *regions)
+{
+    unsigned int type = regions->type, max = regions->num_regions;
+    int rc = 0;
+
+    if ( !max )
+        return 0;
+
+    switch ( type )
+    {
+    case PMEM_REGION_TYPE_RAW:
+        rc = pmem_get_raw_regions(regions->u_buffer.raw_regions, &max);
+        break;
+
+    default:
+        rc = -EINVAL;
+    }
+
+    if ( !rc )
+        regions->num_regions = max;
+
+    return rc;
+}
+
 /**
  * Register a pmem region to Xen.
  *
@@ -167,6 +230,10 @@ int pmem_do_sysctl(struct xen_sysctl_nvdimm_op *nvdimm)
         rc = pmem_get_regions_nr(&nvdimm->u.pmem_regions_nr);
         break;
 
+    case XEN_SYSCTL_nvdimm_pmem_get_regions:
+        rc = pmem_get_regions(&nvdimm->u.pmem_regions);
+        break;
+
     default:
         rc = -ENOSYS;
     }
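
(Aside: copy_to_guest_offset() used above indexes the guest handle in
elements of the handle's type, not in bytes, which is why the loop can
pass the running entry count directly. A condensed, illustrative sketch
of one iteration's copy:

    /* Copies one xen_sysctl_nvdimm_pmem_raw_region_t to the guest at
     * element index nr, i.e. to guest address &regions.p[nr]; a non-zero
     * return value means not all bytes could be copied. */
    if ( copy_to_guest_offset(regions, nr, &region, 1) )
        return -EFAULT;
)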
diff --git a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h
index cf308bbc45..2635b1c911 100644
--- a/xen/include/public/sysctl.h
+++ b/xen/include/public/sysctl.h
@@ -1121,6 +1121,15 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_set_parameter_t);
 /* Types of PMEM regions */
 #define PMEM_REGION_TYPE_RAW        0 /* PMEM regions detected by Xen */
 
+/* PMEM_REGION_TYPE_RAW */
+struct xen_sysctl_nvdimm_pmem_raw_region {
+    uint64_t smfn;
+    uint64_t emfn;
+    uint32_t pxm;
+};
+typedef struct xen_sysctl_nvdimm_pmem_raw_region xen_sysctl_nvdimm_pmem_raw_region_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_nvdimm_pmem_raw_region_t);
+
 /* XEN_SYSCTL_nvdimm_pmem_get_regions_nr */
 struct xen_sysctl_nvdimm_pmem_regions_nr {
     uint8_t type;         /* IN: one of PMEM_REGION_TYPE_* */
@@ -1129,12 +1138,30 @@ struct xen_sysctl_nvdimm_pmem_regions_nr {
 typedef struct xen_sysctl_nvdimm_pmem_regions_nr xen_sysctl_nvdimm_pmem_regions_nr_t;
 DEFINE_XEN_GUEST_HANDLE(xen_sysctl_nvdimm_pmem_regions_nr_t);
 
+/* XEN_SYSCTL_nvdimm_pmem_get_regions */
+struct xen_sysctl_nvdimm_pmem_regions {
+    uint8_t type;         /* IN: one of PMEM_REGION_TYPE_* */
+    uint32_t num_regions; /* IN: the maximum number of entries that can be
+                                 returned via the guest handle in @u_buffer
+                             OUT: the actual number of entries returned via
+                                  the guest handle in @u_buffer */
+    union {
+        /* if type == PMEM_REGION_TYPE_RAW */
+        XEN_GUEST_HANDLE_64(xen_sysctl_nvdimm_pmem_raw_region_t) raw_regions;
+    } u_buffer;           /* IN: the guest handle through which entries of
+                                 PMEM regions of type @type are returned */
+};
+typedef struct xen_sysctl_nvdimm_pmem_regions xen_sysctl_nvdimm_pmem_regions_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_nvdimm_pmem_regions_t);
+
 struct xen_sysctl_nvdimm_op {
     uint32_t cmd; /* IN: XEN_SYSCTL_nvdimm_*. */
 #define XEN_SYSCTL_nvdimm_pmem_get_regions_nr     0
+#define XEN_SYSCTL_nvdimm_pmem_get_regions        1
     uint32_t pad; /* IN: Always zero. */
     union {
         xen_sysctl_nvdimm_pmem_regions_nr_t pmem_regions_nr;
+        xen_sysctl_nvdimm_pmem_regions_t pmem_regions;
     } u;
     uint32_t err; /* OUT: error code */
 };
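
One nuance of the IN/OUT num_regions field: the hypervisor silently stops
filling entries once the IN value is reached, so an OUT value equal to the
IN value does not tell the caller whether further regions exist. A caller
that wants all regions can grow the buffer and retry; an illustrative
sketch (xrealloc_or_die() is a hypothetical helper):

    xen_sysctl_nvdimm_pmem_raw_region_t *buf = NULL;
    uint32_t cap = 8, got;                  /* initial capacity guess */
    int rc;

    for ( ; ; )
    {
        buf = xrealloc_or_die(buf, cap * sizeof(*buf));
        got = cap;
        rc = xc_nvdimm_pmem_get_regions(xch, PMEM_REGION_TYPE_RAW, buf, &got);
        if ( rc || got < cap )
            break;                          /* error, or nothing was cut off */
        cap *= 2;                           /* buffer may have been full */
    }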
-- 
2.14.1

