[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen master] xen/arm: vpl011: Add a new domctl API to initialize vpl011



commit 86039f2e8c20c06ab5234123a200f4617c14f77d
Author:     Bhupinder Thakur <bhupinder.thakur@xxxxxxxxxx>
AuthorDate: Wed Sep 27 11:43:15 2017 +0530
Commit:     Stefano Stabellini <sstabellini@xxxxxxxxxx>
CommitDate: Tue Oct 3 15:17:36 2017 -0700

    xen/arm: vpl011: Add a new domctl API to initialize vpl011
    
    Add a new domctl API to initialize vpl011. It takes the GFN and console
    backend domid as input and returns an event channel to be used for
    sending and receiving events from Xen.
    
    Xen will communicate with xenconsole using GFN as the ring buffer and
    the event channel to transmit and receive pl011 data on the guest domain's
    behalf.
    
    Signed-off-by: Bhupinder Thakur <bhupinder.thakur@xxxxxxxxxx>
    Reviewed-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
    Acked-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
 tools/libxc/include/xenctrl.h | 20 +++++++++++++++++
 tools/libxc/xc_domain.c       | 27 ++++++++++++++++++++++
 tools/libxl/libxl_arch.h      |  7 ++++++
 tools/libxl/libxl_arm.c       | 27 ++++++++++++++++++++++
 tools/libxl/libxl_dom.c       |  4 ++++
 tools/libxl/libxl_x86.c       |  8 +++++++
 xen/arch/arm/domain.c         |  6 +++++
 xen/arch/arm/domctl.c         | 52 +++++++++++++++++++++++++++++++++++++++++++
 xen/include/public/domctl.h   | 24 ++++++++++++++++++++
 9 files changed, 175 insertions(+)

diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index b970905..3bcab3c 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -886,6 +886,26 @@ int xc_vcpu_getcontext(xc_interface *xch,
                        vcpu_guest_context_any_t *ctxt);
 
 /**
+ * This function initializes the vuart emulation and returns
+ * the event to be used by the backend for communicating with
+ * the emulation code.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm type type of vuart
+ * @parm domid the domain to get information from
+ * @parm console_domid the domid of the backend console
+ * @parm gfn the guest pfn to be used as the ring buffer
+ * @parm evtchn the event channel to be used for events
+ * @return 0 on success, negative error on failure
+ */
+int xc_dom_vuart_init(xc_interface *xch,
+                      uint32_t type,
+                      domid_t domid,
+                      domid_t console_domid,
+                      xen_pfn_t gfn,
+                      evtchn_port_t *evtchn);
+
+/**
  * This function returns information about the XSAVE state of a particular
  * vcpu of a domain. If extstate->size and extstate->xfeature_mask are 0,
  * the call is considered a query to retrieve them and the buffer is not
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 4a70780..ccd2016 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -343,6 +343,33 @@ int xc_domain_get_guest_width(xc_interface *xch, uint32_t domid,
     return 0;
 }
 
+int xc_dom_vuart_init(xc_interface *xch,
+                      uint32_t type,
+                      domid_t domid,
+                      domid_t console_domid,
+                      xen_pfn_t gfn,
+                      evtchn_port_t *evtchn)
+{
+    DECLARE_DOMCTL;
+    int rc = 0;
+
+    memset(&domctl, 0, sizeof(domctl));
+
+    domctl.cmd = XEN_DOMCTL_vuart_op;
+    domctl.domain = domid;
+    domctl.u.vuart_op.cmd = XEN_DOMCTL_VUART_OP_INIT;
+    domctl.u.vuart_op.type = type;
+    domctl.u.vuart_op.console_domid = console_domid;
+    domctl.u.vuart_op.gfn = gfn;
+
+    if ( (rc = do_domctl(xch, &domctl)) < 0 )
+        return rc;
+
+    *evtchn = domctl.u.vuart_op.evtchn;
+
+    return rc;
+}
+
 int xc_domain_getinfo(xc_interface *xch,
                       uint32_t first_domid,
                       unsigned int max_doms,
diff --git a/tools/libxl/libxl_arch.h b/tools/libxl/libxl_arch.h
index 5e1fc60..784ec7f 100644
--- a/tools/libxl/libxl_arch.h
+++ b/tools/libxl/libxl_arch.h
@@ -44,6 +44,13 @@ int libxl__arch_domain_finalise_hw_description(libxl__gc *gc,
                                       libxl_domain_build_info *info,
                                       struct xc_dom_image *dom);
 
+/* perform any pending hardware initialization */
+_hidden
+int libxl__arch_build_dom_finish(libxl__gc *gc,
+                                 libxl_domain_build_info *info,
+                                 struct xc_dom_image *dom,
+                                 libxl__domain_build_state *state);
+
 /* build vNUMA vmemrange with arch specific information */
 _hidden
 int libxl__arch_vnuma_build_vmemrange(libxl__gc *gc,
diff --git a/tools/libxl/libxl_arm.c b/tools/libxl/libxl_arm.c
index d842d88..6f5bc3c 100644
--- a/tools/libxl/libxl_arm.c
+++ b/tools/libxl/libxl_arm.c
@@ -1038,6 +1038,33 @@ int libxl__arch_domain_finalise_hw_description(libxl__gc *gc,
     return 0;
 }
 
+int libxl__arch_build_dom_finish(libxl__gc *gc,
+                                 libxl_domain_build_info *info,
+                                 struct xc_dom_image *dom,
+                                 libxl__domain_build_state *state)
+{
+    int rc = 0, ret;
+
+    if (info->arch_arm.vuart != LIBXL_VUART_TYPE_SBSA_UART) {
+        rc = 0;
+        goto out;
+    }
+
+    ret = xc_dom_vuart_init(CTX->xch,
+                            XEN_DOMCTL_VUART_TYPE_VPL011,
+                            dom->guest_domid,
+                            dom->console_domid,
+                            dom->vuart_gfn,
+                            &state->vuart_port);
+    if (ret < 0) {
+        rc = ERROR_FAIL;
+        LOG(ERROR, "xc_dom_vuart_init failed\n");
+    }
+
+out:
+    return rc;
+}
+
 int libxl__arch_vnuma_build_vmemrange(libxl__gc *gc,
                                       uint32_t domid,
                                       libxl_domain_build_info *info,
diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
index 397f394..ef834e6 100644
--- a/tools/libxl/libxl_dom.c
+++ b/tools/libxl/libxl_dom.c
@@ -749,6 +749,10 @@ static int libxl__build_dom(libxl__gc *gc, uint32_t domid,
         LOGE(ERROR, "xc_dom_gnttab_init failed");
         goto out;
     }
+    if ((ret = libxl__arch_build_dom_finish(gc, info, dom, state)) != 0) {
+        LOGE(ERROR, "libxl__arch_build_dom_finish failed");
+        goto out;
+    }
 
 out:
     return ret != 0 ? ERROR_FAIL : 0;
diff --git a/tools/libxl/libxl_x86.c b/tools/libxl/libxl_x86.c
index d321b83..5f91fe4 100644
--- a/tools/libxl/libxl_x86.c
+++ b/tools/libxl/libxl_x86.c
@@ -392,6 +392,14 @@ int libxl__arch_domain_finalise_hw_description(libxl__gc *gc,
     return rc;
 }
 
+int libxl__arch_build_dom_finish(libxl__gc *gc,
+                                 libxl_domain_build_info *info,
+                                 struct xc_dom_image *dom,
+                                 libxl__domain_build_state *state)
+{
+    return 0;
+}
+
 /* Return 0 on success, ERROR_* on failure. */
 int libxl__arch_vnuma_build_vmemrange(libxl__gc *gc,
                                       uint32_t domid,
diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index e39a798..a74ff1c 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -872,6 +872,12 @@ int domain_relinquish_resources(struct domain *d)
         if ( ret )
             return ret;
 
+        /*
+         * Release the resources allocated for vpl011 which were
+         * allocated via a DOMCTL call XEN_DOMCTL_vuart_op.
+         */
+        domain_vpl011_deinit(d);
+
         d->arch.relmem = RELMEM_xen;
         /* Fallthrough */
 
diff --git a/xen/arch/arm/domctl.c b/xen/arch/arm/domctl.c
index 8232f44..4587c75 100644
--- a/xen/arch/arm/domctl.c
+++ b/xen/arch/arm/domctl.c
@@ -5,9 +5,11 @@
  */
 
 #include <xen/errno.h>
+#include <xen/guest_access.h>
 #include <xen/hypercall.h>
 #include <xen/iocap.h>
 #include <xen/lib.h>
+#include <xen/mm.h>
 #include <xen/sched.h>
 #include <xen/types.h>
 #include <xsm/xsm.h>
@@ -20,6 +22,29 @@ void arch_get_domain_info(const struct domain *d,
     info->flags |= XEN_DOMINF_hap;
 }
 
+static int handle_vuart_init(struct domain *d, 
+                             struct xen_domctl_vuart_op *vuart_op)
+{
+    int rc;
+    struct vpl011_init_info info;
+
+    info.console_domid = vuart_op->console_domid;
+    info.gfn = _gfn(vuart_op->gfn);
+
+    if ( d->creation_finished )
+        return -EPERM;
+
+    if ( vuart_op->type != XEN_DOMCTL_VUART_TYPE_VPL011 )
+        return -EOPNOTSUPP;
+
+    rc = domain_vpl011_init(d, &info);
+
+    if ( !rc )
+        vuart_op->evtchn = info.evtchn;
+
+    return rc;
+}
+
 long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
                     XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
 {
@@ -119,6 +144,33 @@ long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
         d->disable_migrate = domctl->u.disable_migrate.disable;
         return 0;
 
+    case XEN_DOMCTL_vuart_op:
+    {
+        int rc;
+        unsigned int i;
+        struct xen_domctl_vuart_op *vuart_op = &domctl->u.vuart_op;
+
+        /* check that structure padding must be 0. */
+        for ( i = 0; i < sizeof(vuart_op->pad); i++ )
+            if ( vuart_op->pad[i] )
+                return -EINVAL;
+
+        switch( vuart_op->cmd )
+        {
+        case XEN_DOMCTL_VUART_OP_INIT:
+            rc = handle_vuart_init(d, vuart_op);
+            break;
+
+        default:
+            rc = -EINVAL;
+            break;
+        }
+
+        if ( !rc )
+            rc = copy_to_guest(u_domctl, domctl, 1);
+
+        return rc;
+    }
     default:
     {
         int rc;
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index a6448ea..8853445 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -33,6 +33,7 @@
 #endif
 
 #include "xen.h"
+#include "event_channel.h"
 #include "grant_table.h"
 #include "hvm/save.h"
 #include "memory.h"
@@ -1079,6 +1080,27 @@ struct xen_domctl_set_gnttab_limits {
     uint32_t maptrack_frames;  /* IN */
 };
 
+/* XEN_DOMCTL_vuart_op */
+struct xen_domctl_vuart_op {
+#define XEN_DOMCTL_VUART_OP_INIT  0
+        uint32_t cmd;           /* XEN_DOMCTL_VUART_OP_* */
+#define XEN_DOMCTL_VUART_TYPE_VPL011 0
+        uint32_t type;          /* IN - type of vuart.
+                                 *      Currently only vpl011 supported.
+                                 */
+        uint64_aligned_t  gfn;  /* IN - guest gfn to be used as a
+                                 *      ring buffer.
+                                 */
+        domid_t console_domid;  /* IN - domid of domain running the
+                                 *      backend console.
+                                 */
+        uint8_t pad[2];
+        evtchn_port_t evtchn;   /* OUT - remote port of the event
+                                 *       channel used for sending
+                                 *       ring buffer events.
+                                 */
+};
+
 struct xen_domctl {
     uint32_t cmd;
 #define XEN_DOMCTL_createdomain                   1
@@ -1157,6 +1179,7 @@ struct xen_domctl {
 #define XEN_DOMCTL_psr_cat_op                    78
 #define XEN_DOMCTL_soft_reset                    79
 #define XEN_DOMCTL_set_gnttab_limits             80
+#define XEN_DOMCTL_vuart_op                      81
 #define XEN_DOMCTL_gdbsx_guestmemio            1000
 #define XEN_DOMCTL_gdbsx_pausevcpu             1001
 #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
@@ -1220,6 +1243,7 @@ struct xen_domctl {
         struct xen_domctl_monitor_op        monitor_op;
         struct xen_domctl_psr_cat_op        psr_cat_op;
         struct xen_domctl_set_gnttab_limits set_gnttab_limits;
+        struct xen_domctl_vuart_op          vuart_op;
         uint8_t                             pad[128];
     } u;
 };
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.