Re: [Xen-devel] [PATCH 06/25 v6] xen/arm: vpl011: Add a new domctl API to initialize vpl011
On Mon, 17 Jul 2017, Bhupinder Thakur wrote:
> Add a new domctl API to initialize vpl011. It takes the GFN and console
> backend domid as input and returns an event channel to be used for
> sending and receiving events from Xen.
>
> Xen will communicate with xenconsole using GFN as the ring buffer and
> the event channel to transmit and receive pl011 data on the guest domain's
> behalf.
>
> Signed-off-by: Bhupinder Thakur <bhupinder.thakur@xxxxxxxxxx>
> ---
> CC: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
> CC: Wei Liu <wei.liu2@xxxxxxxxxx>
> CC: Stefano Stabellini <sstabellini@xxxxxxxxxx>
> CC: Julien Grall <julien.grall@xxxxxxx>
>
> Changes since v5:
> - xc_dom_vpl011_init() will be compiled for both x86/arm architectures as there
>   is nothing architecture specific in this function. This function will return
>   error when called for x86.
> - Fixed coding style issues in libxl.
>
> Changes since v4:
> - Removed libxl__arch_domain_create_finish().
> - Added a new function libxl__arch_build_dom_finish(), which is called at the last
>   in libxl__build_dom(). This function calls the vpl011 initialization function now.
>
> Changes since v3:
> - Added a new arch specific function libxl__arch_domain_create_finish(), which
>   calls the vpl011 initialization function. For x86 this function does not do
>   anything.
> - domain_vpl011_init() takes a pointer to a structure which contains all the
>   required information such as console_domid, gfn instead of passing parameters
>   separately.
> - Dropped a DOMCTL API defined for de-initializing vpl011 as that should be
>   taken care when the domain is destroyed (and not dependent on userspace
>   libraries/applications).
>
> Changes since v2:
> - Replaced the DOMCTL APIs defined for get/set of event channel and GFN with
>   a set of DOMCTL APIs for initializing and de-initializing vpl011 emulation.
>
>  tools/libxc/include/xenctrl.h | 18 ++++++++++++++++++
>  tools/libxc/xc_domain.c       | 24 ++++++++++++++++++++++++
>  tools/libxl/libxl_arch.h      |  6 ++++++
>  tools/libxl/libxl_arm.c       | 20 ++++++++++++++++++++
>  tools/libxl/libxl_dom.c       |  4 ++++
>  tools/libxl/libxl_x86.c       |  8 ++++++++
>  xen/arch/arm/domain.c         |  5 +++++
>  xen/arch/arm/domctl.c         | 37 +++++++++++++++++++++++++++++++++++++
>  xen/include/public/domctl.h   | 21 +++++++++++++++++++++
>  9 files changed, 143 insertions(+)
>
> diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
> index c51bb3b..423c6f3 100644
> --- a/tools/libxc/include/xenctrl.h
> +++ b/tools/libxc/include/xenctrl.h
> @@ -886,6 +886,24 @@ int xc_vcpu_getcontext(xc_interface *xch,
>                         vcpu_guest_context_any_t *ctxt);
>
>  /**
> + * This function initializes the vpl011 emulation and returns
> + * the event to be used by the backend for communicating with
> + * the emulation code.
> + *
> + * @parm xch a handle to an open hypervisor interface
> + * @parm domid the domain to get information from
> + * @parm console_domid the domid of the backend console
> + * @parm gfn the guest pfn to be used as the ring buffer
> + * @parm evtchn the event channel to be used for events
> + * @return 0 on success, negative error on failure
> + */
> +int xc_dom_vpl011_init(xc_interface *xch,
> +                       domid_t domid,
> +                       uint32_t console_domid,

Why is this uint32_t instead of domid_t?

> +                       xen_pfn_t gfn,
> +                       evtchn_port_t *evtchn);
> +
> +/**
>   * This function returns information about the XSAVE state of a particular
>   * vcpu of a domain. If extstate->size and extstate->xfeature_mask are 0,
>   * the call is considered a query to retrieve them and the buffer is not
> diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
> index 3bab4e8..fab3c5e 100644
> --- a/tools/libxc/xc_domain.c
> +++ b/tools/libxc/xc_domain.c
> @@ -343,6 +343,30 @@ int xc_domain_get_guest_width(xc_interface *xch, uint32_t domid,
>      return 0;
>  }
>
> +int xc_dom_vpl011_init(xc_interface *xch,
> +                       domid_t domid,
> +                       uint32_t console_domid,

same here of course

> +                       xen_pfn_t gfn,
> +                       evtchn_port_t *evtchn)
> +{
> +    DECLARE_DOMCTL;
> +    int rc = 0;
> +
> +    domctl.cmd = XEN_DOMCTL_vuart_op;
> +    domctl.domain = (domid_t)domid;
> +    domctl.u.vuart_op.cmd = XEN_DOMCTL_VUART_OP_INIT;
> +    domctl.u.vuart_op.type = XEN_DOMCTL_VUART_TYPE_VPL011;
> +    domctl.u.vuart_op.console_domid = console_domid;
> +    domctl.u.vuart_op.gfn = gfn;
> +
> +    if ( (rc = do_domctl(xch, &domctl)) < 0 )
> +        return rc;
> +
> +    *evtchn = domctl.u.vuart_op.evtchn;
> +
> +    return rc;
> +}
> +
>  int xc_domain_getinfo(xc_interface *xch,
>                        uint32_t first_domid,
>                        unsigned int max_doms,
> diff --git a/tools/libxl/libxl_arch.h b/tools/libxl/libxl_arch.h
> index 5e1fc60..118b92c 100644
> --- a/tools/libxl/libxl_arch.h
> +++ b/tools/libxl/libxl_arch.h
> @@ -44,6 +44,12 @@ int libxl__arch_domain_finalise_hw_description(libxl__gc *gc,
>                                        libxl_domain_build_info *info,
>                                        struct xc_dom_image *dom);
>
> +/* perform any pending hardware initialization */
> +int libxl__arch_build_dom_finish(libxl__gc *gc,
> +                                 libxl_domain_build_info *info,
> +                                 struct xc_dom_image *dom,
> +                                 libxl__domain_build_state *state);
> +
>  /* build vNUMA vmemrange with arch specific information */
>  _hidden
>  int libxl__arch_vnuma_build_vmemrange(libxl__gc *gc,
> diff --git a/tools/libxl/libxl_arm.c b/tools/libxl/libxl_arm.c
> index d842d88..e3e5791 100644
> --- a/tools/libxl/libxl_arm.c
> +++ b/tools/libxl/libxl_arm.c
> @@ -1038,6 +1038,26 @@ int libxl__arch_domain_finalise_hw_description(libxl__gc *gc,
>      return 0;
>  }
>
> +int libxl__arch_build_dom_finish(libxl__gc *gc,
> +                                 libxl_domain_build_info *info,
> +                                 struct xc_dom_image *dom,
> +                                 libxl__domain_build_state *state)
> +{
> +    int ret = 0;
> +
> +    if (info->arch_arm.vuart) {
> +        ret = xc_dom_vpl011_init(CTX->xch,
> +                                 dom->guest_domid,
> +                                 dom->console_domid,
> +                                 dom->vuart_gfn,
> +                                 &state->vuart_port);
> +        if (ret < 0)
> +            LOG(ERROR, "xc_dom_vpl011_init failed\n");
> +    }
> +
> +    return ret;
> +}
> +
>  int libxl__arch_vnuma_build_vmemrange(libxl__gc *gc,
>                                        uint32_t domid,
>                                        libxl_domain_build_info *info,
> diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
> index e0f0d78..5f92023 100644
> --- a/tools/libxl/libxl_dom.c
> +++ b/tools/libxl/libxl_dom.c
> @@ -702,6 +702,10 @@ static int libxl__build_dom(libxl__gc *gc, uint32_t domid,
>          LOGE(ERROR, "xc_dom_gnttab_init failed");
>          goto out;
>      }
> +    if ((ret = libxl__arch_build_dom_finish(gc, info, dom, state)) != 0) {
> +        LOGE(ERROR, "libxl__arch_build_dom_finish failed");
> +        goto out;
> +    }
>
>  out:
>      return ret != 0 ? ERROR_FAIL : 0;
> diff --git a/tools/libxl/libxl_x86.c b/tools/libxl/libxl_x86.c
> index 455f6f0..0aaeded 100644
> --- a/tools/libxl/libxl_x86.c
> +++ b/tools/libxl/libxl_x86.c
> @@ -391,6 +391,14 @@ int libxl__arch_domain_finalise_hw_description(libxl__gc *gc,
>      return rc;
>  }
>
> +int libxl__arch_build_dom_finish(libxl__gc *gc,
> +                                 libxl_domain_build_info *info,
> +                                 struct xc_dom_image *dom,
> +                                 libxl__domain_build_state *state)
> +{
> +    return 0;
> +}
> +
>  /* Return 0 on success, ERROR_* on failure. */
>  int libxl__arch_vnuma_build_vmemrange(libxl__gc *gc,
>                                        uint32_t domid,
> diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
> index 2dc8b0a..a1cf0b3 100644
> --- a/xen/arch/arm/domain.c
> +++ b/xen/arch/arm/domain.c
> @@ -672,6 +672,11 @@ fail:
>
>  void arch_domain_destroy(struct domain *d)
>  {
> +    /*
> +     * vpl011 is initialized via a DOMCTL call XEN_DOMCTL_vuart_op.
> +     */
> +    domain_vpl011_deinit(d);
> +
>      /* IOMMU page table is shared with P2M, always call
>       * iommu_domain_destroy() before p2m_teardown().
>       */
> diff --git a/xen/arch/arm/domctl.c b/xen/arch/arm/domctl.c
> index db6838d..64a4366 100644
> --- a/xen/arch/arm/domctl.c
> +++ b/xen/arch/arm/domctl.c
> @@ -5,9 +5,11 @@
>   */
>
>  #include <xen/errno.h>
> +#include <xen/guest_access.h>
>  #include <xen/hypercall.h>
>  #include <xen/iocap.h>
>  #include <xen/lib.h>
> +#include <xen/mm.h>
>  #include <xen/sched.h>
>  #include <xen/types.h>
>  #include <xsm/xsm.h>
> @@ -119,6 +121,41 @@ long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
>          d->disable_migrate = domctl->u.disable_migrate.disable;
>          return 0;
>
> +    case XEN_DOMCTL_vuart_op:
> +    {
> +        int rc;
> +        struct xen_domctl_vuart_op *vuart_op = &domctl->u.vuart_op;
> +
> +        switch(vuart_op->cmd)
> +        {
> +        case XEN_DOMCTL_VUART_OP_INIT:
> +
> +            if ( !d->creation_finished )
> +            {
> +                struct vpl011_init_info info;
> +
> +                info.console_domid = vuart_op->console_domid;
> +                info.gfn = _gfn(vuart_op->gfn);
> +
> +                rc = domain_vpl011_init(d, &info);
> +                if ( !rc )
> +                {
> +                    vuart_op->evtchn = info.evtchn;
> +                    rc = __copy_to_guest(u_domctl, domctl, 1);
> +                }
> +            }
> +            else
> +                rc = - EPERM;
> +
> +            break;
> +
> +        default:
> +            rc = -EINVAL;
> +            break;
> +        }
> +
> +        return rc;
> +    }
>      default:
>      {
>          int rc;
> diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
> index ff39762..f70b7a4 100644
> --- a/xen/include/public/domctl.h
> +++ b/xen/include/public/domctl.h
> @@ -36,6 +36,7 @@
>  #include "grant_table.h"
>  #include "hvm/save.h"
>  #include "memory.h"
> +#include "event_channel.h"
>
>  #define XEN_DOMCTL_INTERFACE_VERSION 0x0000000e
>
> @@ -1146,6 +1147,24 @@ struct xen_domctl_psr_cat_op {
>      uint32_t target;    /* IN */
>      uint64_t data;      /* IN/OUT */
>  };
> +
> +struct xen_domctl_vuart_op {
> +#define XEN_DOMCTL_VUART_OP_INIT  0
> +    uint32_t cmd;           /* XEN_DOMCTL_VUART_OP_* */
> +    uint32_t console_domid; /* IN */

shouldn't this be domid_t?

> +#define XEN_DOMCTL_VUART_TYPE_VPL011 0
> +    uint32_t type;          /* IN - type of vuart.
> +                             * Currently only vpl011 supported.
> +                             */
> +    xen_pfn_t gfn;          /* IN - guest gfn to be used as a
> +                             * ring buffer.
> +                             */
> +    evtchn_port_t evtchn;   /* OUT - remote port of the event
> +                             * channel used for sending
> +                             * ring buffer events.
> +                             */
> +};
> +
>  typedef struct xen_domctl_psr_cat_op xen_domctl_psr_cat_op_t;
>  DEFINE_XEN_GUEST_HANDLE(xen_domctl_psr_cat_op_t);
>
> @@ -1226,6 +1245,7 @@ struct xen_domctl {
>  #define XEN_DOMCTL_monitor_op                    77
>  #define XEN_DOMCTL_psr_cat_op                    78
>  #define XEN_DOMCTL_soft_reset                    79
> +#define XEN_DOMCTL_vuart_op                      80
>  #define XEN_DOMCTL_gdbsx_guestmemio            1000
>  #define XEN_DOMCTL_gdbsx_pausevcpu             1001
>  #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
> @@ -1288,6 +1308,7 @@ struct xen_domctl {
>          struct xen_domctl_psr_cmt_op        psr_cmt_op;
>          struct xen_domctl_monitor_op        monitor_op;
>          struct xen_domctl_psr_cat_op        psr_cat_op;
> +        struct xen_domctl_vuart_op          vuart_op;
>          uint8_t pad[128];
>      } u;
>  };
> --
> 2.7.4
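
For illustration only, and not part of the patch or the review above: the other end of this interface is the console backend, which is expected to map the GFN passed to xc_dom_vpl011_init() and bind the event channel it returns. Below is a minimal sketch of such a consumer, assuming the ring page reuses the existing struct xencons_interface layout from xen/io/console.h and the standard libxenevtchn/libxenforeignmemory calls; the helper attach_vuart_ring() is a hypothetical name, not an existing xenconsoled function.

/*
 * Illustrative sketch: a console backend consuming the GFN and event
 * channel produced by XEN_DOMCTL_VUART_OP_INIT.  Assumes the ring page
 * follows the xencons_interface layout; attach_vuart_ring() and its error
 * handling are hypothetical.
 */
#include <stdio.h>
#include <sys/mman.h>

#include <xenctrl.h>
#include <xenevtchn.h>
#include <xenforeignmemory.h>
#include <xen/io/console.h>

static int attach_vuart_ring(domid_t domid, xen_pfn_t ring_gfn,
                             evtchn_port_t remote_port)
{
    xenevtchn_handle *xce = xenevtchn_open(NULL, 0);
    xenforeignmemory_handle *fmem = xenforeignmemory_open(NULL, 0);
    struct xencons_interface *ring;
    xenevtchn_port_or_error_t local_port;

    if ( !xce || !fmem )
        return -1;

    /* Map the guest page that Xen uses as the vpl011 ring buffer. */
    ring = xenforeignmemory_map(fmem, domid, PROT_READ | PROT_WRITE,
                                1, &ring_gfn, NULL);
    if ( !ring )
        return -1;

    /* Bind the event channel allocated by the domctl on the guest's behalf. */
    local_port = xenevtchn_bind_interdomain(xce, domid, remote_port);
    if ( local_port < 0 )
        return -1;

    printf("vpl011 ring mapped, bound to local port %d\n", local_port);

    /*
     * From here the backend would poll xenevtchn_fd(xce), move bytes
     * through ring->in/ring->out, and notify the guest with
     * xenevtchn_notify(xce, local_port).
     */
    return 0;
}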