[Xen-devel] [PATCH v9 2/3] ioreq-server: remove p2m entries when server is enabled
For secondary servers, add an HVM op to enable/disable the server. The
server will not accept IO until it is enabled, and the act of enabling
the server removes its pages from the guest p2m, thus preventing the
guest from directly mapping the pages and synthesizing ioreqs.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
---
 tools/libxc/xc_domain.c          |   27 +++++++
 tools/libxc/xenctrl.h            |   18 +++++
 xen/arch/x86/hvm/hvm.c           |  151 +++++++++++++++++++++++++++++++++++++-
 xen/include/asm-x86/hvm/domain.h |    1 +
 xen/include/public/hvm/hvm_op.h  |   19 +++++
 5 files changed, 215 insertions(+), 1 deletion(-)

diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index a8c9f81..17a8417 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -1508,6 +1508,33 @@ int xc_hvm_destroy_ioreq_server(xc_interface *xch,
     return rc;
 }
 
+int xc_hvm_set_ioreq_server_state(xc_interface *xch,
+                                  domid_t domid,
+                                  ioservid_t id,
+                                  int enabled)
+{
+    DECLARE_HYPERCALL;
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_set_ioreq_server_state_t, arg);
+    int rc;
+
+    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
+    if ( arg == NULL )
+        return -1;
+
+    hypercall.op     = __HYPERVISOR_hvm_op;
+    hypercall.arg[0] = HVMOP_set_ioreq_server_state;
+    hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(arg);
+
+    arg->domid = domid;
+    arg->id = id;
+    arg->enabled = !!enabled;
+
+    rc = do_xen_hypercall(xch, &hypercall);
+
+    xc_hypercall_buffer_free(xch, arg);
+    return rc;
+}
+
 int xc_domain_setdebugging(xc_interface *xch,
                            uint32_t domid,
                            unsigned int enable)
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index 3260a56..2045084 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -1829,6 +1829,24 @@ int xc_hvm_get_ioreq_server_info(xc_interface *xch,
                                  evtchn_port_t *bufioreq_port);
 
 /**
+ * This function sets IOREQ Server state. An IOREQ Server
+ * will not be passed emulation requests until it is in
+ * the enabled state.
+ * Note that the contents of the ioreq_pfn and bufioreq_pfn are
+ * not meaningful until the IOREQ Server is in the enabled state.
+ *
+ * @parm xch a handle to an open hypervisor interface.
+ * @parm domid the domain id to be serviced
+ * @parm id the IOREQ Server id.
+ * @parm enabled the state.
+ * @return 0 on success, -1 on failure.
+ */
+int xc_hvm_set_ioreq_server_state(xc_interface *xch,
+                                  domid_t domid,
+                                  ioservid_t id,
+                                  int enabled);
+
+/**
  * This function registers a range of memory or I/O ports for emulation.
  *
  * @parm xch a handle to an open hypervisor interface.
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 2038d3c..a748e06 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -554,6 +554,22 @@ static int hvm_map_ioreq_page(
     return 0;
 }
 
+static void hvm_remove_ioreq_gmfn(
+    struct domain *d, struct hvm_ioreq_page *iorp)
+{
+    guest_physmap_remove_page(d, iorp->gmfn,
+                              page_to_mfn(iorp->page), 0);
+    clear_page(iorp->va);
+}
+
+static int hvm_add_ioreq_gmfn(
+    struct domain *d, struct hvm_ioreq_page *iorp)
+{
+    clear_page(iorp->va);
+    return guest_physmap_add_page(d, iorp->gmfn,
+                                  page_to_mfn(iorp->page), 0);
+}
+
 static int hvm_print_line(
     int dir, uint32_t port, uint32_t bytes, uint32_t *val)
 {
@@ -657,7 +673,8 @@ static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
     list_add(&sv->list_entry, &s->ioreq_vcpu_list);
 
-    hvm_update_ioreq_evtchn(s, sv);
+    if ( s->enabled )
+        hvm_update_ioreq_evtchn(s, sv);
 
     spin_unlock(&s->lock);
 
     return 0;
@@ -845,6 +862,56 @@ static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s,
             rangeset_destroy(s->range[i]);
 }
 
+static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
+                                    bool_t is_default)
+{
+    struct domain *d = s->domain;
+    struct hvm_ioreq_vcpu *sv;
+
+    spin_lock(&s->lock);
+
+    if ( s->enabled )
+        goto done;
+
+    if ( !is_default )
+    {
+        hvm_remove_ioreq_gmfn(d, &s->ioreq);
+        hvm_remove_ioreq_gmfn(d, &s->bufioreq);
+    }
+
+    s->enabled = 1;
+
+    list_for_each_entry ( sv,
+                          &s->ioreq_vcpu_list,
+                          list_entry )
+        hvm_update_ioreq_evtchn(s, sv);
+
+  done:
+    spin_unlock(&s->lock);
+}
+
+static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s,
+                                     bool_t is_default)
+{
+    struct domain *d = s->domain;
+
+    spin_lock(&s->lock);
+
+    if ( !s->enabled )
+        goto done;
+
+    if ( !is_default )
+    {
+        hvm_add_ioreq_gmfn(d, &s->bufioreq);
+        hvm_add_ioreq_gmfn(d, &s->ioreq);
+    }
+
+    s->enabled = 0;
+
+  done:
+    spin_unlock(&s->lock);
+}
+
 static int hvm_ioreq_server_init(struct hvm_ioreq_server *s, struct domain *d,
                                  domid_t domid, bool_t is_default,
                                  ioservid_t id)
@@ -950,7 +1017,10 @@ static int hvm_create_ioreq_server(struct domain *d, domid_t domid,
                   &d->arch.hvm_domain.ioreq_server.list);
 
     if ( is_default )
+    {
         d->arch.hvm_domain.default_ioreq_server = s;
+        hvm_ioreq_server_enable(s, 1);
+    }
 
     if ( id )
         *id = s->id;
@@ -1143,6 +1213,45 @@ static int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
     return rc;
 }
 
+static int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
+                                      bool_t enabled)
+{
+    struct list_head *entry;
+    int rc;
+
+    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+
+    rc = -ENOENT;
+    list_for_each ( entry,
+                    &d->arch.hvm_domain.ioreq_server.list )
+    {
+        struct hvm_ioreq_server *s = list_entry(entry,
+                                                struct hvm_ioreq_server,
+                                                list_entry);
+
+        if ( s == d->arch.hvm_domain.default_ioreq_server )
+            continue;
+
+        if ( s->id != id )
+            continue;
+
+        domain_pause(d);
+
+        if ( enabled )
+            hvm_ioreq_server_enable(s, 0);
+        else
+            hvm_ioreq_server_disable(s, 0);
+
+        domain_unpause(d);
+
+        rc = 0;
+        break;
+    }
+
+    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    return rc;
+}
+
 static int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
 {
     struct hvm_ioreq_server *s;
@@ -1206,7 +1315,10 @@ static void hvm_destroy_all_ioreq_servers(struct domain *d)
         bool_t is_default = (s == d->arch.hvm_domain.default_ioreq_server);
 
         if ( is_default )
+        {
+            hvm_ioreq_server_disable(s, 1);
             d->arch.hvm_domain.default_ioreq_server = NULL;
+        }
 
         list_del(&s->list_entry);
 
@@ -2200,6 +2312,9 @@ static struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
         if ( s == d->arch.hvm_domain.default_ioreq_server )
             continue;
 
+        if ( !s->enabled )
+            continue;
+
         BUILD_BUG_ON(IOREQ_TYPE_PIO != HVMOP_IO_RANGE_PORT);
         BUILD_BUG_ON(IOREQ_TYPE_COPY != HVMOP_IO_RANGE_MEMORY);
         BUILD_BUG_ON(IOREQ_TYPE_PCI_CONFIG != HVMOP_IO_RANGE_PCI);
@@ -5131,6 +5246,35 @@ static int hvmop_unmap_io_range_from_ioreq_server(
     return rc;
 }
 
+static int hvmop_set_ioreq_server_state(
+    XEN_GUEST_HANDLE_PARAM(xen_hvm_set_ioreq_server_state_t) uop)
+{
+    xen_hvm_set_ioreq_server_state_t op;
+    struct domain *d;
+    int rc;
+
+    if ( copy_from_guest(&op, uop, 1) )
+        return -EFAULT;
+
+    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
+    if ( rc != 0 )
+        return rc;
+
+    rc = -EINVAL;
+    if ( !is_hvm_domain(d) )
+        goto out;
+
+    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_set_ioreq_server_state);
+    if ( rc != 0 )
+        goto out;
+
+    rc = hvm_set_ioreq_server_state(d, op.id, !!op.enabled);
+
+ out:
+    rcu_unlock_domain(d);
+    return rc;
+}
+
 static int hvmop_destroy_ioreq_server(
     XEN_GUEST_HANDLE_PARAM(xen_hvm_destroy_ioreq_server_t) uop)
 {
@@ -5190,6 +5334,11 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
         rc = hvmop_unmap_io_range_from_ioreq_server(
             guest_handle_cast(arg, xen_hvm_io_range_t));
         break;
+
+    case HVMOP_set_ioreq_server_state:
+        rc = hvmop_set_ioreq_server_state(
+            guest_handle_cast(arg, xen_hvm_set_ioreq_server_state_t));
+        break;
 
     case HVMOP_destroy_ioreq_server:
         rc = hvmop_destroy_ioreq_server(
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index d9ae6d1..291a2e0 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -69,6 +69,7 @@ struct hvm_ioreq_server {
     spinlock_t             bufioreq_lock;
     evtchn_port_t          bufioreq_evtchn;
     struct rangeset        *range[NR_IO_RANGE_TYPES];
+    bool_t                 enabled;
 };
 
 struct hvm_domain {
diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
index 38063af..5849eac 100644
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -342,6 +342,25 @@ struct xen_hvm_destroy_ioreq_server {
 typedef struct xen_hvm_destroy_ioreq_server xen_hvm_destroy_ioreq_server_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_destroy_ioreq_server_t);
 
+/*
+ * HVMOP_set_ioreq_server_state: Enable or disable the IOREQ Server <id> servicing
+ *                               domain <domid>.
+ *
+ * The IOREQ Server will not be passed any emulation requests until it is in the
+ * enabled state.
+ * Note that the contents of the ioreq_pfn and bufioreq_pfn (see
+ * HVMOP_get_ioreq_server_info) are not meaningful until the IOREQ Server is in
+ * the enabled state.
+ */
+#define HVMOP_set_ioreq_server_state 22
+struct xen_hvm_set_ioreq_server_state {
+    domid_t domid;   /* IN - domain to be serviced */
+    ioservid_t id;   /* IN - server id */
+    uint8_t enabled; /* IN - enabled? */
+};
+typedef struct xen_hvm_set_ioreq_server_state xen_hvm_set_ioreq_server_state_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_ioreq_server_state_t);
+
 #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
 
 #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
-- 
1.7.10.4
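
For context, a minimal sketch of how a secondary emulator would be
expected to drive the new op from userspace, under stated assumptions:
the creation, query, and destroy calls come from earlier patches in this
series, the exact xc_hvm_create_ioreq_server() signature is an assumption
here, and the foreign mapping of the ioreq pages and the event-channel
binding are elided.

/*
 * Hypothetical usage sketch, not part of the patch. The ordering is
 * the important part: create and query the server first, then enable,
 * because enabling removes the ioreq pages from the guest p2m and
 * clears them, so their contents are only meaningful afterwards.
 */
#include <xenctrl.h>

static int emulator_setup(xc_interface *xch, domid_t domid)
{
    ioservid_t id;
    xen_pfn_t ioreq_pfn, bufioreq_pfn;
    evtchn_port_t bufioreq_port;

    /* Assumed signature; the call is introduced earlier in this series. */
    if ( xc_hvm_create_ioreq_server(xch, domid, &id) < 0 )
        return -1;

    if ( xc_hvm_get_ioreq_server_info(xch, domid, id, &ioreq_pfn,
                                      &bufioreq_pfn, &bufioreq_port) < 0 )
        goto fail;

    /* ... foreign-map ioreq_pfn/bufioreq_pfn, bind event channels and
     * register I/O ranges with xc_hvm_map_io_range_to_ioreq_server()
     * here ... */

    /*
     * Enabling pulls both pages out of the guest p2m (so the guest can
     * no longer map them and synthesize ioreqs) and only then lets
     * emulation requests flow to this server.
     */
    if ( xc_hvm_set_ioreq_server_state(xch, domid, id, 1) < 0 )
        goto fail;

    return 0;

 fail:
    xc_hvm_destroy_ioreq_server(xch, domid, id);
    return -1;
}

Disabling (enabled == 0) reverses the transition and re-inserts the
pages into the p2m, which is why hvm_set_ioreq_server_state() pauses
the domain around the state change.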