[Xen-changelog] [xen-unstable] xen: allow global VIRQ handlers to be delegated to other domains
# HG changeset patch
# User Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
# Date 1327758483 0
# Node ID 1e155189c9a4f6e9d78cf3ea955c9c0fad07d7f7
# Parent 34c96082d69389e5891bbc1c424d0199355632df
xen: allow global VIRQ handlers to be delegated to other domains

This patch sends global VIRQs to a domain designated as the VIRQ handler
instead of sending all global VIRQ events to dom0. This is required in
order to run xenstored in a stubdom, because VIRQ_DOM_EXC must be sent to
xenstored for domain destruction to work properly.

This patch was inspired by the xenstored stubdomain patch series sent to
xen-devel by Alex Zeffertt in 2009.

Signed-off-by: Diego Ongaro <diego.ongaro@xxxxxxxxxx>
Signed-off-by: Alex Zeffertt <alex.zeffertt@xxxxxxxxxxxxx>
Signed-off-by: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
Committed-by: Keir Fraser <keir@xxxxxxx>
---

diff -r 34c96082d693 -r 1e155189c9a4 tools/flask/policy/policy/flask/access_vectors
--- a/tools/flask/policy/policy/flask/access_vectors Sat Jan 28 13:47:24 2012 +0000
+++ b/tools/flask/policy/policy/flask/access_vectors Sat Jan 28 13:48:03 2012 +0000
@@ -85,6 +85,7 @@
     getpodtarget
     setpodtarget
     set_misc_info
+    set_virq_handler
 }
 
 class hvm
diff -r 34c96082d693 -r 1e155189c9a4 tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c Sat Jan 28 13:47:24 2012 +0000
+++ b/tools/libxc/xc_domain.c Sat Jan 28 13:48:03 2012 +0000
@@ -1504,6 +1504,16 @@
     return do_domctl(xch, &domctl);
 }
 
+int xc_domain_set_virq_handler(xc_interface *xch, uint32_t domid, int virq)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_set_virq_handler;
+    domctl.domain = domid;
+    domctl.u.set_virq_handler.virq = virq;
+    return do_domctl(xch, &domctl);
+}
+
 /*
  * Local variables:
  * mode: C
diff -r 34c96082d693 -r 1e155189c9a4 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h Sat Jan 28 13:47:24 2012 +0000
+++ b/tools/libxc/xenctrl.h Sat Jan 28 13:48:03 2012 +0000
@@ -749,6 +749,15 @@
 int xc_domain_set_access_required(xc_interface *xch,
                                   uint32_t domid,
                                   unsigned int required);
+/**
+ * This function sets the handler of global VIRQs sent by the hypervisor
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain id which will handle the VIRQ
+ * @parm virq the virq number (VIRQ_*)
+ * return 0 on success, -1 on failure
+ */
+int xc_domain_set_virq_handler(xc_interface *xch, uint32_t domid, int virq);
 
 /*
  * CPUPOOL MANAGEMENT FUNCTIONS
diff -r 34c96082d693 -r 1e155189c9a4 xen/arch/x86/cpu/mcheck/amd_nonfatal.c
--- a/xen/arch/x86/cpu/mcheck/amd_nonfatal.c Sat Jan 28 13:47:24 2012 +0000
+++ b/xen/arch/x86/cpu/mcheck/amd_nonfatal.c Sat Jan 28 13:48:03 2012 +0000
@@ -100,7 +100,7 @@
     if (dom0_vmce_enabled()) {
         mctelem_commit(mctc);
-        send_guest_global_virq(dom0, VIRQ_MCA);
+        send_global_virq(VIRQ_MCA);
     } else if (++dumpcount >= 10) {
         x86_mcinfo_dump((struct mc_info *)mctelem_dataptr(mctc));
         mctelem_dismiss(mctc);
diff -r 34c96082d693 -r 1e155189c9a4 xen/arch/x86/cpu/mcheck/mce.c
--- a/xen/arch/x86/cpu/mcheck/mce.c Sat Jan 28 13:47:24 2012 +0000
+++ b/xen/arch/x86/cpu/mcheck/mce.c Sat Jan 28 13:48:03 2012 +0000
@@ -594,7 +594,7 @@
     if (dom0_vmce_enabled()) {
         if (mctc != NULL)
             mctelem_commit(mctc);
-        send_guest_global_virq(dom0, VIRQ_MCA);
+        send_global_virq(VIRQ_MCA);
     } else {
         x86_mcinfo_dump(mci);
         if (mctc != NULL)
diff -r 34c96082d693 -r 1e155189c9a4 xen/arch/x86/cpu/mcheck/mce_intel.c
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c Sat Jan 28 13:47:24 2012 +0000
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c Sat Jan 28 13:48:03 2012 +0000
@@ -354,7 +354,7 @@
     /* Step2: Send Log to DOM0 through vIRQ */
     if (dom0_vmce_enabled()) {
         mce_printk(MCE_VERBOSE, "MCE: send MCE# to DOM0 through virq\n");
-        send_guest_global_virq(dom0, VIRQ_MCA);
+        send_global_virq(VIRQ_MCA);
     }
 }
 
@@ -1085,7 +1085,7 @@
     if (bs.errcnt && mctc != NULL) {
         if (dom0_vmce_enabled()) {
             mctelem_commit(mctc);
-            send_guest_global_virq(dom0, VIRQ_MCA);
+            send_global_virq(VIRQ_MCA);
         } else {
             x86_mcinfo_dump(mctelem_dataptr(mctc));
             mctelem_dismiss(mctc);
@@ -1205,7 +1205,7 @@
     if (dom0_vmce_enabled()) {
         mctelem_commit(mctc);
         mce_printk(MCE_VERBOSE, "CMCI: send CMCI to DOM0 through virq\n");
-        send_guest_global_virq(dom0, VIRQ_MCA);
+        send_global_virq(VIRQ_MCA);
     } else {
         x86_mcinfo_dump(mctelem_dataptr(mctc));
         mctelem_dismiss(mctc);
diff -r 34c96082d693 -r 1e155189c9a4 xen/arch/x86/cpu/mcheck/non-fatal.c
--- a/xen/arch/x86/cpu/mcheck/non-fatal.c Sat Jan 28 13:47:24 2012 +0000
+++ b/xen/arch/x86/cpu/mcheck/non-fatal.c Sat Jan 28 13:48:03 2012 +0000
@@ -55,7 +55,7 @@
     if (dom0_vmce_enabled()) {
         mctelem_commit(mctc);
-        send_guest_global_virq(dom0, VIRQ_MCA);
+        send_global_virq(VIRQ_MCA);
     } else if (++dumpcount >= 10) {
         x86_mcinfo_dump((struct mc_info *)mctelem_dataptr(mctc));
         mctelem_dismiss(mctc);
diff -r 34c96082d693 -r 1e155189c9a4 xen/common/cpu.c
--- a/xen/common/cpu.c Sat Jan 28 13:47:24 2012 +0000
+++ b/xen/common/cpu.c Sat Jan 28 13:48:03 2012 +0000
@@ -108,7 +108,7 @@
     notifier_rc = notifier_call_chain(&cpu_chain, CPU_DEAD, hcpu, NULL);
     BUG_ON(notifier_rc != NOTIFY_DONE);
-    send_guest_global_virq(dom0, VIRQ_PCPU_STATE);
+    send_global_virq(VIRQ_PCPU_STATE);
     cpu_hotplug_done();
     return 0;
 
@@ -148,7 +148,7 @@
     notifier_rc = notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu, NULL);
     BUG_ON(notifier_rc != NOTIFY_DONE);
-    send_guest_global_virq(dom0, VIRQ_PCPU_STATE);
+    send_global_virq(VIRQ_PCPU_STATE);
     cpu_hotplug_done();
     return 0;
diff -r 34c96082d693 -r 1e155189c9a4 xen/common/domain.c
--- a/xen/common/domain.c Sat Jan 28 13:47:24 2012 +0000
+++ b/xen/common/domain.c Sat Jan 28 13:48:03 2012 +0000
@@ -86,7 +86,7 @@
     if ( (d->shutdown_code == SHUTDOWN_suspend) && d->suspend_evtchn )
         evtchn_send(d, d->suspend_evtchn);
     else
-        send_guest_global_virq(dom0, VIRQ_DOM_EXC);
+        send_global_virq(VIRQ_DOM_EXC);
 }
 
 static void vcpu_check_shutdown(struct vcpu *v)
@@ -480,7 +480,7 @@
         }
         d->is_dying = DOMDYING_dead;
         put_domain(d);
-        send_guest_global_virq(dom0, VIRQ_DOM_EXC);
+        send_global_virq(VIRQ_DOM_EXC);
         /* fallthrough */
     case DOMDYING_dead:
         break;
@@ -621,7 +621,7 @@
     for_each_vcpu ( d, v )
         vcpu_sleep_nosync(v);
 
-    send_guest_global_virq(dom0, VIRQ_DEBUGGER);
+    send_global_virq(VIRQ_DEBUGGER);
 }
 
 /* Complete domain destroy after RCU readers are not holding old references. */
@@ -680,7 +680,7 @@
     free_cpumask_var(d->domain_dirty_cpumask);
     free_domain_struct(d);
 
-    send_guest_global_virq(dom0, VIRQ_DOM_EXC);
+    send_global_virq(VIRQ_DOM_EXC);
 }
 
 /* Release resources belonging to task @p. */
diff -r 34c96082d693 -r 1e155189c9a4 xen/common/domctl.c
--- a/xen/common/domctl.c Sat Jan 28 13:47:24 2012 +0000
+++ b/xen/common/domctl.c Sat Jan 28 13:48:03 2012 +0000
@@ -995,6 +995,23 @@
     }
     break;
 
+    case XEN_DOMCTL_set_virq_handler:
+    {
+        struct domain *d;
+        uint32_t virq = op->u.set_virq_handler.virq;
+
+        ret = -ESRCH;
+        d = rcu_lock_domain_by_id(op->domain);
+        if ( d != NULL )
+        {
+            ret = xsm_set_virq_handler(d, virq);
+            if ( !ret )
+                ret = set_global_virq_handler(d, virq);
+            rcu_unlock_domain(d);
+        }
+    }
+    break;
+
     default:
         ret = arch_do_domctl(op, u_domctl);
         break;
diff -r 34c96082d693 -r 1e155189c9a4 xen/common/event_channel.c
--- a/xen/common/event_channel.c Sat Jan 28 13:47:24 2012 +0000
+++ b/xen/common/event_channel.c Sat Jan 28 13:48:03 2012 +0000
@@ -689,7 +689,7 @@
     spin_unlock_irqrestore(&v->virq_lock, flags);
 }
 
-void send_guest_global_virq(struct domain *d, int virq)
+static void send_guest_global_virq(struct domain *d, int virq)
 {
     unsigned long flags;
     int port;
@@ -739,6 +739,68 @@
     return evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
 }
 
+static struct domain *global_virq_handlers[NR_VIRQS] __read_mostly;
+
+static DEFINE_SPINLOCK(global_virq_handlers_lock);
+
+void send_global_virq(uint32_t virq)
+{
+    ASSERT(virq < NR_VIRQS);
+    ASSERT(virq_is_global(virq));
+
+    send_guest_global_virq(global_virq_handlers[virq] ?: dom0, virq);
+}
+
+int set_global_virq_handler(struct domain *d, uint32_t virq)
+{
+    struct domain *old;
+
+    if (virq >= NR_VIRQS)
+        return -EINVAL;
+    if (!virq_is_global(virq))
+        return -EINVAL;
+
+    if (global_virq_handlers[virq] == d)
+        return 0;
+
+    if (unlikely(!get_domain(d)))
+        return -EINVAL;
+
+    spin_lock(&global_virq_handlers_lock);
+    old = global_virq_handlers[virq];
+    global_virq_handlers[virq] = d;
+    spin_unlock(&global_virq_handlers_lock);
+
+    if (old != NULL)
+        put_domain(old);
+
+    return 0;
+}
+
+static void clear_global_virq_handlers(struct domain *d)
+{
+    uint32_t virq;
+    int put_count = 0;
+
+    spin_lock(&global_virq_handlers_lock);
+
+    for (virq = 0; virq < NR_VIRQS; virq++)
+    {
+        if (global_virq_handlers[virq] == d)
+        {
+            global_virq_handlers[virq] = NULL;
+            put_count++;
+        }
+    }
+
+    spin_unlock(&global_virq_handlers_lock);
+
+    while (put_count)
+    {
+        put_domain(d);
+        put_count--;
+    }
+}
+
 static long evtchn_status(evtchn_status_t *status)
 {
@@ -1160,6 +1222,8 @@
             d->evtchn[i] = NULL;
     }
     spin_unlock(&d->event_lock);
+
+    clear_global_virq_handlers(d);
 }
 
diff -r 34c96082d693 -r 1e155189c9a4 xen/common/trace.c
--- a/xen/common/trace.c Sat Jan 28 13:47:24 2012 +0000
+++ b/xen/common/trace.c Sat Jan 28 13:48:03 2012 +0000
@@ -661,7 +661,7 @@
  */
 static void trace_notify_dom0(unsigned long unused)
 {
-    send_guest_global_virq(dom0, VIRQ_TBUF);
+    send_global_virq(VIRQ_TBUF);
 }
 static DECLARE_SOFTIRQ_TASKLET(trace_notify_dom0_tasklet,
                                trace_notify_dom0, 0);
diff -r 34c96082d693 -r 1e155189c9a4 xen/drivers/char/console.c
--- a/xen/drivers/char/console.c Sat Jan 28 13:47:24 2012 +0000
+++ b/xen/drivers/char/console.c Sat Jan 28 13:48:03 2012 +0000
@@ -288,7 +288,7 @@
     if ( (serial_rx_prod-serial_rx_cons) != SERIAL_RX_SIZE )
         serial_rx_ring[SERIAL_RX_MASK(serial_rx_prod++)] = c;
     /* Always notify the guest: prevents receive path from getting stuck. */
-    send_guest_global_virq(dom0, VIRQ_CONSOLE);
+    send_global_virq(VIRQ_CONSOLE);
 }
 
 static void serial_rx(char c, struct cpu_user_regs *regs)
@@ -315,7 +315,7 @@
 
 static void notify_dom0_con_ring(unsigned long unused)
 {
-    send_guest_global_virq(dom0, VIRQ_CON_RING);
+    send_global_virq(VIRQ_CON_RING);
 }
 static DECLARE_SOFTIRQ_TASKLET(notify_dom0_con_ring_tasklet,
                                notify_dom0_con_ring, 0);
diff -r 34c96082d693 -r 1e155189c9a4 xen/include/public/domctl.h
--- a/xen/include/public/domctl.h Sat Jan 28 13:47:24 2012 +0000
+++ b/xen/include/public/domctl.h Sat Jan 28 13:48:03 2012 +0000
@@ -830,6 +830,12 @@
 typedef struct xen_domctl_audit_p2m xen_domctl_audit_p2m_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_audit_p2m_t);
 
+struct xen_domctl_set_virq_handler {
+    uint32_t virq; /* IN */
+};
+typedef struct xen_domctl_set_virq_handler xen_domctl_set_virq_handler_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_virq_handler_t);
+
 #if defined(__i386__) || defined(__x86_64__)
 /* XEN_DOMCTL_setvcpuextstate */
 /* XEN_DOMCTL_getvcpuextstate */
@@ -929,6 +935,7 @@
 #define XEN_DOMCTL_getvcpuextstate               63
 #define XEN_DOMCTL_set_access_required           64
 #define XEN_DOMCTL_audit_p2m                     65
+#define XEN_DOMCTL_set_virq_handler              66
 #define XEN_DOMCTL_gdbsx_guestmemio            1000
 #define XEN_DOMCTL_gdbsx_pausevcpu             1001
 #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
@@ -983,6 +990,7 @@
 #endif
     struct xen_domctl_set_access_required access_required;
     struct xen_domctl_audit_p2m           audit_p2m;
+    struct xen_domctl_set_virq_handler    set_virq_handler;
     struct xen_domctl_gdbsx_memio         gdbsx_guest_memio;
     struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
    struct xen_domctl_gdbsx_domstatus     gdbsx_domstatus;
diff -r 34c96082d693 -r 1e155189c9a4 xen/include/xen/event.h
--- a/xen/include/xen/event.h Sat Jan 28 13:47:24 2012 +0000
+++ b/xen/include/xen/event.h Sat Jan 28 13:48:03 2012 +0000
@@ -23,11 +23,17 @@
 void send_guest_vcpu_virq(struct vcpu *v, int virq);
 
 /*
- * send_guest_global_virq: Notify guest via a global VIRQ.
- * @d:        Domain to which virtual IRQ should be sent
+ * send_global_virq: Notify the domain handling a global VIRQ.
  * @virq:     Virtual IRQ number (VIRQ_*)
  */
-void send_guest_global_virq(struct domain *d, int virq);
+void send_global_virq(uint32_t virq);
+
+/*
+ * set_global_virq_handler: Set a global VIRQ handler.
+ * @d:        New target domain for this VIRQ
+ * @virq:     Virtual IRQ number (VIRQ_*), must be global
+ */
+int set_global_virq_handler(struct domain *d, uint32_t virq);
 
 /*
  * send_guest_pirq:
diff -r 34c96082d693 -r 1e155189c9a4 xen/include/xsm/xsm.h
--- a/xen/include/xsm/xsm.h Sat Jan 28 13:47:24 2012 +0000
+++ b/xen/include/xsm/xsm.h Sat Jan 28 13:48:03 2012 +0000
@@ -64,6 +64,7 @@
     int (*domain_settime) (struct domain *d);
     int (*set_target) (struct domain *d, struct domain *e);
     int (*domctl) (struct domain *d, int cmd);
+    int (*set_virq_handler) (struct domain *d, uint32_t virq);
     int (*tbufcontrol) (void);
     int (*readconsole) (uint32_t clear);
     int (*sched_id) (void);
@@ -265,6 +266,11 @@
     return xsm_call(domctl(d, cmd));
 }
 
+static inline int xsm_set_virq_handler (struct domain *d, uint32_t virq)
+{
+    return xsm_call(set_virq_handler(d, virq));
+}
+
 static inline int xsm_tbufcontrol (void)
 {
     return xsm_call(tbufcontrol());
diff -r 34c96082d693 -r 1e155189c9a4 xen/xsm/dummy.c
--- a/xen/xsm/dummy.c Sat Jan 28 13:47:24 2012 +0000
+++ b/xen/xsm/dummy.c Sat Jan 28 13:48:03 2012 +0000
@@ -94,6 +94,11 @@
     return 0;
 }
 
+static int dummy_set_virq_handler(struct domain *d, uint32_t virq)
+{
+    return 0;
+}
+
 static int dummy_tbufcontrol (void)
 {
     return 0;
@@ -596,6 +601,7 @@
     set_to_dummy_if_null(ops, domain_settime);
     set_to_dummy_if_null(ops, set_target);
     set_to_dummy_if_null(ops, domctl);
+    set_to_dummy_if_null(ops, set_virq_handler);
     set_to_dummy_if_null(ops, tbufcontrol);
     set_to_dummy_if_null(ops, readconsole);
     set_to_dummy_if_null(ops, sched_id);
diff -r 34c96082d693 -r 1e155189c9a4 xen/xsm/flask/hooks.c
--- a/xen/xsm/flask/hooks.c Sat Jan 28 13:47:24 2012 +0000
+++ b/xen/xsm/flask/hooks.c Sat Jan 28 13:48:03 2012 +0000
@@ -597,6 +597,11 @@
     return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, DOMAIN__SET_MISC_INFO);
 }
 
+static int flask_set_virq_handler(struct domain *d, uint32_t virq)
+{
+    return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, DOMAIN__SET_VIRQ_HANDLER);
+}
+
 static int flask_tbufcontrol(void)
 {
     return domain_has_xen(current->domain, XEN__TBUFCONTROL);
@@ -1460,6 +1465,7 @@
     .domain_settime = flask_domain_settime,
     .set_target = flask_set_target,
     .domctl = flask_domctl,
+    .set_virq_handler = flask_set_virq_handler,
     .tbufcontrol = flask_tbufcontrol,
     .readconsole = flask_readconsole,
     .sched_id = flask_sched_id,
diff -r 34c96082d693 -r 1e155189c9a4 xen/xsm/flask/include/av_perm_to_string.h
--- a/xen/xsm/flask/include/av_perm_to_string.h Sat Jan 28 13:47:24 2012 +0000
+++ b/xen/xsm/flask/include/av_perm_to_string.h Sat Jan 28 13:48:03 2012 +0000
@@ -60,6 +60,7 @@
    S_(SECCLASS_DOMAIN, DOMAIN__GETPODTARGET, "getpodtarget")
    S_(SECCLASS_DOMAIN, DOMAIN__SETPODTARGET, "setpodtarget")
    S_(SECCLASS_DOMAIN, DOMAIN__SET_MISC_INFO, "set_misc_info")
+   S_(SECCLASS_DOMAIN, DOMAIN__SET_VIRQ_HANDLER, "set_virq_handler")
    S_(SECCLASS_HVM, HVM__SETHVMC, "sethvmc")
    S_(SECCLASS_HVM, HVM__GETHVMC, "gethvmc")
    S_(SECCLASS_HVM, HVM__SETPARAM, "setparam")
diff -r 34c96082d693 -r 1e155189c9a4 xen/xsm/flask/include/av_permissions.h
--- a/xen/xsm/flask/include/av_permissions.h Sat Jan 28 13:47:24 2012 +0000
+++ b/xen/xsm/flask/include/av_permissions.h Sat Jan 28 13:48:03 2012 +0000
@@ -61,6 +61,7 @@
 #define DOMAIN__GETPODTARGET        0x10000000UL
 #define DOMAIN__SETPODTARGET        0x20000000UL
 #define DOMAIN__SET_MISC_INFO       0x40000000UL
+#define DOMAIN__SET_VIRQ_HANDLER    0x80000000UL
 
 #define HVM__SETHVMC                0x00000001UL
 #define HVM__GETHVMC                0x00000002UL
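
For context, a toolstack that runs xenstored in a stubdomain could use the new
libxc call roughly as sketched below. This is an illustration only and not part
of the changeset: the stubdomain ID passed on the command line is assumed to be
whatever ID the toolstack assigned when it built the xenstore stubdom, and error
handling is kept minimal.

/* Sketch: delegate VIRQ_DOM_EXC to a xenstore stubdomain using the
 * xc_domain_set_virq_handler() call introduced by this patch. */
#include <stdio.h>
#include <stdlib.h>
#include <xenctrl.h>

int main(int argc, char *argv[])
{
    xc_interface *xch;
    uint32_t domid;

    if ( argc != 2 )
    {
        fprintf(stderr, "usage: %s <stubdom-domid>\n", argv[0]);
        return 1;
    }
    domid = strtoul(argv[1], NULL, 0);

    xch = xc_interface_open(NULL, NULL, 0);
    if ( xch == NULL )
    {
        fprintf(stderr, "failed to open xc interface\n");
        return 1;
    }

    /* Route VIRQ_DOM_EXC to the stubdomain instead of dom0. */
    if ( xc_domain_set_virq_handler(xch, domid, VIRQ_DOM_EXC) != 0 )
    {
        perror("xc_domain_set_virq_handler");
        xc_interface_close(xch);
        return 1;
    }

    xc_interface_close(xch);
    return 0;
}

Note that under XSM/FLASK the calling domain also needs the new
set_virq_handler permission on the target domain, matching the access vector
added above; with the dummy XSM module only a privileged domain can issue the
domctl.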
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog