[PATCH v3 2/3] xen/oprofile: use set_nmi_continuation() for sending virq to guest
Instead of calling send_guest_vcpu_virq() from NMI context, use the
NMI continuation framework for that purpose. This avoids taking locks
in NMI mode.
Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
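Note (illustration only, not part of the patch): a minimal sketch of the
deferral pattern this change relies on, using hypothetical names. The real
framework is introduced in patch 1/3 of this series; the point here is only
that the NMI handler records the request and the callback runs later, from a
context where taking locks (as send_guest_vcpu_virq() does) is safe.

typedef void (*nmi_cont_fn_t)(void *arg);

static nmi_cont_fn_t pending_fn;   /* recorded from NMI context */
static void *pending_arg;

/* NMI context: only remember what to do, take no locks. */
static void sketch_set_nmi_continuation(nmi_cont_fn_t fn, void *arg)
{
    pending_fn = fn;
    pending_arg = arg;
    /* the real framework also arranges for a later, non-NMI entry point */
}

/* Normal (non-NMI) context: run the deferred work. */
static void sketch_run_nmi_continuation(void)
{
    nmi_cont_fn_t fn = pending_fn;

    if ( fn )
    {
        pending_fn = NULL;
        fn(pending_arg);
    }
}

With such a split in place, nmi_oprofile_send_virq() below becomes the
deferred half of what used to be an inline call from the NMI handler.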
xen/arch/x86/oprofile/nmi_int.c | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/xen/arch/x86/oprofile/nmi_int.c b/xen/arch/x86/oprofile/nmi_int.c
index 0f103d80a6..825f0aeef0 100644
--- a/xen/arch/x86/oprofile/nmi_int.c
+++ b/xen/arch/x86/oprofile/nmi_int.c
@@ -83,6 +83,13 @@ void passive_domain_destroy(struct vcpu *v)
         model->free_msr(v);
 }
 
+static void nmi_oprofile_send_virq(void *arg)
+{
+    struct vcpu *v = arg;
+
+    send_guest_vcpu_virq(v, VIRQ_XENOPROF);
+}
+
 static int nmi_callback(const struct cpu_user_regs *regs, int cpu)
 {
     int xen_mode, ovf;
@@ -90,7 +97,7 @@ static int nmi_callback(const struct cpu_user_regs *regs, int cpu)
     ovf = model->check_ctrs(cpu, &cpu_msrs[cpu], regs);
     xen_mode = ring_0(regs);
     if ( ovf && is_active(current->domain) && !xen_mode )
-        send_guest_vcpu_virq(current, VIRQ_XENOPROF);
+        set_nmi_continuation(nmi_oprofile_send_virq, current);
 
     if ( ovf == 2 )
         current->arch.nmi_pending = true;
--
2.26.2