
[Xen-devel] [PATCH 3/7] vm-event: introduce vm_event_vcpu_enter



In an effort to improve the vm-event interface, we introduce a new function
called vm_event_vcpu_enter. It acts as a "final touch" vCPU function, i.e.
it should be called by implementing architectures just before re-entering
vCPUs.
On x86, for example, it is called from the scheduling tail (hvm_do_resume)
and just before re-entering the guest world after a hypervisor trap
(vmx_vmenter_helper).

Signed-off-by: Corneliu ZUZU <czuzu@xxxxxxxxxxxxxxx>
---
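Note: the sketch below is illustrative only, not part of the patch. It shows
the call pattern the new hook expects from an implementing architecture;
example_return_to_guest() is a hypothetical stand-in for an arch-specific
re-entry routine. The real call sites are the ones patched below
(schedule_tail / leave_hypervisor_tail on ARM, hvm_do_resume /
vmx_vmenter_helper on x86).

    #include <xen/sched.h>
    #include <xen/vm_event.h>

    /* Hypothetical arch path that returns control to the guest. */
    static void example_return_to_guest(struct vcpu *v)
    {
        /*
         * "Final touch": last point at which the vm-event subsystem may
         * adjust vCPU state before the guest runs again. Idle vCPUs are
         * filtered out inside vm_event_vcpu_enter() itself.
         */
        vm_event_vcpu_enter(v);

        /* Arch-specific guest re-entry would follow, e.g. VMRESUME on VMX. */
    }
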
 xen/arch/arm/domain.c          |  5 ++++-
 xen/arch/arm/traps.c           |  2 ++
 xen/arch/x86/hvm/emulate.c     |  2 +-
 xen/arch/x86/hvm/event.c       |  1 -
 xen/arch/x86/hvm/hvm.c         |  3 ++-
 xen/arch/x86/hvm/vmx/vmx.c     |  4 ++++
 xen/arch/x86/mm/p2m.c          |  1 -
 xen/arch/x86/vm_event.c        |  4 +---
 xen/common/monitor.c           |  2 +-
 xen/common/vm_event.c          |  1 -
 xen/include/asm-arm/vm_event.h |  6 +++++-
 xen/include/asm-x86/vm_event.h |  6 +++++-
 xen/include/xen/vm_event.h     | 15 +++++++++++++++
 13 files changed, 40 insertions(+), 12 deletions(-)

diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index d31f821..ba248c8 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -19,6 +19,7 @@
 #include <xen/errno.h>
 #include <xen/bitops.h>
 #include <xen/grant_table.h>
+#include <xen/vm_event.h>
 
 #include <asm/current.h>
 #include <asm/event.h>
@@ -251,6 +252,8 @@ static void schedule_tail(struct vcpu *prev)
 
     ctxt_switch_to(current);
 
+    vm_event_vcpu_enter(current);
+
     local_irq_enable();
 
     context_saved(prev);
@@ -296,7 +299,7 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
 
 void continue_running(struct vcpu *same)
 {
-    /* Nothing to do */
+    vm_event_vcpu_enter(same);
 }
 
 void sync_local_execstate(void)
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 7fa2ae5..8c50685 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -32,6 +32,7 @@
 #include <xen/domain_page.h>
 #include <xen/perfc.h>
 #include <xen/virtual_region.h>
+#include <xen/vm_event.h>
 #include <public/sched.h>
 #include <public/xen.h>
 #include <asm/debugger.h>
@@ -2662,6 +2663,7 @@ asmlinkage void leave_hypervisor_tail(void)
     {
         local_irq_disable();
         if (!softirq_pending(smp_processor_id())) {
+            vm_event_vcpu_enter(current);
             gic_inject();
             return;
         }
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 266ed89..9b2872a 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -15,6 +15,7 @@
 #include <xen/sched.h>
 #include <xen/paging.h>
 #include <xen/trace.h>
+#include <xen/vm_event.h>
 #include <asm/event.h>
 #include <asm/xstate.h>
 #include <asm/hvm/emulate.h>
@@ -23,7 +24,6 @@
 #include <asm/hvm/trace.h>
 #include <asm/hvm/support.h>
 #include <asm/hvm/svm/svm.h>
-#include <asm/vm_event.h>
 
 static void hvmtrace_io_assist(const ioreq_t *p)
 {
diff --git a/xen/arch/x86/hvm/event.c b/xen/arch/x86/hvm/event.c
index 9c51890..26165b4 100644
--- a/xen/arch/x86/hvm/event.c
+++ b/xen/arch/x86/hvm/event.c
@@ -25,7 +25,6 @@
 #include <asm/hvm/event.h>
 #include <asm/paging.h>
 #include <asm/monitor.h>
-#include <asm/vm_event.h>
 #include <public/vm_event.h>
 
 bool_t hvm_event_cr(unsigned int index, unsigned long value, unsigned long old)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 78db903..770bb50 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -65,7 +65,6 @@
 #include <asm/altp2m.h>
 #include <asm/mtrr.h>
 #include <asm/apic.h>
-#include <asm/vm_event.h>
 #include <public/sched.h>
 #include <public/hvm/ioreq.h>
 #include <public/version.h>
@@ -509,6 +508,8 @@ void hvm_do_resume(struct vcpu *v)
         }
     }
 
+    vm_event_vcpu_enter(v);
+
     /* Inject pending hw/sw trap */
     if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
     {
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 670d7dc..b43b94a 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -25,6 +25,7 @@
 #include <xen/domain_page.h>
 #include <xen/hypercall.h>
 #include <xen/perfc.h>
+#include <xen/vm_event.h>
 #include <asm/current.h>
 #include <asm/io.h>
 #include <asm/iocap.h>
@@ -3874,6 +3875,9 @@ void vmx_vmenter_helper(const struct cpu_user_regs *regs)
     }
 
  out:
+    if ( guest_mode(regs) )
+        vm_event_vcpu_enter(curr);
+
     HVMTRACE_ND(VMENTRY, 0, 1/*cycles*/, 0, 0, 0, 0, 0, 0, 0);
 
     __vmwrite(GUEST_RIP,    regs->rip);
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 89462b2..9d37b12 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -36,7 +36,6 @@
 #include <asm/hvm/nestedhvm.h>
 #include <asm/altp2m.h>
 #include <asm/hvm/svm/amd-iommu-proto.h>
-#include <asm/vm_event.h>
 #include <xsm/xsm.h>
 
 #include "mm-locks.h"
diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c
index 75647c4..f7eb24a 100644
--- a/xen/arch/x86/vm_event.c
+++ b/xen/arch/x86/vm_event.c
@@ -18,9 +18,7 @@
  * License along with this program; If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <xen/sched.h>
-#include <asm/hvm/hvm.h>
-#include <asm/vm_event.h>
+#include <xen/vm_event.h>
 
 /* Implicitly serialized by the domctl lock. */
 int vm_event_init_domain(struct domain *d)
diff --git a/xen/common/monitor.c b/xen/common/monitor.c
index b30857a..c46df5a 100644
--- a/xen/common/monitor.c
+++ b/xen/common/monitor.c
@@ -22,8 +22,8 @@
 #include <xen/monitor.h>
 #include <xen/sched.h>
 #include <xsm/xsm.h>
+#include <xen/vm_event.h>
 #include <asm/monitor.h>
-#include <asm/vm_event.h>
 
 int monitor_domctl(struct domain *d, struct xen_domctl_monitor_op *mop)
 {
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index 2906407..15152ba 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -27,7 +27,6 @@
 #include <xen/mem_access.h>
 #include <asm/p2m.h>
 #include <asm/altp2m.h>
-#include <asm/vm_event.h>
 #include <xsm/xsm.h>
 
 /* for public/io/ring.h macros */
diff --git a/xen/include/asm-arm/vm_event.h b/xen/include/asm-arm/vm_event.h
index 05c3027..4e5a272 100644
--- a/xen/include/asm-arm/vm_event.h
+++ b/xen/include/asm-arm/vm_event.h
@@ -20,7 +20,6 @@
 #define __ASM_ARM_VM_EVENT_H__
 
 #include <xen/sched.h>
-#include <xen/vm_event.h>
 #include <public/domctl.h>
 
 static inline int vm_event_init_domain(struct domain *d)
@@ -56,6 +55,11 @@ static inline void vm_event_fill_regs(vm_event_request_t *req)
     /* Not supported on ARM. */
 }
 
+static inline void arch_vm_event_vcpu_enter(struct vcpu *v)
+{
+    /* Nothing to do. */
+}
+
 /*
  * Monitor vm-events.
  */
diff --git a/xen/include/asm-x86/vm_event.h b/xen/include/asm-x86/vm_event.h
index df8e98d..6fb3b58 100644
--- a/xen/include/asm-x86/vm_event.h
+++ b/xen/include/asm-x86/vm_event.h
@@ -20,7 +20,6 @@
 #define __ASM_X86_VM_EVENT_H__
 
 #include <xen/sched.h>
-#include <xen/vm_event.h>
 
 /*
  * Should we emulate the next matching instruction on VCPU resume
@@ -44,6 +43,11 @@ void vm_event_set_registers(struct vcpu *v, vm_event_response_t *rsp);
 
 void vm_event_fill_regs(vm_event_request_t *req);
 
+static inline void arch_vm_event_vcpu_enter(struct vcpu *v)
+{
+    /* Nothing to do. */
+}
+
 /*
  * Monitor vm-events.
  */
diff --git a/xen/include/xen/vm_event.h b/xen/include/xen/vm_event.h
index a10ee40..f124143 100644
--- a/xen/include/xen/vm_event.h
+++ b/xen/include/xen/vm_event.h
@@ -24,6 +24,7 @@
 #define __VM_EVENT_H__
 
 #include <xen/sched.h>
+#include <asm/vm_event.h>
 #include <public/vm_event.h>
 
 /* Clean up on domain destruction */
@@ -72,6 +73,20 @@ void vm_event_resume(struct domain *d, struct vm_event_domain *ved);
 int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
                     XEN_GUEST_HANDLE_PARAM(void) u_domctl);
 
+/*
+ * "Final touch" vCPU function, should be called just before re-entering vCPUs,
+ * e.g. on x86 it is called by hvm_do_resume (scheduling tail) and
+ * vmx_vmenter_helper (before VMRESUME or VMLAUNCH).
+ */
+static inline void vm_event_vcpu_enter(struct vcpu *v)
+{
+    /* Don't track idle vCPUs; they're not subject to the vm-event subsystem. */
+    if ( is_idle_vcpu(v) )
+        return;
+
+    arch_vm_event_vcpu_enter(v);
+}
+
 void vm_event_vcpu_pause(struct vcpu *v);
 void vm_event_vcpu_unpause(struct vcpu *v);
 
-- 
2.5.0

