[Xen-devel] [XEN PATCH v1 1/1] x86/vm_event: add fast single step
On a breakpoint event, eight context switches occur. With fast single step
it is possible to shorten that path by two context switches and gain a 35%
speed-up.

This was tested on the Debian branch of Xen 4.12. See:
https://github.com/skvl/xen/tree/debian/knorrie/4.12/fast-singlestep

Rebased on master:
https://github.com/skvl/xen/tree/fast-singlestep

Signed-off-by: Sergey Kovalev <valor@xxxxxxx>
---
 xen/arch/x86/hvm/hvm.c         | 12 ++++++++++++
 xen/arch/x86/hvm/monitor.c     |  9 +++++++++
 xen/arch/x86/vm_event.c        |  8 ++++++--
 xen/include/asm-x86/hvm/hvm.h  |  1 +
 xen/include/asm-x86/hvm/vcpu.h |  4 ++++
 xen/include/public/vm_event.h  | 10 ++++++++++
 6 files changed, 42 insertions(+), 2 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 47573f71b8..4999569503 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -5126,6 +5126,18 @@ void hvm_toggle_singlestep(struct vcpu *v)
     v->arch.hvm.single_step = !v->arch.hvm.single_step;
 }
 
+void hvm_fast_singlestep(struct vcpu *v, uint16_t p2midx)
+{
+    ASSERT(atomic_read(&v->pause_count));
+
+    if ( !hvm_is_singlestep_supported() )
+        return;
+
+    v->arch.hvm.single_step = true;
+    v->arch.hvm.fast_single_step.enabled = true;
+    v->arch.hvm.fast_single_step.p2midx = p2midx;
+}
+
 /*
  * Segment caches in VMCB/VMCS are inconsistent about which bits are checked,
  * important, and preserved across vmentry/exit. Cook the values to make them
diff --git a/xen/arch/x86/hvm/monitor.c b/xen/arch/x86/hvm/monitor.c
index 1f23fe25e8..8c05e33922 100644
--- a/xen/arch/x86/hvm/monitor.c
+++ b/xen/arch/x86/hvm/monitor.c
@@ -28,6 +28,7 @@
 #include <asm/hvm/monitor.h>
 #include <asm/altp2m.h>
 #include <asm/monitor.h>
+#include <asm/p2m.h>
 #include <asm/paging.h>
 #include <asm/vm_event.h>
 #include <public/vm_event.h>
@@ -159,6 +160,14 @@ int hvm_monitor_debug(unsigned long rip, enum hvm_monitor_debug_type type,
     case HVM_MONITOR_SINGLESTEP_BREAKPOINT:
         if ( !ad->monitor.singlestep_enabled )
             return 0;
+        if (curr->arch.hvm.fast_single_step.enabled)
+        {
+            p2m_altp2m_check(curr, curr->arch.hvm.fast_single_step.p2midx);
+            curr->arch.hvm.single_step = false;
+            curr->arch.hvm.fast_single_step.enabled = false;
+            curr->arch.hvm.fast_single_step.p2midx = 0;
+            return 0;
+        }
         req.reason = VM_EVENT_REASON_SINGLESTEP;
         req.u.singlestep.gfn = gfn_of_rip(rip);
         sync = true;
diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c
index 52c2a71fa0..385116b5f2 100644
--- a/xen/arch/x86/vm_event.c
+++ b/xen/arch/x86/vm_event.c
@@ -61,7 +61,8 @@ void vm_event_cleanup_domain(struct domain *d)
 void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v,
                                 vm_event_response_t *rsp)
 {
-    if ( !(rsp->flags & VM_EVENT_FLAG_TOGGLE_SINGLESTEP) )
+    if ( !(rsp->flags & VM_EVENT_FLAG_TOGGLE_SINGLESTEP
+           || rsp->flags & VM_EVENT_FLAG_FAST_SINGLESTEP) )
         return;
 
     if ( !is_hvm_domain(d) )
@@ -69,7 +70,10 @@
 
     ASSERT(atomic_read(&v->vm_event_pause_count));
 
-    hvm_toggle_singlestep(v);
+    if (rsp->flags & VM_EVENT_FLAG_TOGGLE_SINGLESTEP)
+        hvm_toggle_singlestep(v);
+    else
+        hvm_fast_singlestep(v, rsp->u.fast_singlestep.p2midx);
 }
 
 void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp)
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 1d7b66f927..09793c12e9 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -323,6 +323,7 @@ int hvm_debug_op(struct vcpu *v, int32_t op);
 
 /* Caller should pause vcpu before calling this function */
 void hvm_toggle_singlestep(struct vcpu *v);
+void hvm_fast_singlestep(struct vcpu *v, uint16_t p2midx);
 
 int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
                               struct npfec npfec);
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 38f5c2bb9b..8b84941111 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -172,6 +172,10 @@ struct hvm_vcpu {
     bool                flag_dr_dirty;
     bool                debug_state_latch;
    bool                single_step;
+    struct {
+        bool     enabled;
+        uint16_t p2midx;
+    } fast_single_step;
 
     struct hvm_vcpu_asid n1asid;
 
diff --git a/xen/include/public/vm_event.h b/xen/include/public/vm_event.h
index aa54c86325..cb577a7ba9 100644
--- a/xen/include/public/vm_event.h
+++ b/xen/include/public/vm_event.h
@@ -110,6 +110,11 @@
  * interrupt pending after resuming the VCPU.
  */
 #define VM_EVENT_FLAG_GET_NEXT_INTERRUPT (1 << 10)
+/*
+ * Execute fast singlestepping on vm_event response.
+ * Requires the vCPU to be paused already (synchronous events only).
+ */
+#define VM_EVENT_FLAG_FAST_SINGLESTEP    (1 << 11)
 
 /*
  * Reasons for the vm event request
@@ -276,6 +281,10 @@ struct vm_event_singlestep {
     uint64_t gfn;
 };
 
+struct vm_event_fast_singlestep {
+    uint16_t p2midx;
+};
+
 struct vm_event_debug {
     uint64_t gfn;
     uint32_t insn_length;
@@ -363,6 +372,7 @@ typedef struct vm_event_st {
         struct vm_event_mov_to_msr            mov_to_msr;
         struct vm_event_desc_access           desc_access;
         struct vm_event_singlestep            singlestep;
+        struct vm_event_fast_singlestep       fast_singlestep;
         struct vm_event_debug                 software_breakpoint;
         struct vm_event_debug                 debug_exception;
         struct vm_event_cpuid                 cpuid;
-- 
2.20.1
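For reference, a vm_event consumer (e.g. an introspection agent that hides
breakpoints behind an altp2m view) might respond to the breakpoint event
roughly as below. This is only a sketch and not part of the patch: the helper
name, the choice of view 0 as the unrestricted view, and the surrounding ring
handling are assumptions; only VM_EVENT_FLAG_FAST_SINGLESTEP and
u.fast_singlestep.p2midx come from this series.

/*
 * Illustrative only: build the response for a software breakpoint that was
 * trapped in the restricted altp2m view 'shadow_view'.  The vCPU steps one
 * instruction in the unrestricted view (assumed index 0 here) and Xen then
 * switches back to 'shadow_view' itself, without delivering a
 * VM_EVENT_REASON_SINGLESTEP event to the monitor.
 */
#include <string.h>
#include <xenctrl.h>
#include <xen/vm_event.h>

static void fill_fast_singlestep_rsp(const vm_event_request_t *req,
                                     vm_event_response_t *rsp,
                                     uint16_t shadow_view)
{
    memset(rsp, 0, sizeof(*rsp));
    rsp->version = VM_EVENT_INTERFACE_VERSION;
    rsp->vcpu_id = req->vcpu_id;
    rsp->reason  = req->reason;

    rsp->flags = VM_EVENT_FLAG_VCPU_PAUSED       /* event was synchronous */
               | VM_EVENT_FLAG_ALTERNATE_P2M     /* step in the clean view */
               | VM_EVENT_FLAG_FAST_SINGLESTEP;  /* new flag from this patch */
    rsp->altp2m_idx = 0;                         /* view to single step in */
    rsp->u.fast_singlestep.p2midx = shadow_view; /* view restored after step */
}

Compared with the existing VM_EVENT_FLAG_TOGGLE_SINGLESTEP flow, the monitor
no longer receives and answers the singlestep event, which is where the two
saved context switches come from.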