[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v6] hvm/svm: Implement Debug events



On Mon, Mar 26, 2018 at 3:43 AM, Alexandru Isaila
<aisaila@xxxxxxxxxxxxxxx> wrote:
> At this moment, Debug events on the AMD architecture are not
> forwarded to the monitor layer.
>
> This patch adds the Debug event to the common capabilities, adds
> handling for VMEXIT_ICEBP, and then forwards the event to the
> monitor layer.
>
> Chapter 2: SVM Processor and Platform Extensions: "Note: A vector 1
> exception generated by the single byte INT1
> instruction (also known as ICEBP) does not trigger the #DB
> intercept. Software should use the dedicated ICEBP
> intercept to intercept ICEBP"
>
> Signed-off-by: Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>

Monitor bits:
Acked-by: Tamas K Lengyel <tamas@xxxxxxxxxxxxx>

>
> ---
> Changes since V5:
>         -Reformulate if in case VMEXIT_ICEBP
> ---
>  xen/arch/x86/hvm/svm/emulate.c        |  1 +
>  xen/arch/x86/hvm/svm/svm.c            | 65 
> ++++++++++++++++++++++++++---------
>  xen/arch/x86/monitor.c                |  3 ++
>  xen/include/asm-x86/hvm/hvm.h         | 25 ++++++++++++++
>  xen/include/asm-x86/hvm/svm/emulate.h |  1 +
>  xen/include/asm-x86/monitor.h         |  4 +--
>  6 files changed, 81 insertions(+), 18 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/svm/emulate.c b/xen/arch/x86/hvm/svm/emulate.c
> index e1a1581..535674e 100644
> --- a/xen/arch/x86/hvm/svm/emulate.c
> +++ b/xen/arch/x86/hvm/svm/emulate.c
> @@ -65,6 +65,7 @@ static const struct {
>  } opc_tab[INSTR_MAX_COUNT] = {
>      [INSTR_PAUSE]   = { X86EMUL_OPC_F3(0, 0x90) },
>      [INSTR_INT3]    = { X86EMUL_OPC(   0, 0xcc) },
> +    [INSTR_ICEBP]   = { X86EMUL_OPC(   0, 0xf1) },
>      [INSTR_HLT]     = { X86EMUL_OPC(   0, 0xf4) },
>      [INSTR_XSETBV]  = { X86EMUL_OPC(0x0f, 0x01), MODRM(3, 2, 1) },
>      [INSTR_VMRUN]   = { X86EMUL_OPC(0x0f, 0x01), MODRM(3, 3, 0) },
> diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
> index c34f5b5..1bdb8e0 100644
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -172,6 +172,24 @@ static void svm_enable_msr_interception(struct domain 
> *d, uint32_t msr)
>          svm_intercept_msr(v, msr, MSR_INTERCEPT_WRITE);
>  }
>
> +static void svm_set_icebp_interception(struct domain *d, bool enable)
> +{
> +    const struct vcpu *v;
> +
> +    for_each_vcpu ( d, v )
> +    {
> +        struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
> +        uint32_t intercepts = vmcb_get_general2_intercepts(vmcb);
> +
> +        if ( enable )
> +            intercepts |= GENERAL2_INTERCEPT_ICEBP;
> +        else
> +            intercepts &= ~GENERAL2_INTERCEPT_ICEBP;
> +
> +        vmcb_set_general2_intercepts(vmcb, intercepts);
> +    }
> +}
> +
>  static void svm_save_dr(struct vcpu *v)
>  {
>      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
> @@ -1109,7 +1127,8 @@ static void noreturn svm_do_resume(struct vcpu *v)
>  {
>      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
>      bool debug_state = (v->domain->debugger_attached ||
> -                        v->domain->arch.monitor.software_breakpoint_enabled);
> +                        v->domain->arch.monitor.software_breakpoint_enabled 
> ||
> +                        v->domain->arch.monitor.debug_exception_enabled);
>      bool_t vcpu_guestmode = 0;
>      struct vlapic *vlapic = vcpu_vlapic(v);
>
> @@ -2438,19 +2457,6 @@ static bool svm_get_pending_event(struct vcpu *v, 
> struct x86_event *info)
>      return true;
>  }
>
> -static void svm_propagate_intr(struct vcpu *v, unsigned long insn_len)
> -{
> -    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
> -    struct x86_event event = {
> -        .vector = vmcb->eventinj.fields.type,
> -        .type = vmcb->eventinj.fields.type,
> -        .error_code = vmcb->exitinfo1,
> -    };
> -
> -    event.insn_len = insn_len;
> -    hvm_inject_event(&event);
> -}
> -
>  static struct hvm_function_table __initdata svm_function_table = {
>      .name                 = "SVM",
>      .cpu_up_prepare       = svm_cpu_up_prepare,
> @@ -2490,6 +2496,7 @@ static struct hvm_function_table __initdata 
> svm_function_table = {
>      .msr_read_intercept   = svm_msr_read_intercept,
>      .msr_write_intercept  = svm_msr_write_intercept,
>      .enable_msr_interception = svm_enable_msr_interception,
> +    .set_icebp_interception = svm_set_icebp_interception,
>      .set_rdtsc_exiting    = svm_set_rdtsc_exiting,
>      .set_descriptor_access_exiting = svm_set_descriptor_access_exiting,
>      .get_insn_bytes       = svm_get_insn_bytes,
> @@ -2656,9 +2663,33 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
>          HVMTRACE_0D(SMI);
>          break;
>
> +    case VMEXIT_ICEBP:
>      case VMEXIT_EXCEPTION_DB:
>          if ( !v->domain->debugger_attached )
> -            hvm_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
> +        {
> +            int rc;
> +            unsigned int trap_type;
> +
> +            if ( likely(exit_reason != VMEXIT_ICEBP) )
> +            {
> +                trap_type = X86_EVENTTYPE_HW_EXCEPTION;
> +                inst_len = 0;
> +            }
> +            else
> +            {
> +                trap_type = X86_EVENTTYPE_PRI_SW_EXCEPTION;
> +                inst_len = __get_instruction_length(v, INSTR_ICEBP);
> +            }
> +
> +            rc = hvm_monitor_debug(regs->rip,
> +                                   HVM_MONITOR_DEBUG_EXCEPTION,
> +                                   trap_type, inst_len);
> +            if ( rc < 0 )
> +                goto unexpected_exit_type;
> +            if ( !rc )
> +                hvm_inject_exception(TRAP_debug,
> +                                     trap_type, inst_len, X86_EVENT_NO_EC);
> +        }
>          else
>              domain_pause_for_debugger();
>          break;
> @@ -2687,7 +2718,9 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
>             if ( rc < 0 )
>                 goto unexpected_exit_type;
>             if ( !rc )
> -               svm_propagate_intr(v, inst_len);
> +               hvm_inject_exception(TRAP_int3,
> +                                    X86_EVENTTYPE_SW_EXCEPTION,
> +                                    inst_len, X86_EVENT_NO_EC);
>          }
>          break;
>
> diff --git a/xen/arch/x86/monitor.c b/xen/arch/x86/monitor.c
> index 4317658..3fb6531 100644
> --- a/xen/arch/x86/monitor.c
> +++ b/xen/arch/x86/monitor.c
> @@ -288,6 +288,9 @@ int arch_monitor_domctl_event(struct domain *d,
>          ad->monitor.debug_exception_sync = requested_status ?
>                                              mop->u.debug_exception.sync :
>                                              0;
> +
> +        hvm_set_icebp_interception(d, requested_status);
> +
>          domain_unpause(d);
>          break;
>      }
> diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
> index 2376ed6..0775d0c 100644
> --- a/xen/include/asm-x86/hvm/hvm.h
> +++ b/xen/include/asm-x86/hvm/hvm.h
> @@ -209,6 +209,7 @@ struct hvm_function_table {
>                                  bool_t access_w, bool_t access_x);
>
>      void (*enable_msr_interception)(struct domain *d, uint32_t msr);
> +    void (*set_icebp_interception)(struct domain *d, bool enable);
>      bool_t (*is_singlestep_supported)(void);
>
>      /* Alternate p2m */
> @@ -407,6 +408,20 @@ void hvm_migrate_pirqs(struct vcpu *v);
>
>  void hvm_inject_event(const struct x86_event *event);
>
> +static inline void hvm_inject_exception(
> +    unsigned int vector, unsigned int type,
> +    unsigned int insn_len, int error_code)
> +{
> +    struct x86_event event = {
> +        .vector = vector,
> +        .type = type,
> +        .insn_len = insn_len,
> +        .error_code = error_code,
> +    };
> +
> +    hvm_inject_event(&event);
> +}
> +
>  static inline void hvm_inject_hw_exception(unsigned int vector, int errcode)
>  {
>      struct x86_event event = {
> @@ -581,6 +596,16 @@ static inline bool_t hvm_enable_msr_interception(struct 
> domain *d, uint32_t msr)
>      return 0;
>  }
>
> +static inline bool hvm_set_icebp_interception(struct domain *d, bool enable)
> +{
> +    if ( hvm_funcs.set_icebp_interception )
> +    {
> +        hvm_funcs.set_icebp_interception(d, enable);
> +        return true;
> +    }
> +    return false;
> +}
> +
>  static inline bool_t hvm_is_singlestep_supported(void)
>  {
>      return (hvm_funcs.is_singlestep_supported &&
> diff --git a/xen/include/asm-x86/hvm/svm/emulate.h 
> b/xen/include/asm-x86/hvm/svm/emulate.h
> index 7c1dcd1..3de8236 100644
> --- a/xen/include/asm-x86/hvm/svm/emulate.h
> +++ b/xen/include/asm-x86/hvm/svm/emulate.h
> @@ -38,6 +38,7 @@ enum instruction_index {
>      INSTR_STGI,
>      INSTR_CLGI,
>      INSTR_INVLPGA,
> +    INSTR_ICEBP,
>      INSTR_MAX_COUNT /* Must be last - Number of instructions supported */
>  };
>
> diff --git a/xen/include/asm-x86/monitor.h b/xen/include/asm-x86/monitor.h
> index 99ed4b87..c5a86d1 100644
> --- a/xen/include/asm-x86/monitor.h
> +++ b/xen/include/asm-x86/monitor.h
> @@ -82,12 +82,12 @@ static inline uint32_t 
> arch_monitor_get_capabilities(struct domain *d)
>                      (1U << XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR) |
>                      (1U << XEN_DOMCTL_MONITOR_EVENT_INTERRUPT) |
>                      (1U << XEN_DOMCTL_MONITOR_EVENT_CPUID) |
> +                    (1U << XEN_DOMCTL_MONITOR_EVENT_DEBUG_EXCEPTION) |
>                      (1U << XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG));
>
>      if ( cpu_has_vmx )
>      {
> -        capabilities |= ((1U << XEN_DOMCTL_MONITOR_EVENT_DEBUG_EXCEPTION) |
> -                         (1U << 
> XEN_DOMCTL_MONITOR_EVENT_EMUL_UNIMPLEMENTED));
> +        capabilities |= (1U << XEN_DOMCTL_MONITOR_EVENT_EMUL_UNIMPLEMENTED);
>
>          /* Since we know this is on VMX, we can just call the hvm func */
>          if ( hvm_is_singlestep_supported() )
> --
> 2.7.4

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.