
Re: [Xen-devel] [PATCH v2 17/19] x86/hvm: Avoid __hvm_copy() raising #PF behind the emulator's back



> -----Original Message-----
> From: Andrew Cooper [mailto:andrew.cooper3@xxxxxxxxxx]
> Sent: 28 November 2016 11:14
> To: Xen-devel <xen-devel@xxxxxxxxxxxxx>
> Cc: Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>; Jan Beulich
> <JBeulich@xxxxxxxx>; Paul Durrant <Paul.Durrant@xxxxxxxxxx>; Tim
> (Xen.org) <tim@xxxxxxx>; Jun Nakajima <jun.nakajima@xxxxxxxxx>; Kevin
> Tian <kevin.tian@xxxxxxxxx>
> Subject: [PATCH v2 17/19] x86/hvm: Avoid __hvm_copy() raising #PF behind
> the emulator's back
> 
> Drop the call to hvm_inject_page_fault() in __hvm_copy(), and require
> callers to inject the pagefault themselves.
> 
> No functional change.

That's not the way it looks on the face of it. You've indeed removed the
call to hvm_inject_page_fault(), but some of the callers now call
x86_emul_pagefault() instead. I'd call that a functional change... clearly
the change you intended, but still a functional change.
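
Just to make the distinction concrete, here is roughly what a caller has
to look like after this patch. This is a sketch with hypothetical names,
not code lifted from the tree; copy_example() and its arguments are
placeholders:

    static int copy_example(void *buf, unsigned long addr, unsigned int size)
    {
        pagefault_info_t pfinfo;
        enum hvm_copy_result rc;

        rc = hvm_copy_from_guest_linear(buf, addr, size, PFEC_page_present,
                                        &pfinfo);

        /* Previously __hvm_copy() queued the #PF itself; now the caller
         * has to inject it (or, in the emulation paths, record it in the
         * emulation context with x86_emul_pagefault()). */
        if ( rc == HVMCOPY_bad_gva_to_gfn )
            hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
        if ( rc != HVMCOPY_okay )
            return X86EMUL_EXCEPTION;

        return X86EMUL_OKAY;
    }

Before the patch the injection line simply wasn't needed, and the fault
was delivered behind the emulator's back; that difference in who queues
the exception is the functional change I'm referring to.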

  Paul

> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> ---
> CC: Jan Beulich <JBeulich@xxxxxxxx>
> CC: Paul Durrant <paul.durrant@xxxxxxxxxx>
> CC: Tim Deegan <tim@xxxxxxx>
> CC: Jun Nakajima <jun.nakajima@xxxxxxxxx>
> CC: Kevin Tian <kevin.tian@xxxxxxxxx>
> ---
>  xen/arch/x86/hvm/emulate.c        |  2 ++
>  xen/arch/x86/hvm/hvm.c            | 11 +++++++++--
>  xen/arch/x86/hvm/vmx/vvmx.c       | 20 +++++++++++++++-----
>  xen/arch/x86/mm/shadow/common.c   |  1 +
>  xen/include/asm-x86/hvm/support.h |  4 +---
>  5 files changed, 28 insertions(+), 10 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
> index efd6d32..f07c026 100644
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -799,6 +799,7 @@ static int __hvmemul_read(
>      case HVMCOPY_okay:
>          break;
>      case HVMCOPY_bad_gva_to_gfn:
> +        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
>          return X86EMUL_EXCEPTION;
>      case HVMCOPY_bad_gfn_to_mfn:
>          if ( access_type == hvm_access_insn_fetch )
> @@ -905,6 +906,7 @@ static int hvmemul_write(
>      case HVMCOPY_okay:
>          break;
>      case HVMCOPY_bad_gva_to_gfn:
> +        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
>          return X86EMUL_EXCEPTION;
>      case HVMCOPY_bad_gfn_to_mfn:
>          return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec,
>                                           hvmemul_ctxt, 0);
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 37eaee2..ce77520 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -2927,6 +2927,8 @@ void hvm_task_switch(
> 
>      rc = hvm_copy_from_guest_linear(
>          &tss, prev_tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
> +    if ( rc == HVMCOPY_bad_gva_to_gfn )
> +        hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
>      if ( rc != HVMCOPY_okay )
>          goto out;
> 
> @@ -2965,11 +2967,15 @@ void hvm_task_switch(
>                                    offsetof(typeof(tss), trace) -
>                                    offsetof(typeof(tss), eip),
>                                    PFEC_page_present, &pfinfo);
> +    if ( rc == HVMCOPY_bad_gva_to_gfn )
> +        hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
>      if ( rc != HVMCOPY_okay )
>          goto out;
> 
>      rc = hvm_copy_from_guest_linear(
>          &tss, tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
> +    if ( rc == HVMCOPY_bad_gva_to_gfn )
> +        hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
>      /*
>       * Note: The HVMCOPY_gfn_shared case could be optimised, if the callee
>       * functions knew we want RO access.
> @@ -3012,7 +3018,10 @@ void hvm_task_switch(
>                                        &tss.back_link, sizeof(tss.back_link), 0,
>                                        &pfinfo);
>          if ( rc == HVMCOPY_bad_gva_to_gfn )
> +        {
> +            hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
>              exn_raised = 1;
> +        }
>          else if ( rc != HVMCOPY_okay )
>              goto out;
>      }
> @@ -3114,8 +3123,6 @@ static enum hvm_copy_result __hvm_copy(
>                  {
>                      pfinfo->linear = addr;
>                      pfinfo->ec = pfec;
> -
> -                    hvm_inject_page_fault(pfec, addr);
>                  }
>                  return HVMCOPY_bad_gva_to_gfn;
>              }
> diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
> index fd7ea0a..e6e9ebd 100644
> --- a/xen/arch/x86/hvm/vmx/vvmx.c
> +++ b/xen/arch/x86/hvm/vmx/vvmx.c
> @@ -396,7 +396,6 @@ static int decode_vmx_inst(struct cpu_user_regs *regs,
>      struct vcpu *v = current;
>      union vmx_inst_info info;
>      struct segment_register seg;
> -    pagefault_info_t pfinfo;
>      unsigned long base, index, seg_base, disp, offset;
>      int scale, size;
> 
> @@ -451,10 +450,17 @@ static int decode_vmx_inst(struct cpu_user_regs *regs,
>                offset + size - 1 > seg.limit) )
>              goto gp_fault;
> 
> -        if ( poperandS != NULL &&
> -             hvm_copy_from_guest_linear(poperandS, base, size, 0, &pfinfo)
> -                  != HVMCOPY_okay )
> -            return X86EMUL_EXCEPTION;
> +        if ( poperandS != NULL )
> +        {
> +            pagefault_info_t pfinfo;
> +            int rc = hvm_copy_from_guest_linear(poperandS, base, size,
> +                                                0, &pfinfo);
> +
> +            if ( rc == HVMCOPY_bad_gva_to_gfn )
> +                hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
> +            if ( rc != HVMCOPY_okay )
> +                return X86EMUL_EXCEPTION;
> +        }
>          decode->mem = base;
>          decode->len = size;
>      }
> @@ -1623,6 +1629,8 @@ int nvmx_handle_vmptrst(struct cpu_user_regs *regs)
>      gpa = nvcpu->nv_vvmcxaddr;
> 
>      rc = hvm_copy_to_guest_linear(decode.mem, &gpa, decode.len, 0, &pfinfo);
> +    if ( rc == HVMCOPY_bad_gva_to_gfn )
> +        hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
>      if ( rc != HVMCOPY_okay )
>          return X86EMUL_EXCEPTION;
> 
> @@ -1694,6 +1702,8 @@ int nvmx_handle_vmread(struct cpu_user_regs *regs)
>      switch ( decode.type ) {
>      case VMX_INST_MEMREG_TYPE_MEMORY:
>          rc = hvm_copy_to_guest_linear(decode.mem, &value, decode.len, 0,
>                                        &pfinfo);
> +        if ( rc == HVMCOPY_bad_gva_to_gfn )
> +            hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
>          if ( rc != HVMCOPY_okay )
>              return X86EMUL_EXCEPTION;
>          break;
> diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
> index 0760e76..fbe49e1 100644
> --- a/xen/arch/x86/mm/shadow/common.c
> +++ b/xen/arch/x86/mm/shadow/common.c
> @@ -198,6 +198,7 @@ hvm_read(enum x86_segment seg,
>      case HVMCOPY_okay:
>          return X86EMUL_OKAY;
>      case HVMCOPY_bad_gva_to_gfn:
> +        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &sh_ctxt->ctxt);
>          return X86EMUL_EXCEPTION;
>      case HVMCOPY_bad_gfn_to_mfn:
>      case HVMCOPY_unhandleable:
> diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
> index 78349f8..3d767d7 100644
> --- a/xen/include/asm-x86/hvm/support.h
> +++ b/xen/include/asm-x86/hvm/support.h
> @@ -85,9 +85,7 @@ enum hvm_copy_result hvm_copy_from_guest_phys(
>   *  HVMCOPY_bad_gva_to_gfn: Some guest virtual address did not have a valid
>   *                          mapping to a guest physical address.  The
>   *                          pagefault_info_t structure will be filled in if
> - *                          provided, and a page fault exception is
> - *                          automatically queued for injection into the
> - *                          current HVM VCPU.
> + *                          provided.
>   */
>  typedef struct pagefault_info
>  {
> --
> 2.1.4

