
Re: [Xen-devel] [PATCH v6] x86/hvm: add vcpu parameter to guest memory copy function



>>> On 07.02.17 at 18:35, <roger.pau@xxxxxxxxxx> wrote:
> Current __hvm_copy assumes that the destination memory belongs to the
> current vcpu, but this is not always the case: during the PVHv2 Dom0 build
> the hvm copy functions are used while current is the idle vcpu. Add a new
> vcpu parameter to hvm copy in order to solve that. Note that only
> hvm_copy_to_guest_phys is changed to take a vcpu parameter, because at the
> moment it is the only one required in order to build a PVHv2 Dom0.
> 
> While there, also assert that the passed vcpu belongs to an HVM guest.
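> 
> As an illustration (not part of this patch), the intended call pattern in
> the PVHv2 Dom0 builder would look roughly like the sketch below, where 'd'
> is the domain under construction and 'gpa', 'buf' and 'size' are
> illustrative placeholders:
> 
>     struct vcpu *v = d->vcpu[0];    /* current is the idle vcpu here */
> 
>     /* Copy through the guest's own p2m instead of current's. */
>     if ( hvm_copy_to_guest_phys(gpa, buf, size, v) != HVMCOPY_okay )
>         return -EFAULT;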
> 
> Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>

Various Cc-s missing here, btw.

Jan

> ---
> Changes since v5:
>  - Name vcpu param 'v' in __hvm_copy and move its position in the parameter
>    list.
>  - Do not call hvm_mmio_internal if v != current.
>  - Remove hvm_copy_to_guest_phys_vcpu and instead modify
>    hvm_copy_to_guest_phys to take a vcpu parameter.
>  - Fix parameter passed to %pv printk modifier.
> 
> Changes since v4:
>  - New in the series.
> ---
>  xen/arch/x86/hvm/emulate.c        |  4 ++--
>  xen/arch/x86/hvm/hvm.c            | 35 ++++++++++++++++++-----------------
>  xen/arch/x86/hvm/intercept.c      |  2 +-
>  xen/arch/x86/hvm/vmx/realmode.c   |  2 +-
>  xen/include/asm-x86/hvm/support.h |  2 +-
>  5 files changed, 23 insertions(+), 22 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
> index 0d21fe1..fed8801 100644
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -1294,7 +1294,7 @@ static int hvmemul_rep_movs(
>          rc = hvm_copy_from_guest_phys(buf, sgpa, bytes);
>  
>      if ( rc == HVMCOPY_okay )
> -        rc = hvm_copy_to_guest_phys(dgpa, buf, bytes);
> +        rc = hvm_copy_to_guest_phys(dgpa, buf, bytes, current);
>  
>      xfree(buf);
>  
> @@ -1405,7 +1405,7 @@ static int hvmemul_rep_stos(
>          if ( df )
>              gpa -= bytes - bytes_per_rep;
>  
> -        rc = hvm_copy_to_guest_phys(gpa, buf, bytes);
> +        rc = hvm_copy_to_guest_phys(gpa, buf, bytes, current);
>  
>          if ( buf != p_data )
>              xfree(buf);
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 21a1649..42ff9ff 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -3082,16 +3082,17 @@ void hvm_task_switch(
>  #define HVMCOPY_phys       (0u<<2)
>  #define HVMCOPY_linear     (1u<<2)
>  static enum hvm_copy_result __hvm_copy(
> -    void *buf, paddr_t addr, int size, unsigned int flags, uint32_t pfec,
> -    pagefault_info_t *pfinfo)
> +    void *buf, paddr_t addr, int size, struct vcpu *v, unsigned int flags,
> +    uint32_t pfec, pagefault_info_t *pfinfo)
>  {
> -    struct vcpu *curr = current;
>      unsigned long gfn;
>      struct page_info *page;
>      p2m_type_t p2mt;
>      char *p;
>      int count, todo = size;
>  
> +    ASSERT(has_hvm_container_vcpu(v));
> +
>      /*
>       * XXX Disable for 4.1.0: PV-on-HVM drivers will do grant-table ops
>       * such as query_size. Grant-table code currently does copy_to/from_guest
> @@ -3116,7 +3117,7 @@ static enum hvm_copy_result __hvm_copy(
>  
>          if ( flags & HVMCOPY_linear )
>          {
> -            gfn = paging_gva_to_gfn(curr, addr, &pfec);
> +            gfn = paging_gva_to_gfn(v, addr, &pfec);
>              if ( gfn == gfn_x(INVALID_GFN) )
>              {
>                  if ( pfec & PFEC_page_paged )
> @@ -3143,12 +3144,12 @@ static enum hvm_copy_result __hvm_copy(
>           * - 32-bit WinXP (& older Windows) on AMD CPUs for LAPIC accesses,
>           * - newer Windows (like Server 2012) for HPET accesses.
>           */
> -        if ( !nestedhvm_vcpu_in_guestmode(curr)
> -             && is_hvm_vcpu(curr)
> +        if ( !nestedhvm_vcpu_in_guestmode(v)
> +             && is_hvm_vcpu(v) && v == current
>               && hvm_mmio_internal(gpa) )
>              return HVMCOPY_bad_gfn_to_mfn;
>  
> -        page = get_page_from_gfn(curr->domain, gfn, &p2mt, P2M_UNSHARE);
> +        page = get_page_from_gfn(v->domain, gfn, &p2mt, P2M_UNSHARE);
>  
>          if ( !page )
>              return HVMCOPY_bad_gfn_to_mfn;
> @@ -3156,7 +3157,7 @@ static enum hvm_copy_result __hvm_copy(
>          if ( p2m_is_paging(p2mt) )
>          {
>              put_page(page);
> -            p2m_mem_paging_populate(curr->domain, gfn);
> +            p2m_mem_paging_populate(v->domain, gfn);
>              return HVMCOPY_gfn_paged_out;
>          }
>          if ( p2m_is_shared(p2mt) )
> @@ -3178,9 +3179,9 @@ static enum hvm_copy_result __hvm_copy(
>              {
>                  static unsigned long lastpage;
>                  if ( xchg(&lastpage, gfn) != gfn )
> -                    gdprintk(XENLOG_DEBUG, "guest attempted write to read-only"
> +                    printk(XENLOG_DEBUG "%pv guest attempted write to read-only"
>                               " memory page. gfn=%#lx, mfn=%#lx\n",
> -                             gfn, page_to_mfn(page));
> +                             v, gfn, page_to_mfn(page));
>              }
>              else
>              {
> @@ -3188,7 +3189,7 @@ static enum hvm_copy_result __hvm_copy(
>                      memcpy(p, buf, count);
>                  else
>                      memset(p, 0, count);
> -                paging_mark_dirty(curr->domain, _mfn(page_to_mfn(page)));
> +                paging_mark_dirty(v->domain, _mfn(page_to_mfn(page)));
>              }
>          }
>          else
> @@ -3209,16 +3210,16 @@ static enum hvm_copy_result __hvm_copy(
>  }
>  
>  enum hvm_copy_result hvm_copy_to_guest_phys(
> -    paddr_t paddr, void *buf, int size)
> +    paddr_t paddr, void *buf, int size, struct vcpu *v)
>  {
> -    return __hvm_copy(buf, paddr, size,
> +    return __hvm_copy(buf, paddr, size, v,
>                        HVMCOPY_to_guest | HVMCOPY_phys, 0, NULL);
>  }
>  
>  enum hvm_copy_result hvm_copy_from_guest_phys(
>      void *buf, paddr_t paddr, int size)
>  {
> -    return __hvm_copy(buf, paddr, size,
> +    return __hvm_copy(buf, paddr, size, current,
>                        HVMCOPY_from_guest | HVMCOPY_phys, 0, NULL);
>  }
>  
> @@ -3226,7 +3227,7 @@ enum hvm_copy_result hvm_copy_to_guest_linear(
>      unsigned long addr, void *buf, int size, uint32_t pfec,
>      pagefault_info_t *pfinfo)
>  {
> -    return __hvm_copy(buf, addr, size,
> +    return __hvm_copy(buf, addr, size, current,
>                        HVMCOPY_to_guest | HVMCOPY_linear,
>                        PFEC_page_present | PFEC_write_access | pfec, pfinfo);
>  }
> @@ -3235,7 +3236,7 @@ enum hvm_copy_result hvm_copy_from_guest_linear(
>      void *buf, unsigned long addr, int size, uint32_t pfec,
>      pagefault_info_t *pfinfo)
>  {
> -    return __hvm_copy(buf, addr, size,
> +    return __hvm_copy(buf, addr, size, current,
>                        HVMCOPY_from_guest | HVMCOPY_linear,
>                        PFEC_page_present | pfec, pfinfo);
>  }
> @@ -3244,7 +3245,7 @@ enum hvm_copy_result hvm_fetch_from_guest_linear(
>      void *buf, unsigned long addr, int size, uint32_t pfec,
>      pagefault_info_t *pfinfo)
>  {
> -    return __hvm_copy(buf, addr, size,
> +    return __hvm_copy(buf, addr, size, current,
>                        HVMCOPY_from_guest | HVMCOPY_linear,
>                        PFEC_page_present | PFEC_insn_fetch | pfec, pfinfo);
>  }
> diff --git a/xen/arch/x86/hvm/intercept.c b/xen/arch/x86/hvm/intercept.c
> index 721fb38..5157e9e 100644
> --- a/xen/arch/x86/hvm/intercept.c
> +++ b/xen/arch/x86/hvm/intercept.c
> @@ -135,7 +135,7 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler,
>              if ( p->data_is_ptr )
>              {
>                  switch ( hvm_copy_to_guest_phys(p->data + step * i,
> -                                                &data, p->size) )
> +                                                &data, p->size, current) )
>                  {
>                  case HVMCOPY_okay:
>                      break;
> diff --git a/xen/arch/x86/hvm/vmx/realmode.c b/xen/arch/x86/hvm/vmx/realmode.c
> index 7ef4e45..40efad2 100644
> --- a/xen/arch/x86/hvm/vmx/realmode.c
> +++ b/xen/arch/x86/hvm/vmx/realmode.c
> @@ -77,7 +77,7 @@ static void realmode_deliver_exception(
>          pstk = regs->sp -= 6;
>  
>      pstk += hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt)->base;
> -    (void)hvm_copy_to_guest_phys(pstk, frame, sizeof(frame));
> +    (void)hvm_copy_to_guest_phys(pstk, frame, sizeof(frame), current);
>  
>      csr->sel  = cs_eip >> 16;
>      csr->base = (uint32_t)csr->sel << 4;
> diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
> index ba5899c..16550c5 100644
> --- a/xen/include/asm-x86/hvm/support.h
> +++ b/xen/include/asm-x86/hvm/support.h
> @@ -68,7 +68,7 @@ enum hvm_copy_result {
>   * address range does not map entirely onto ordinary machine memory.
>   */
>  enum hvm_copy_result hvm_copy_to_guest_phys(
> -    paddr_t paddr, void *buf, int size);
> +    paddr_t paddr, void *buf, int size, struct vcpu *v);
>  enum hvm_copy_result hvm_copy_from_guest_phys(
>      void *buf, paddr_t paddr, int size);
>  
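> 
> For reference, a minimal sketch of how a call site consumes the result
> under the updated signature ('gpa', 'data', 'v' and 'rc' are illustrative
> names, not taken from this patch):
> 
>     switch ( hvm_copy_to_guest_phys(gpa, &data, sizeof(data), v) )
>     {
>     case HVMCOPY_okay:
>         break;
>     default:
>         /* Range doesn't map entirely onto ordinary machine memory. */
>         rc = X86EMUL_UNHANDLEABLE;
>         break;
>     }
> 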
> -- 
> 2.10.1 (Apple Git-78)
> 
> 



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel