
Re: [Xen-devel] [PATCH for-next 06/16] xen/arm: Extend copy_to_guest to support copying from/to guest physical address



On Tue, 5 Dec 2017, Stefano Stabellini wrote:
> On Thu, 23 Nov 2017, Julien Grall wrote:
> > The only differences between copy_to_guest and access_guest_memory_by_ipa 
> > are:
> >     - The latter does not support copying data crossing page boundary
> >     - The former is copying from/to guest VA whilst the latter from
> >     guest PA
> > 
> > copy_to_guest can easily be extended to support copying from/to a guest
> > physical address. For that, a new bit is used to tell whether a linear
> > address or an IPA is being used.
> > 
> > Lastly, access_guest_memory_by_ipa is reimplemented using copy_to_guest.
> > This also has the benefit of extending its use: it is now possible to
> > copy data crossing a page boundary.
> > 
> > Signed-off-by: Julien Grall <julien.grall@xxxxxxxxxx>
> 
> Ah! This is the reason why previous patches were not using vaddr_t. It
> makes sense now. May I suggest we use something different from paddr_t
> in copy_guest for the addr type? I don't think it is correct to specify addr as
> paddr_t when it could be vaddr_t; in the future we could have type
> checks on them.
> 
> I suggest we specify it as u64, but if you have a better idea go for it.
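> 
> Something along these lines, maybe (just a sketch, not even
> compile-tested):
> 
>     static unsigned long copy_guest(void *buf, uint64_t addr, unsigned int len,
>                                     struct vcpu *v, unsigned int flags);
> 
> That would make it clear the parameter can be either a guest VA or a
> guest IPA, and we could still move to stricter typing later if we ever
> introduce those checks.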
> 
> 
> > ---
> >  xen/arch/arm/guestcopy.c | 86 ++++++++++++++++++++++--------------------------
> >  1 file changed, 39 insertions(+), 47 deletions(-)
> > 
> > diff --git a/xen/arch/arm/guestcopy.c b/xen/arch/arm/guestcopy.c
> > index 487f5ab82d..be53bee559 100644
> > --- a/xen/arch/arm/guestcopy.c
> > +++ b/xen/arch/arm/guestcopy.c
> > @@ -8,6 +8,31 @@
> >  #define COPY_flush_dcache   (1U << 0)
> >  #define COPY_from_guest     (0U << 1)
> >  #define COPY_to_guest       (1U << 1)
> > +#define COPY_ipa            (0U << 2)
> > +#define COPY_linear         (1U << 2)
> > +
> > +static struct page_info *translate_get_page(struct vcpu *v, paddr_t addr,
> > +                                            bool linear, bool write)
> > +{
> > +    p2m_type_t p2mt;
> > +    struct page_info *page;
> > +
> > +    if ( linear )
> > +        return get_page_from_gva(v, addr, write ? GV2M_WRITE : GV2M_READ);
> > +
> > +    page = get_page_from_gfn(v->domain, paddr_to_pfn(addr), &p2mt, P2M_ALLOC);
> > +
> > +    if ( !page )
> > +        return NULL;
> > +
> > +    if ( !p2m_is_ram(p2mt) )
> > +    {
> > +        put_page(page);
> > +        return NULL;
> > +    }
> > +
> > +    return page;
> > +}
> >  
> >  static unsigned long copy_guest(void *buf, paddr_t addr, unsigned int len,
> >                                  struct vcpu *v, unsigned int flags)
> > @@ -21,8 +46,8 @@ static unsigned long copy_guest(void *buf, paddr_t addr, unsigned int len,
> >          unsigned size = min(len, (unsigned)PAGE_SIZE - offset);
> >          struct page_info *page;
> >  
> > -        page = get_page_from_gva(v, addr,
> > -                                 (flags & COPY_to_guest) ? GV2M_WRITE : GV2M_READ);
> > +        page = translate_get_page(v, addr, flags & COPY_linear,
> > +                                  flags & COPY_to_guest);
> >          if ( page == NULL )
> >              return len;
> >  
> > @@ -63,73 +88,40 @@ static unsigned long copy_guest(void *buf, paddr_t addr, unsigned int len,
> >  unsigned long raw_copy_to_guest(void *to, const void *from, unsigned len)
> >  {
> >      return copy_guest((void *)from, (unsigned long)to, len,
> > -                      current, COPY_to_guest);
> > +                      current, COPY_to_guest | COPY_linear);
> >  }
> >  
> >  unsigned long raw_copy_to_guest_flush_dcache(void *to, const void *from,
> >                                               unsigned len)
> >  {
> >      return copy_guest((void *)from, (unsigned long)to, len,
> > -                      current, COPY_to_guest | COPY_flush_dcache);
> > +                      current, COPY_to_guest | COPY_flush_dcache | COPY_linear);
> >  }
> >  
> >  unsigned long raw_clear_guest(void *to, unsigned len)
> >  {
> > -    return copy_guest(NULL, (unsigned long)to, len, current, COPY_to_guest);
> > +    return copy_guest(NULL, (unsigned long)to, len, current,
> > +                      COPY_to_guest | COPY_linear);
> >  }
> >  
> >  unsigned long raw_copy_from_guest(void *to, const void __user *from, unsigned len)
> >  {
> > -    return copy_guest(to, (unsigned long)from, len, current, COPY_from_guest);
> > +    return copy_guest(to, (unsigned long)from, len, current,
> > +                      COPY_from_guest | COPY_linear);
> >  }
> >  
> > -/*
> > - * Temporarily map one physical guest page and copy data to or from it.
> > - * The data to be copied cannot cross a page boundary.
> > - */
> >  int access_guest_memory_by_ipa(struct domain *d, paddr_t gpa, void *buf,
> >                                 uint32_t size, bool is_write)
> >  {
> > -    struct page_info *page;
> > -    uint64_t offset = gpa & ~PAGE_MASK;  /* Offset within the mapped page */
> > -    p2m_type_t p2mt;
> > -    void *p;
> > -
> > -    /* Do not cross a page boundary. */
> > -    if ( size > (PAGE_SIZE - offset) )
> > -    {
> > -        printk(XENLOG_G_ERR "d%d: guestcopy: memory access crosses page boundary.\n",
> > -               d->domain_id);
> > -        return -EINVAL;
> > -    }
> 
> I don't know if we necessarily care about this, but with this change
> this error path goes away. Do we want to keep it?
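> (If we do want to keep some diagnostic, I guess a printk on the failure
> path of access_guest_memory_by_ipa would be enough. Something like this
> -- just a rough, untested sketch, the message is only a placeholder:
> 
>     if ( left != 0 )
>     {
>         printk(XENLOG_G_ERR "d%d: guestcopy: failed to copy %u bytes\n",
>                d->domain_id, size);
>         return -EINVAL;
>     }
>     return 0;
> 
> though of course it would no longer tell a page-boundary problem apart
> from a translation failure.)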
> 
> 
> > -    page = get_page_from_gfn(d, paddr_to_pfn(gpa), &p2mt, P2M_ALLOC);
> > -    if ( !page )
> > -    {
> > -        printk(XENLOG_G_ERR "d%d: guestcopy: failed to get table entry.\n",
> > -               d->domain_id);
> > -        return -EINVAL;
> > -    }
> > -
> > -    if ( !p2m_is_ram(p2mt) )
> > -    {
> > -        put_page(page);
> > -        printk(XENLOG_G_ERR "d%d: guestcopy: guest memory should be RAM.\n",
> > -               d->domain_id);
> > -        return -EINVAL;
> > -    }
> > +    unsigned long left;
> > +    int flags = COPY_ipa;
> >  
> > -    p = __map_domain_page(page);
> > +    flags |= is_write ? COPY_to_guest : COPY_from_guest;
> >  
> > -    if ( is_write )
> > -        memcpy(p + offset, buf, size);
> > -    else
> > -        memcpy(buf, p + offset, size);
> > +    /* P2M is shared between all vCPUs, so the vcpu used does not matter. */
> > +    left = copy_guest(buf, gpa, size, d->vcpu[0], flags);
> 
> fair enough, then why not use current?

Because it could be called from another domain, of course. Makes sense.
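
(For instance, a hypothetical caller acting on a foreign domain d, with
current->domain != d -- placeholder names, just to illustrate:

    /* copy size bytes from buf into the guest of domain d at IPA gpa */
    ret = access_guest_memory_by_ipa(d, gpa, buf, size, true);

in which case current's vCPU would not necessarily belong to the right
domain.)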

 
> > -    unmap_domain_page(p);
> > -    put_page(page);
> > -
> > -    return 0;
> > +    return (!left) ? 0 : -EINVAL;
> >  }
> >  
> >  /*
> > -- 
> > 2.11.0
> > 
> 
