Re: [Xen-devel] [v2 06/16] xen/arm: Extend copy_to_guest to support copying from/to guest physical address

On Tue, 12 Dec 2017, Julien Grall wrote:
> The only differences between copy_to_guest and access_guest_memory_by_ipa are:
>     - The latter does not support copying data crossing a page boundary.
>     - The former copies from/to a guest virtual address whilst the latter
>       uses a guest physical address.
>
> copy_to_guest can easily be extended to support copying from/to a guest
> physical address. For that, a new bit is used to tell whether a linear
> address or an IPA is being used.
>
> Lastly, access_guest_memory_by_ipa is reimplemented using copy_guest.
> This also has the benefit of extending its use: it is now possible to
> copy data crossing a page boundary.
>
> Signed-off-by: Julien Grall <julien.grall@xxxxxxxxxx>
Reviewed-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
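
A side note on the encoding for readers following along: each property pairs a
named zero value (COPY_from_guest, COPY_ipa) with its set-bit counterpart, so
call sites spell out both the direction and the kind of address, while
copy_guest() only ever tests the set-bit variants. A minimal standalone sketch
of this (the defines mirror the patch; the main() is illustration only, not
Xen code):

#include <stdio.h>

#define COPY_flush_dcache (1U << 0)
#define COPY_from_guest   (0U << 1)
#define COPY_to_guest     (1U << 1)
#define COPY_ipa          (0U << 2)
#define COPY_linear       (1U << 2)

int main(void)
{
    /* A caller names both choices explicitly when composing flags... */
    unsigned int flags = COPY_to_guest | COPY_linear;

    /* ...but only the set-bit variants are ever tested. */
    printf("direction: %s\n",
           (flags & COPY_to_guest) ? "to guest" : "from guest");
    printf("address:   %s\n",
           (flags & COPY_linear) ? "linear (VA)" : "ipa (PA)");

    return 0;
}
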
> ---
> Changes in v2:
>     - Rework the patch after the interface changes in the previous
>       patch.
>     - Use uint64_t rather than paddr_t in translate_get_page
>     - Add a BUILD_BUG_ON to check whether paddr_t fits in uint64_t
> ---
>  xen/arch/arm/guestcopy.c | 91 +++++++++++++++++++++++-------------------------
>  1 file changed, 44 insertions(+), 47 deletions(-)
>
> diff --git a/xen/arch/arm/guestcopy.c b/xen/arch/arm/guestcopy.c
> index 7e92e27beb..93e4aa2d3f 100644
> --- a/xen/arch/arm/guestcopy.c
> +++ b/xen/arch/arm/guestcopy.c
> @@ -8,6 +8,8 @@
>  #define COPY_flush_dcache   (1U << 0)
>  #define COPY_from_guest     (0U << 1)
>  #define COPY_to_guest       (1U << 1)
> +#define COPY_ipa            (0U << 2)
> +#define COPY_linear         (1U << 2)
>  
>  typedef union
>  {
> @@ -15,9 +17,39 @@ typedef union
>      {
>          struct vcpu *v;
>      } gva;
> +
> +    struct
> +    {
> +        struct domain *d;
> +    } gpa;
>  } copy_info_t;
>  
>  #define GVA_INFO(vcpu) ((copy_info_t) { .gva = { vcpu } })
> +#define GPA_INFO(domain) ((copy_info_t) { .gpa = { domain } })
> +
> +static struct page_info *translate_get_page(copy_info_t info, uint64_t addr,
> +                                            bool linear, bool write)
> +{
> +    p2m_type_t p2mt;
> +    struct page_info *page;
> +
> +    if ( linear )
> +        return get_page_from_gva(info.gva.v, addr,
> +                                 write ? GV2M_WRITE : GV2M_READ);
> +
> +    page = get_page_from_gfn(info.gpa.d, paddr_to_pfn(addr), &p2mt, P2M_ALLOC);
> +
> +    if ( !page )
> +        return NULL;
> +
> +    if ( !p2m_is_ram(p2mt) )
> +    {
> +        put_page(page);
> +        return NULL;
> +    }
> +
> +    return page;
> +}
>  
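
Worth noting about translate_get_page(): whichever translation path is taken,
a successful return comes with a page reference held, and the failure path
drops any reference it took, so the caller is left with exactly one put_page()
obligation. A toy model of that contract (stub types and refcounting, not Xen
code):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* Toy stand-ins for Xen's page_info and reference counting. */
struct page_info { int refcount; };

static struct page_info backing_page;

static struct page_info *get_page(void)
{
    backing_page.refcount++;
    return &backing_page;
}

static void put_page(struct page_info *pg)
{
    pg->refcount--;
}

/* Models translate_get_page(): every successful return holds one ref. */
static struct page_info *translate_get_page(bool linear, bool is_ram)
{
    struct page_info *page = get_page();   /* both paths take a ref... */

    if ( !linear && !is_ram )
    {
        put_page(page);                    /* ...and drop it on failure */
        return NULL;
    }

    return page;
}

int main(void)
{
    struct page_info *page = translate_get_page(false, true);

    assert(page && page->refcount == 1);
    put_page(page);                        /* caller's single obligation */
    assert(backing_page.refcount == 0);

    assert(translate_get_page(false, false) == NULL);
    assert(backing_page.refcount == 0);    /* failure path leaked nothing */

    return 0;
}
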
>  static unsigned long copy_guest(void *buf, uint64_t addr, unsigned int len,
>                                  copy_info_t info, unsigned int flags)
> @@ -26,6 +58,7 @@ static unsigned long copy_guest(void *buf, uint64_t addr, unsigned int len,
>      unsigned offset = addr & ~PAGE_MASK;
>  
>      BUILD_BUG_ON((sizeof(addr)) < sizeof(vaddr_t));
> +    BUILD_BUG_ON((sizeof(addr)) < sizeof(paddr_t));
>  
>      while ( len )
>      {
> @@ -33,8 +66,8 @@ static unsigned long copy_guest(void *buf, uint64_t addr, unsigned int len,
>          unsigned size = min(len, (unsigned)PAGE_SIZE - offset);
>          struct page_info *page;
>  
> -        page = get_page_from_gva(info.gva.v, addr,
> -                                 (flags & COPY_to_guest) ? GV2M_WRITE : GV2M_READ);
> +        page = translate_get_page(info, addr, flags & COPY_linear,
> +                                  flags & COPY_to_guest);
>          if ( page == NULL )
>              return len;
>  
> @@ -75,75 +108,39 @@ static unsigned long copy_guest(void *buf, uint64_t addr, unsigned int len,
>  unsigned long raw_copy_to_guest(void *to, const void *from, unsigned len)
>  {
>      return copy_guest((void *)from, (vaddr_t)to, len,
> -                      GVA_INFO(current), COPY_to_guest);
> +                      GVA_INFO(current), COPY_to_guest | COPY_linear);
>  }
>  
>  unsigned long raw_copy_to_guest_flush_dcache(void *to, const void *from,
>                                               unsigned len)
>  {
>      return copy_guest((void *)from, (vaddr_t)to, len, GVA_INFO(current),
> -                      COPY_to_guest | COPY_flush_dcache);
> +                      COPY_to_guest | COPY_flush_dcache | COPY_linear);
>  }
>  
>  unsigned long raw_clear_guest(void *to, unsigned len)
>  {
>      return copy_guest(NULL, (vaddr_t)to, len, GVA_INFO(current),
> -                      COPY_to_guest);
> +                      COPY_to_guest | COPY_linear);
>  }
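
raw_clear_guest() passes NULL as the source buffer, relying on copy_guest()
(as reworked earlier in this series) to treat a NULL buffer as "fill with
zeroes". A minimal standalone illustration of that convention, using plain
memory instead of mapped guest pages (not Xen code):

#include <assert.h>
#include <stdio.h>
#include <string.h>

/* Toy analogue of copy_guest()'s "to guest" leg: NULL src means clear. */
static void copy_or_clear(unsigned char *dst, const unsigned char *src,
                          size_t size)
{
    if ( src )
        memcpy(dst, src, size);
    else
        memset(dst, 0, size);   /* the raw_clear_guest() path */
}

int main(void)
{
    unsigned char guest[8] = "AAAAAAA";

    copy_or_clear(guest, (const unsigned char *)"hello", 5);
    assert(memcmp(guest, "hello", 5) == 0);

    copy_or_clear(guest, NULL, sizeof(guest));  /* clear, not copy */
    for ( size_t i = 0; i < sizeof(guest); i++ )
        assert(guest[i] == 0);

    puts("ok");
    return 0;
}
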
>  
>  unsigned long raw_copy_from_guest(void *to, const void __user *from,
>                                    unsigned len)
>  {
>      return copy_guest(to, (vaddr_t)from, len, GVA_INFO(current),
> -                      COPY_from_guest);
> +                      COPY_from_guest | COPY_linear);
>  }
>  
> -/*
> - * Temporarily map one physical guest page and copy data to or from it.
> - * The data to be copied cannot cross a page boundary.
> - */
>  int access_guest_memory_by_ipa(struct domain *d, paddr_t gpa, void *buf,
>                                 uint32_t size, bool is_write)
>  {
> -    struct page_info *page;
> -    uint64_t offset = gpa & ~PAGE_MASK; /* Offset within the mapped page */
> -    p2m_type_t p2mt;
> -    void *p;
> -
> -    /* Do not cross a page boundary. */
> -    if ( size > (PAGE_SIZE - offset) )
> -    {
> -        printk(XENLOG_G_ERR "d%d: guestcopy: memory access crosses page boundary.\n",
> -               d->domain_id);
> -        return -EINVAL;
> -    }
> -
> -    page = get_page_from_gfn(d, paddr_to_pfn(gpa), &p2mt, P2M_ALLOC);
> -    if ( !page )
> -    {
> -        printk(XENLOG_G_ERR "d%d: guestcopy: failed to get table entry.\n",
> -               d->domain_id);
> -        return -EINVAL;
> -    }
> +    unsigned long left;
> +    int flags = COPY_ipa;
>  
> -    if ( !p2m_is_ram(p2mt) )
> -    {
> -        put_page(page);
> -        printk(XENLOG_G_ERR "d%d: guestcopy: guest memory should be RAM.\n",
> -               d->domain_id);
> -        return -EINVAL;
> -    }
> -
> -    p = __map_domain_page(page);
> +    flags |= is_write ? COPY_to_guest : COPY_from_guest;
>  
> -    if ( is_write )
> -        memcpy(p + offset, buf, size);
> -    else
> -        memcpy(buf, p + offset, size);
> +    left = copy_guest(buf, gpa, size, GPA_INFO(d), flags);
>  
> -    unmap_domain_page(p);
> -    put_page(page);
> -
> -    return 0;
> +    return (!left) ? 0 : -EINVAL;
>  }
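
Beyond the error-handling change (any bytes left uncopied now turn into
-EINVAL), the practical gain is that a single access_guest_memory_by_ipa()
call may now cross page boundaries: copy_guest() clamps each iteration to
min(len, PAGE_SIZE - offset) and walks page by page. A standalone sketch of
that chunking, with a toy 4-byte page size so the boundary is visible (not
Xen code):

#include <assert.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4u   /* toy page size, to make the boundary visible */

/* Mimics copy_guest()'s loop: clamp each chunk to the end of its page. */
static unsigned long copy_chunked(unsigned char *dst, const unsigned char *src,
                                  unsigned long addr, unsigned int len)
{
    unsigned int offset = addr & (PAGE_SIZE - 1);

    while ( len )
    {
        unsigned int size = len < PAGE_SIZE - offset ? len
                                                     : PAGE_SIZE - offset;

        printf("chunk of %u byte(s) at offset %u in its page\n", size, offset);
        memcpy(dst, src, size);

        dst += size;
        src += size;
        len -= size;
        offset = 0;   /* subsequent chunks start page-aligned */
    }

    return 0;         /* 0 bytes left => success, like copy_guest() */
}

int main(void)
{
    unsigned char out[10] = { 0 };

    /* 9 bytes starting 2 bytes into a page => chunks of 2, 4 and 3. */
    assert(copy_chunked(out, (const unsigned char *)"bootstrap", 2, 9) == 0);
    assert(memcmp(out, "bootstrap", 9) == 0);

    return 0;
}
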
>  
>  /*
> --
> 2.11.0
>
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel