
Re: [Xen-devel] [PATCH LINUX RFC] xen: privcmd: implement IOCTL_PRIVCMD_MMAPBATCH_V2_UNCACHED



On Wed, Dec 18, 2013 at 05:30:37PM +0000, Ian Campbell wrote:
> On ARM we want to use uncached foreign mappings while building the domain
> because the guests start with MMU and caches disabled.
> 

Why introduce a new ioctl? Could we piggyback on the existing one and
simply make the mapping uncached on ARM?
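
I'm thinking of something along these lines (completely untested sketch,
and xen_foreign_page_prot() is just a name I made up for illustration):

/* Let the architecture pick the paging attributes for foreign mappings
 * instead of adding a new ioctl number. */
#include <linux/mm.h>		/* struct vm_area_struct, pgprot_t */
#include <asm/pgtable.h>	/* pgprot_noncached() */

static inline pgprot_t xen_foreign_page_prot(struct vm_area_struct *vma)
{
#ifdef CONFIG_ARM
	/* Guests start with MMU/caches off, so map foreign pages uncached. */
	return pgprot_noncached(vma->vm_page_prot);
#else
	return vma->vm_page_prot;
#endif
}

and then privcmd_ioctl_mmap_batch() would just do
'state.prot = xen_foreign_page_prot(vma);' for both the V1 and V2 paths.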

> Flushing the caches before launching the guest is problematic because there is
> a window between flush and unmap where the processor might speculatively fill
> a cache line. Using a non-cacheable mapping completely avoids this.
> 
> Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
> Cc: Stefano Stabellini <Stefano.Stabellini@xxxxxxxxxx>
> Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
> Cc: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
> Cc: David Vrabel <david.vrabel@xxxxxxxxxx>
> Cc: xen-devel@xxxxxxxxxxxxxxxxxxxx
> ---
>  drivers/xen/privcmd.c      |   15 ++++++++++-----
>  include/uapi/xen/privcmd.h |    2 ++
>  2 files changed, 12 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
> index 569a13b..b5561d1 100644
> --- a/drivers/xen/privcmd.c
> +++ b/drivers/xen/privcmd.c
> @@ -253,6 +253,7 @@ struct mmap_batch_state {
>       domid_t domain;
>       unsigned long va;
>       struct vm_area_struct *vma;
> +     pgprot_t prot;
>       int index;
>       /* A tristate:
>        *      0 for no errors
> @@ -285,8 +286,7 @@ static int mmap_batch_fn(void *data, void *state)
>               cur_page = pages[st->index++];
>  
>       ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
> -                                      st->vma->vm_page_prot, st->domain,
> -                                      &cur_page);
> +                                      st->prot, st->domain, &cur_page);
>  
>       /* Store error code for second pass. */
>       if (st->version == 1) {
> @@ -367,7 +367,7 @@ static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
>  
>  static struct vm_operations_struct privcmd_vm_ops;
>  
> -static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
> +static long privcmd_ioctl_mmap_batch(void __user *udata, int version, int cached)
>  {
>       int ret;
>       struct privcmd_mmapbatch_v2 m;
> @@ -464,6 +464,8 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
>  
>       state.domain        = m.dom;
>       state.vma           = vma;
> +     state.prot          = cached ? vma->vm_page_prot
> +                                  : pgprot_noncached(vma->vm_page_prot);
>       state.va            = m.addr;
>       state.index         = 0;
>       state.global_error  = 0;
> @@ -514,13 +516,16 @@ static long privcmd_ioctl(struct file *file,
>               break;
>  
>       case IOCTL_PRIVCMD_MMAPBATCH:
> -             ret = privcmd_ioctl_mmap_batch(udata, 1);
> +             ret = privcmd_ioctl_mmap_batch(udata, 1, 1);
>               break;
>  
>       case IOCTL_PRIVCMD_MMAPBATCH_V2:
> -             ret = privcmd_ioctl_mmap_batch(udata, 2);
> +             ret = privcmd_ioctl_mmap_batch(udata, 2, 1);
>               break;
>  
> +     case IOCTL_PRIVCMD_MMAPBATCH_V2_UNCACHED:
> +             ret = privcmd_ioctl_mmap_batch(udata, 2, 0);
> +             break;
>       default:
>               ret = -EINVAL;
>               break;
> diff --git a/include/uapi/xen/privcmd.h b/include/uapi/xen/privcmd.h
> index a853168..be7e72b 100644
> --- a/include/uapi/xen/privcmd.h
> +++ b/include/uapi/xen/privcmd.h
> @@ -94,5 +94,7 @@ struct privcmd_mmapbatch_v2 {
>       _IOC(_IOC_NONE, 'P', 3, sizeof(struct privcmd_mmapbatch))
>  #define IOCTL_PRIVCMD_MMAPBATCH_V2                           \
>       _IOC(_IOC_NONE, 'P', 4, sizeof(struct privcmd_mmapbatch_v2))
> +#define IOCTL_PRIVCMD_MMAPBATCH_V2_UNCACHED                  \
> +     _IOC(_IOC_NONE, 'P', 5, sizeof(struct privcmd_mmapbatch_v2))
>  
>  #endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
> -- 
> 1.7.10.4
> 
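FWIW the userspace side would presumably end up looking something like the
below (untested sketch; I'm assuming the updated privcmd header with the new
ioctl number is what gets installed as <xen/sys/privcmd.h>, and that
xen_pfn_t/domid_t come in from the usual Xen public headers):

/* Map nr_pages foreign frames of domain 'dom' uncached at 'addr', which
 * must lie inside an earlier mmap() of the privcmd device. 'mfns' holds
 * the frame numbers to map and 'errs' receives a per-frame error code. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <xen/sys/privcmd.h>

static int map_foreign_uncached(int privcmd_fd, domid_t dom, uint64_t addr,
				const xen_pfn_t *mfns, int *errs,
				unsigned int nr_pages)
{
	struct privcmd_mmapbatch_v2 batch = {
		.num  = nr_pages,
		.dom  = dom,
		.addr = addr,
		.arr  = mfns,
		.err  = errs,
	};

	return ioctl(privcmd_fd, IOCTL_PRIVCMD_MMAPBATCH_V2_UNCACHED, &batch);
}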

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel