[Xen-changelog] [linux-2.6.18-xen] privcmd: add new (replacement) mmap-batch ioctl
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1263370311 0
# Node ID 6d6c3dd995c07527b94d5278b0df4f5252888ccd
# Parent 6061d56155223f92362420ec9a5a4dbe933efa60
privcmd: add new (replacement) mmap-batch ioctl

While the error indicator of IOCTL_PRIVCMD_MMAPBATCH should be in the
top nibble (it is documented that way in include/xen/public/privcmd.h
and include/xen/compat_ioctl.h), it really wasn't for 64-bit
implementations. With MFNs now possibly being 32 or more bits wide on
x86-64, using bits 28-31 as failure indicator (and bit 31 as paged-out
indicator) is no longer acceptable. Instead, a new ioctl with a
separate error indication array is being introduced.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
 drivers/xen/privcmd/compat_privcmd.c |   50 +++++++++++++++++
 drivers/xen/privcmd/privcmd.c        |  102 +++++++++++++++++++++++++++++++++++
 fs/compat_ioctl.c                    |    1
 include/xen/compat_ioctl.h           |   20 ++++++
 include/xen/public/privcmd.h         |   10 +++
 5 files changed, 182 insertions(+), 1 deletion(-)

diff -r 6061d5615522 -r 6d6c3dd995c0 drivers/xen/privcmd/compat_privcmd.c
--- a/drivers/xen/privcmd/compat_privcmd.c	Fri Jan 08 13:07:17 2010 +0000
+++ b/drivers/xen/privcmd/compat_privcmd.c	Wed Jan 13 08:11:51 2010 +0000
@@ -97,6 +97,56 @@ int privcmd_ioctl_32(int fd, unsigned in
 #endif
 	}
 	break;
+	case IOCTL_PRIVCMD_MMAPBATCH_V2_32: {
+		struct privcmd_mmapbatch_v2 *p;
+		struct privcmd_mmapbatch_v2_32 *p32;
+		struct privcmd_mmapbatch_v2_32 n32;
+#ifdef xen_pfn32_t
+		xen_pfn_t *__user arr;
+		xen_pfn32_t *__user arr32;
+		unsigned int i;
+#endif
+
+		p32 = compat_ptr(arg);
+		p = compat_alloc_user_space(sizeof(*p));
+		if (copy_from_user(&n32, p32, sizeof(n32)) ||
+		    put_user(n32.num, &p->num) ||
+		    put_user(n32.dom, &p->dom) ||
+		    put_user(n32.addr, &p->addr) ||
+		    put_user(compat_ptr(n32.err), &p->err))
+			return -EFAULT;
+#ifdef xen_pfn32_t
+		arr = compat_alloc_user_space(n32.num * sizeof(*arr)
+					      + sizeof(*p));
+		arr32 = compat_ptr(n32.arr);
+		for (i = 0; i < n32.num; ++i) {
+			xen_pfn32_t mfn;
+
+			if (get_user(mfn, arr32 + i) || put_user(mfn, arr + i))
+				return -EFAULT;
+		}
+
+		if (put_user(arr, &p->arr))
+			return -EFAULT;
+#else
+		if (put_user(compat_ptr(n32.arr), &p->arr))
+			return -EFAULT;
+#endif
+
+		ret = sys_ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH_V2, (unsigned long)p);
+
+#ifdef xen_pfn32_t
+		for (i = 0; !ret && i < n32.num; ++i) {
+			xen_pfn_t mfn;
+
+			if (get_user(mfn, arr + i) || put_user(mfn, arr32 + i))
+				ret = -EFAULT;
+			else if (mfn != (xen_pfn32_t)mfn)
+				ret = -ERANGE;
+		}
+#endif
+	}
+	break;
 	default:
 		ret = -EINVAL;
 		break;
diff -r 6061d5615522 -r 6d6c3dd995c0 drivers/xen/privcmd/privcmd.c
--- a/drivers/xen/privcmd/privcmd.c	Fri Jan 08 13:07:17 2010 +0000
+++ b/drivers/xen/privcmd/privcmd.c	Wed Jan 13 08:11:51 2010 +0000
@@ -321,6 +321,108 @@ static long privcmd_ioctl(struct file *f
 	mmapbatch_out:
 		list_for_each_safe(l,l2,&pagelist)
 			free_page((unsigned long)l);
+	}
+	break;
+
+	case IOCTL_PRIVCMD_MMAPBATCH_V2: {
+		privcmd_mmapbatch_v2_t m;
+		struct mm_struct *mm = current->mm;
+		struct vm_area_struct *vma;
+		const xen_pfn_t __user *p;
+		xen_pfn_t *mfn;
+		unsigned long addr, nr_pages;
+		unsigned int i, nr;
+		LIST_HEAD(pagelist);
+		struct list_head *l, *l2;
+		int *err, paged_out;
+
+		if (!is_initial_xendomain())
+			return -EPERM;
+
+		if (copy_from_user(&m, udata, sizeof(m)))
+			return -EFAULT;
+
+		nr_pages = m.num;
+		addr = m.addr;
+		if (m.num <= 0 || nr_pages > (ULONG_MAX >> PAGE_SHIFT) ||
+		    addr != m.addr || nr_pages > (-addr >> PAGE_SHIFT))
+			return -EINVAL;
+
+		p = m.arr;
+		for (i = 0; i < nr_pages; i += nr, p += nr) {
+			nr = min(nr_pages - i, MMAPBATCH_NR_PER_PAGE);
+
+			ret = -ENOMEM;
+			l = (struct list_head *)__get_free_page(GFP_KERNEL);
+			if (l == NULL)
+				goto mmapbatch_v2_out;
+
+			INIT_LIST_HEAD(l);
+			list_add_tail(l, &pagelist);
+
+			mfn = (void *)(l + 1);
+			ret = -EFAULT;
+			if (copy_from_user(mfn, p, nr * sizeof(*mfn)))
+				goto mmapbatch_v2_out;
+		}
+
+		down_write(&mm->mmap_sem);
+
+		vma = find_vma(mm, addr);
+		ret = -EINVAL;
+		if (!vma ||
+		    addr < vma->vm_start ||
+		    addr + (nr_pages << PAGE_SHIFT) > vma->vm_end ||
+		    !enforce_singleshot_mapping(vma, addr, nr_pages)) {
+			up_write(&mm->mmap_sem);
+			goto mmapbatch_v2_out;
+		}
+
+		i = 0;
+		ret = 0;
+		paged_out = 0;
+		list_for_each(l, &pagelist) {
+			int rc;
+
+			nr = i + min(nr_pages - i, MMAPBATCH_NR_PER_PAGE);
+			mfn = (void *)(l + 1);
+			err = (void *)(l + 1);
+			BUILD_BUG_ON(sizeof(*err) > sizeof(*mfn));
+
+			while (i < nr) {
+				rc = direct_remap_pfn_range(vma, addr & PAGE_MASK,
+							    *mfn, PAGE_SIZE,
+							    vma->vm_page_prot, m.dom);
+				if (rc < 0) {
+					if (rc == -ENOENT)
+						paged_out = 1;
+					ret++;
+				} else
+					BUG_ON(rc > 0);
+				*err++ = rc;
+				mfn++; i++; addr += PAGE_SIZE;
+			}
+		}
+
+		up_write(&mm->mmap_sem);
+
+		if (ret > 0) {
+			int __user *p = m.err;
+
+			ret = paged_out ? -ENOENT : 0;
+			i = 0;
+			list_for_each(l, &pagelist) {
+				nr = min(nr_pages - i, MMAPBATCH_NR_PER_PAGE);
+				err = (void *)(l + 1);
+				if (copy_to_user(p, err, nr * sizeof(*err)))
+					ret = -EFAULT;
+				i += nr; p += nr;
+			}
+		}
+
+	mmapbatch_v2_out:
+		list_for_each_safe(l, l2, &pagelist)
+			free_page((unsigned long)l);
 #undef MMAPBATCH_NR_PER_PAGE
 	}
 	break;
diff -r 6061d5615522 -r 6d6c3dd995c0 fs/compat_ioctl.c
--- a/fs/compat_ioctl.c	Fri Jan 08 13:07:17 2010 +0000
+++ b/fs/compat_ioctl.c	Wed Jan 13 08:11:51 2010 +0000
@@ -2959,6 +2959,7 @@ HANDLE_IOCTL(LPSETTIMEOUT, lp_timeout_tr
 #ifdef CONFIG_XEN
 HANDLE_IOCTL(IOCTL_PRIVCMD_MMAP_32, privcmd_ioctl_32)
 HANDLE_IOCTL(IOCTL_PRIVCMD_MMAPBATCH_32, privcmd_ioctl_32)
+HANDLE_IOCTL(IOCTL_PRIVCMD_MMAPBATCH_V2_32, privcmd_ioctl_32)
 COMPATIBLE_IOCTL(IOCTL_PRIVCMD_HYPERCALL)
 COMPATIBLE_IOCTL(IOCTL_EVTCHN_BIND_VIRQ)
 COMPATIBLE_IOCTL(IOCTL_EVTCHN_BIND_INTERDOMAIN)
diff -r 6061d5615522 -r 6d6c3dd995c0 include/xen/compat_ioctl.h
--- a/include/xen/compat_ioctl.h	Fri Jan 08 13:07:17 2010 +0000
+++ b/include/xen/compat_ioctl.h	Wed Jan 13 08:11:51 2010 +0000
@@ -49,9 +49,27 @@ struct privcmd_mmapbatch_32 {
 #endif
 	compat_uptr_t arr; /* array of mfns - top nibble set on err */
 };
+
+struct privcmd_mmapbatch_v2_32 {
+	unsigned int num; /* number of pages to populate */
+	domid_t dom;      /* target domain */
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+	union {           /* virtual address */
+		__u64 addr __attribute__((packed));
+		__u32 va; /* ensures union is 4-byte aligned */
+	};
+#else
+	__u64 addr;       /* virtual address */
+#endif
+	compat_uptr_t arr; /* array of mfns */
+	compat_uptr_t err; /* array of error codes */
+};
+
 #define IOCTL_PRIVCMD_MMAP_32                   \
 	_IOC(_IOC_NONE, 'P', 2, sizeof(struct privcmd_mmap_32))
-#define IOCTL_PRIVCMD_MMAPBATCH_32              \
+#define IOCTL_PRIVCMD_MMAPBATCH_32              \
 	_IOC(_IOC_NONE, 'P', 3, sizeof(struct privcmd_mmapbatch_32))
+#define IOCTL_PRIVCMD_MMAPBATCH_V2_32           \
+	_IOC(_IOC_NONE, 'P', 4, sizeof(struct privcmd_mmapbatch_v2_32))
 
 #endif /* __LINUX_XEN_COMPAT_H__ */
diff -r 6061d5615522 -r 6d6c3dd995c0 include/xen/public/privcmd.h
--- a/include/xen/public/privcmd.h	Fri Jan 08 13:07:17 2010 +0000
+++ b/include/xen/public/privcmd.h	Wed Jan 13 08:11:51 2010 +0000
@@ -64,6 +64,14 @@ typedef struct privcmd_mmapbatch {
 	xen_pfn_t __user *arr; /* array of mfns - top nibble set on err */
 } privcmd_mmapbatch_t;
 
+typedef struct privcmd_mmapbatch_v2 {
+	unsigned int num; /* number of pages to populate */
+	domid_t dom;      /* target domain */
+	__u64 addr;       /* virtual address */
+	const xen_pfn_t __user *arr; /* array of mfns */
+	int __user *err;  /* array of error codes */
+} privcmd_mmapbatch_v2_t;
+
 /*
  * @cmd: IOCTL_PRIVCMD_HYPERCALL
  * @arg: &privcmd_hypercall_t
@@ -75,5 +83,7 @@ typedef struct privcmd_mmapbatch {
 	_IOC(_IOC_NONE, 'P', 2, sizeof(privcmd_mmap_t))
 #define IOCTL_PRIVCMD_MMAPBATCH					\
 	_IOC(_IOC_NONE, 'P', 3, sizeof(privcmd_mmapbatch_t))
+#define IOCTL_PRIVCMD_MMAPBATCH_V2				\
+	_IOC(_IOC_NONE, 'P', 4, sizeof(privcmd_mmapbatch_v2_t))
 
 #endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
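
For illustration only (not part of the changeset): a minimal userspace sketch
of how a dom0 tool might drive the new IOCTL_PRIVCMD_MMAPBATCH_V2, assuming
the Xen public headers (xen_pfn_t, privcmd_mmapbatch_v2_t and the ioctl
numbers) are on the include path. The device path, helper names and the
sample MFN below are made up for the example; the structure fields and error
convention follow the patch above: MFNs are passed read-only through arr,
per-page status comes back in the separate err array, and the ioctl as a
whole reports -ENOENT only when at least one frame was paged out.

/* map_foreign_v2.c - hypothetical example, not part of the changeset */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <xen/public/privcmd.h>	/* privcmd_mmapbatch_v2_t, IOCTL_PRIVCMD_MMAPBATCH_V2 */

/* Map 'num' foreign frames of domain 'domid'; return the mapping or NULL. */
static void *map_foreign_batch_v2(int fd, unsigned int domid,
				  const xen_pfn_t *mfns, unsigned int num)
{
	size_t len = (size_t)num * getpagesize();
	privcmd_mmapbatch_v2_t m;
	void *addr;
	int *err;
	unsigned int i;
	int rc;

	/* Reserve a VA range backed by the privcmd device. */
	addr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED)
		return NULL;

	/* Zero-filled: the kernel only writes err[] back when something failed. */
	err = calloc(num, sizeof(*err));
	if (err == NULL)
		goto fail;

	memset(&m, 0, sizeof(m));
	m.num  = num;
	m.dom  = domid;
	m.addr = (unsigned long)addr;
	m.arr  = mfns;			/* MFNs are no longer modified in place */
	m.err  = err;			/* per-page status lands here instead  */

	rc = ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH_V2, &m);
	if (rc < 0 && errno != ENOENT)	/* ENOENT: some frames were paged out */
		goto fail_free;

	for (i = 0; i < num; i++)
		if (err[i])
			fprintf(stderr, "frame %u (mfn %#lx): error %d\n",
				i, (unsigned long)mfns[i], err[i]);

	free(err);
	return addr;

 fail_free:
	free(err);
 fail:
	munmap(addr, len);
	return NULL;
}

int main(void)
{
	/* Device path and sample MFN are placeholders for illustration. */
	xen_pfn_t mfns[1] = { 0x12345 };
	int fd = open("/proc/xen/privcmd", O_RDWR);

	if (fd < 0)
		return 1;
	if (map_foreign_batch_v2(fd, 1 /* domid */, mfns, 1) == NULL)
		fprintf(stderr, "mapping failed: %s\n", strerror(errno));
	close(fd);
	return 0;
}

Note the design point this relies on: the caller owns the err array and can
keep it zero-initialised, since the kernel copies it back only when at least
one per-page mapping failed, so a zero entry always means success.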