[Xen-devel] Re: [PATCH] xen/privcmd: move remap_domain_mfn_range() to core xen code and export.
You both should have got a CC of this, but git-send-email is playing up.

On Thu, 2009-05-21 at 05:18 -0400, Ian Campbell wrote:
> This allows xenfs to be built as a module; previously that required
> flush_tlb_all and arbitrary_virt_to_machine to be exported.
>
> Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
> ---
>
> I'm not sure about the IA64 impact. Under 2.6.18 their
> direct_remap_pfn_range is very different to the x86 one, so I've
> assumed it should stay arch-specific here too.
>
>  arch/x86/xen/mmu.c          |   66 +++++++++++++++++++++++++++++++++++
>  drivers/xen/xenfs/privcmd.c |   81 ++++--------------------------------------
>  include/xen/xen-ops.h       |    5 +++
>  3 files changed, 79 insertions(+), 73 deletions(-)
>
> diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
> index fefdeee..8c53fc9 100644
> --- a/arch/x86/xen/mmu.c
> +++ b/arch/x86/xen/mmu.c
> @@ -2323,6 +2323,72 @@ void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
>  }
>  EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
>
> +#define REMAP_BATCH_SIZE 16
> +
> +struct remap_data {
> +	unsigned long mfn;
> +	pgprot_t prot;
> +	struct mmu_update *mmu_update;
> +};
> +
> +static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
> +				 unsigned long addr, void *data)
> +{
> +	struct remap_data *rmd = data;
> +	pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));
> +
> +	rmd->mmu_update->ptr = arbitrary_virt_to_machine(ptep).maddr;
> +	rmd->mmu_update->val = pte_val_ma(pte);
> +	rmd->mmu_update++;
> +
> +	return 0;
> +}
> +
> +int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
> +			       unsigned long addr,
> +			       unsigned long mfn, int nr,
> +			       pgprot_t prot, unsigned domid)
> +{
> +	struct remap_data rmd;
> +	struct mmu_update mmu_update[REMAP_BATCH_SIZE];
> +	int batch;
> +	unsigned long range;
> +	int err = 0;
> +
> +	prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
> +
> +	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
> +
> +	rmd.mfn = mfn;
> +	rmd.prot = prot;
> +
> +	while (nr) {
> +		batch = min(REMAP_BATCH_SIZE, nr);
> +		range = (unsigned long)batch << PAGE_SHIFT;
> +
> +		rmd.mmu_update = mmu_update;
> +		err = apply_to_page_range(vma->vm_mm, addr, range,
> +					  remap_area_mfn_pte_fn, &rmd);
> +		if (err)
> +			goto out;
> +
> +		err = -EFAULT;
> +		if (HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid) < 0)
> +			goto out;
> +
> +		nr -= batch;
> +		addr += range;
> +	}
> +
> +	err = 0;
> +out:
> +
> +	flush_tlb_all();
> +
> +	return err;
> +}
> +EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
> +
>  #ifdef CONFIG_XEN_DEBUG_FS
>
>  static struct dentry *d_mmu_debug;
> diff --git a/drivers/xen/xenfs/privcmd.c b/drivers/xen/xenfs/privcmd.c
> index 110b062..a3fee58 100644
> --- a/drivers/xen/xenfs/privcmd.c
> +++ b/drivers/xen/xenfs/privcmd.c
> @@ -30,76 +30,12 @@
>  #include <xen/interface/xen.h>
>  #include <xen/features.h>
>  #include <xen/page.h>
> -
> -#define REMAP_BATCH_SIZE 16
> +#include <xen/xen-ops.h>
>
>  #ifndef HAVE_ARCH_PRIVCMD_MMAP
>  static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
>  #endif
>
> -struct remap_data {
> -	unsigned long mfn;
> -	pgprot_t prot;
> -	struct mmu_update *mmu_update;
> -};
> -
> -static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
> -				 unsigned long addr, void *data)
> -{
> -	struct remap_data *rmd = data;
> -	pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));
> -
> -	rmd->mmu_update->ptr = arbitrary_virt_to_machine(ptep).maddr;
> -	rmd->mmu_update->val = pte_val_ma(pte);
> -	rmd->mmu_update++;
> -
> -	return 0;
> -}
> -
> -static int remap_domain_mfn_range(struct vm_area_struct *vma,
> -				  unsigned long addr,
> -				  unsigned long mfn, int nr,
> -				  pgprot_t prot, unsigned domid)
> -{
> -	struct remap_data rmd;
> -	struct mmu_update mmu_update[REMAP_BATCH_SIZE];
> -	int batch;
> -	unsigned long range;
> -	int err = 0;
> -
> -	prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
> -
> -	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
> -
> -	rmd.mfn = mfn;
> -	rmd.prot = prot;
> -
> -	while (nr) {
> -		batch = min(REMAP_BATCH_SIZE, nr);
> -		range = (unsigned long)batch << PAGE_SHIFT;
> -
> -		rmd.mmu_update = mmu_update;
> -		err = apply_to_page_range(vma->vm_mm, addr, range,
> -					  remap_area_mfn_pte_fn, &rmd);
> -		if (err)
> -			goto out;
> -
> -		err = -EFAULT;
> -		if (HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid) < 0)
> -			goto out;
> -
> -		nr -= batch;
> -		addr += range;
> -	}
> -
> -	err = 0;
> -out:
> -
> -	flush_tlb_all();
> -
> -	return err;
> -}
> -
>  static long privcmd_ioctl_hypercall(void __user *udata)
>  {
>  	privcmd_hypercall_t hypercall;
> @@ -232,11 +168,11 @@ static int mmap_mfn_range(void *data, void *state)
>  	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
>  		return -EINVAL;
>
> -	rc = remap_domain_mfn_range(vma,
> -				    msg->va & PAGE_MASK,
> -				    msg->mfn, msg->npages,
> -				    vma->vm_page_prot,
> -				    st->domain);
> +	rc = xen_remap_domain_mfn_range(vma,
> +					msg->va & PAGE_MASK,
> +					msg->mfn, msg->npages,
> +					vma->vm_page_prot,
> +					st->domain);
>  	if (rc < 0)
>  		return rc;
>
> @@ -314,9 +250,8 @@ static int mmap_batch_fn(void *data, void *state)
>  	xen_pfn_t *mfnp = data;
>  	struct mmap_batch_state *st = state;
>
> -	if (remap_domain_mfn_range(st->vma, st->va & PAGE_MASK,
> -				   *mfnp, 1,
> -				   st->vma->vm_page_prot, st->domain) < 0) {
> +	if (xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
> +				       st->vma->vm_page_prot, st->domain) < 0) {
>  		*mfnp |= 0xf0000000U;
>  		st->err++;
>  	}
> diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
> index d789c93..4d8a23e 100644
> --- a/include/xen/xen-ops.h
> +++ b/include/xen/xen-ops.h
> @@ -20,4 +20,9 @@ int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
>
>  void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order);
>
> +int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
> +			       unsigned long addr,
> +			       unsigned long mfn, int nr,
> +			       pgprot_t prot, unsigned domid);
> +
>  #endif /* INCLUDE_XEN_OPS_H */
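
For anyone wanting to call the new export from another module, the pattern
is the same one privcmd now uses above. A minimal sketch follows; the
function and its mfn/nr/domid plumbing are hypothetical (only
xen_remap_domain_mfn_range() and <xen/xen-ops.h> come from this patch):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <xen/xen-ops.h>

/* Hypothetical mmap handler: map 'nr' machine frames starting at 'mfn'
 * from domain 'domid' into the caller's VMA.  How the module learns
 * mfn/nr/domid (e.g. via an ioctl) is left out. */
static int example_mmap_foreign(struct vm_area_struct *vma,
				unsigned long mfn, int nr,
				unsigned domid)
{
	/* Refuse requests that don't fit in the VMA. */
	if (((unsigned long)nr << PAGE_SHIFT) > (vma->vm_end - vma->vm_start))
		return -EINVAL;

	/* xen_remap_domain_mfn_range() marks the VMA
	 * VM_IO|VM_RESERVED|VM_PFNMAP, issues the MMU updates in
	 * REMAP_BATCH_SIZE chunks and flushes the TLB when done. */
	return xen_remap_domain_mfn_range(vma, vma->vm_start, mfn, nr,
					  vma->vm_page_prot, domid);
}

MODULE_LICENSE("GPL"); /* needed: the symbol is EXPORT_SYMBOL_GPL */

Since the export is EXPORT_SYMBOL_GPL, any module using it has to declare a
GPL-compatible MODULE_LICENSE, as above.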