Re: [Xen-devel] [PATCH 09/14] xen: arm: enable balloon driver
On Thu, 4 Oct 2012, Ian Campbell wrote:
> Drop the *_xenballooned_pages duplicates since these are now supplied
> by the balloon code.
>
> Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
Acked-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
> arch/arm/xen/enlighten.c | 23 +++++------------------
> drivers/xen/Makefile | 4 ++--
> drivers/xen/privcmd.c | 9 ++++-----
> 3 files changed, 11 insertions(+), 25 deletions(-)
>
> diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
> index 59bcb96..ba5cc13 100644
> --- a/arch/arm/xen/enlighten.c
> +++ b/arch/arm/xen/enlighten.c
> @@ -8,6 +8,7 @@
> #include <xen/features.h>
> #include <xen/platform_pci.h>
> #include <xen/xenbus.h>
> +#include <xen/page.h>
> #include <asm/xen/hypervisor.h>
> #include <asm/xen/hypercall.h>
> #include <linux/interrupt.h>
> @@ -29,6 +30,10 @@ struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;
>
> DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
>
> +/* These are unused until we support booting "pre-ballooned" */
> +unsigned long xen_released_pages;
> +struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
> +
> /* TODO: to be removed */
> __read_mostly int xen_have_vector_callback;
> EXPORT_SYMBOL_GPL(xen_have_vector_callback);
> @@ -148,21 +153,3 @@ static int __init xen_init_events(void)
> return 0;
> }
> postcore_initcall(xen_init_events);
> -
> -/* XXX: only until balloon is properly working */
> -int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
> -{
> - *pages = alloc_pages(highmem ? GFP_HIGHUSER : GFP_KERNEL,
> - get_order(nr_pages));
> - if (*pages == NULL)
> - return -ENOMEM;
> - return 0;
> -}
> -EXPORT_SYMBOL_GPL(alloc_xenballooned_pages);
> -
> -void free_xenballooned_pages(int nr_pages, struct page **pages)
> -{
> - kfree(*pages);
> - *pages = NULL;
> -}
> -EXPORT_SYMBOL_GPL(free_xenballooned_pages);
> diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
> index 275abfc..9a7896f 100644
> --- a/drivers/xen/Makefile
> +++ b/drivers/xen/Makefile
> @@ -1,8 +1,8 @@
> ifneq ($(CONFIG_ARM),y)
> -obj-y += manage.o balloon.o
> +obj-y += manage.o
> obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
> endif
> -obj-y += grant-table.o features.o events.o
> +obj-y += grant-table.o features.o events.o balloon.o
> obj-y += xenbus/
>
> nostackp := $(call cc-option, -fno-stack-protector)
> diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
> index 1010bf7..bf4d62a 100644
> --- a/drivers/xen/privcmd.c
> +++ b/drivers/xen/privcmd.c
> @@ -200,8 +200,8 @@ static long privcmd_ioctl_mmap(void __user *udata)
> if (!xen_initial_domain())
> return -EPERM;
>
> - /* PVH: TBD/FIXME. For now we only support privcmd_ioctl_mmap_batch */
> - if (xen_pv_domain() && xen_feature(XENFEAT_auto_translated_physmap))
> + /* We only support privcmd_ioctl_mmap_batch for auto translated. */
> + if (xen_feature(XENFEAT_auto_translated_physmap))
> return -ENOSYS;
>
> if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
> @@ -413,7 +413,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
> up_write(&mm->mmap_sem);
> goto out;
> }
> - if (xen_pv_domain() && xen_feature(XENFEAT_auto_translated_physmap)) {
> + if (xen_feature(XENFEAT_auto_translated_physmap)) {
> if ((ret = pvh_privcmd_resv_pfns(vma, m.num))) {
> up_write(&mm->mmap_sem);
> goto out;
> @@ -492,8 +492,7 @@ static void privcmd_close(struct vm_area_struct *vma)
> int count;
> struct xen_pvh_pfn_info *pvhp = vma ? vma->vm_private_data : NULL;
>
> - if (!xen_pv_domain() || !pvhp ||
> - !xen_feature(XENFEAT_auto_translated_physmap))
> + if (!pvhp || !xen_feature(XENFEAT_auto_translated_physmap))
> return;
>
> count = xen_unmap_domain_mfn_range(vma, pvhp);
> --
> 1.7.2.5
>
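For context: the common implementation in drivers/xen/balloon.c exports
the same two entry points that the removed ARM stubs provided, so
callers do not change. A minimal sketch of a caller, using only the
signatures visible in the removed hunk above (the module wrapper and
NR_GRANT_PAGES are illustrative, not from this series):

#include <linux/module.h>
#include <xen/balloon.h>

#define NR_GRANT_PAGES 4

static struct page *grant_pages[NR_GRANT_PAGES];

static int __init example_init(void)
{
	/* Obtain pages with no machine backing; typical targets for
	 * mapping another domain's grants. */
	int rc = alloc_xenballooned_pages(NR_GRANT_PAGES, grant_pages,
					  false /* !highmem */);
	if (rc)
		return rc;

	/* ... map grant references onto grant_pages here ... */
	return 0;
}

static void __exit example_exit(void)
{
	/* Hand the pages back to the balloon, not the page allocator. */
	free_xenballooned_pages(NR_GRANT_PAGES, grant_pages);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Switching to the common code also quietly retires a mismatch in the
temporary stubs, which paired alloc_pages() with kfree() and only ever
filled pages[0] for a multi-page request.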
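Note that on ARM every guest is auto-translated and there is no PV
mode, so dropping the xen_pv_domain() test leaves the privcmd behaviour
there unchanged; XENFEAT_auto_translated_physmap alone now selects the
mmap_batch-only path. For reference, a condensed sketch of how
xen_feature() is backed (per drivers/xen/features.c, which the Makefile
hunk above already builds unconditionally): the flags are fetched from
the hypervisor once at boot and cached in a byte map, and
xen_feature(flag) is simply a lookup in that map.

#include <linux/types.h>
#include <linux/export.h>
#include <asm/xen/hypercall.h>
#include <xen/interface/version.h>
#include <xen/features.h>

u8 xen_features[XENFEAT_NR_SUBMAPS * 32] __read_mostly;
EXPORT_SYMBOL_GPL(xen_features);

void xen_setup_features(void)
{
	struct xen_feature_info fi;
	int i, j;

	for (i = 0; i < XENFEAT_NR_SUBMAPS; i++) {
		fi.submap_idx = i;
		if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0)
			break;
		/* Expand each 32-bit submap into one byte per feature. */
		for (j = 0; j < 32; j++)
			xen_features[i * 32 + j] = !!(fi.submap & (1 << j));
	}
}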
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel