[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen-unstable] x86: introduce alloc_vcpu_guest_context()
# HG changeset patch # User Jan Beulich <jbeulich@xxxxxxxxxx> # Date 1302005009 -3600 # Node ID 4fe0442aa5b7434ed10e63c027bbe9e9f6642dae # Parent 37c4f7d492a419b8dd819f7d0e0902128e85bba8 x86: introduce alloc_vcpu_guest_context() This is necessary because on x86-64 struct vcpu_guest_context is larger than PAGE_SIZE, and hence not suitable for a general purpose runtime allocation. On x86-32, FIX_PAE_HIGHMEM_* fixmap entries are being re-used, while on x86-64 new per-CPU fixmap entries get introduced. The implication of using per-CPU fixmaps is that these allocations have to happen from non-preemptable hypercall context (which they all do). Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx> --- diff -r 37c4f7d492a4 -r 4fe0442aa5b7 xen/arch/x86/domain.c --- a/xen/arch/x86/domain.c Tue Apr 05 13:02:57 2011 +0100 +++ b/xen/arch/x86/domain.c Tue Apr 05 13:03:29 2011 +0100 @@ -45,6 +45,7 @@ #include <asm/mpspec.h> #include <asm/ldt.h> #include <asm/hypercall.h> +#include <asm/fixmap.h> #include <asm/hvm/hvm.h> #include <asm/hvm/support.h> #include <asm/debugreg.h> @@ -221,6 +222,53 @@ free_xenheap_page(v); } +static DEFINE_PER_CPU(struct page_info *[ + PFN_UP(sizeof(struct vcpu_guest_context))], vgc_pages); + +struct vcpu_guest_context *alloc_vcpu_guest_context(void) +{ + unsigned int i, cpu = smp_processor_id(); + enum fixed_addresses idx = FIX_VGC_BEGIN - + cpu * PFN_UP(sizeof(struct vcpu_guest_context)); + +#ifdef __i386__ + BUILD_BUG_ON(sizeof(struct vcpu_guest_context) > PAGE_SIZE); +#endif + BUG_ON(per_cpu(vgc_pages[0], cpu) != NULL); + + for ( i = 0; i < PFN_UP(sizeof(struct vcpu_guest_context)); ++i ) + { + struct page_info *pg = alloc_domheap_page(NULL, 0); + + if ( unlikely(pg == NULL) ) + { + free_vcpu_guest_context(NULL); + return NULL; + } + __set_fixmap(idx - i, page_to_mfn(pg), __PAGE_HYPERVISOR); + per_cpu(vgc_pages[i], cpu) = pg; + } + return (void *)fix_to_virt(idx); +} + +void free_vcpu_guest_context(struct vcpu_guest_context *vgc) +{ + unsigned int i, 
cpu = smp_processor_id(); + enum fixed_addresses idx = FIX_VGC_BEGIN - + cpu * PFN_UP(sizeof(struct vcpu_guest_context)); + + BUG_ON(vgc && vgc != (void *)fix_to_virt(idx)); + + for ( i = 0; i < PFN_UP(sizeof(struct vcpu_guest_context)); ++i ) + { + if ( !per_cpu(vgc_pages[i], cpu) ) + continue; + __set_fixmap(idx - i, 0, 0); + free_domheap_page(per_cpu(vgc_pages[i], cpu)); + per_cpu(vgc_pages[i], cpu) = NULL; + } +} + #ifdef __x86_64__ static int setup_compat_l4(struct vcpu *v) diff -r 37c4f7d492a4 -r 4fe0442aa5b7 xen/common/domain.c --- a/xen/common/domain.c Tue Apr 05 13:02:57 2011 +0100 +++ b/xen/common/domain.c Tue Apr 05 13:03:29 2011 +0100 @@ -832,12 +832,12 @@ if ( v->vcpu_info == &dummy_vcpu_info ) return -EINVAL; - if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL ) + if ( (ctxt = alloc_vcpu_guest_context()) == NULL ) return -ENOMEM; if ( copy_from_guest(ctxt, arg, 1) ) { - xfree(ctxt); + free_vcpu_guest_context(ctxt); return -EFAULT; } @@ -847,7 +847,7 @@ rc = boot_vcpu(d, vcpuid, ctxt); domain_unlock(d); - xfree(ctxt); + free_vcpu_guest_context(ctxt); break; case VCPUOP_up: diff -r 37c4f7d492a4 -r 4fe0442aa5b7 xen/common/domctl.c --- a/xen/common/domctl.c Tue Apr 05 13:02:57 2011 +0100 +++ b/xen/common/domctl.c Tue Apr 05 13:03:29 2011 +0100 @@ -295,7 +295,7 @@ < sizeof(struct compat_vcpu_guest_context)); #endif ret = -ENOMEM; - if ( (c.nat = xmalloc(struct vcpu_guest_context)) == NULL ) + if ( (c.nat = alloc_vcpu_guest_context()) == NULL ) goto svc_out; #ifdef CONFIG_COMPAT @@ -318,7 +318,7 @@ } svc_out: - xfree(c.nat); + free_vcpu_guest_context(c.nat); rcu_unlock_domain(d); } break; diff -r 37c4f7d492a4 -r 4fe0442aa5b7 xen/include/asm-ia64/domain.h --- a/xen/include/asm-ia64/domain.h Tue Apr 05 13:02:57 2011 +0100 +++ b/xen/include/asm-ia64/domain.h Tue Apr 05 13:03:29 2011 +0100 @@ -25,6 +25,9 @@ extern void relinquish_vcpu_resources(struct vcpu *v); extern int vcpu_late_initialise(struct vcpu *v); +#define alloc_vcpu_guest_context() 
xmalloc(struct vcpu_guest_context) +#define free_vcpu_guest_context(vgc) xfree(vgc) + /* given a current domain metaphysical address, return the physical address */ extern unsigned long translate_domain_mpaddr(unsigned long mpaddr, struct p2m_entry* entry); diff -r 37c4f7d492a4 -r 4fe0442aa5b7 xen/include/asm-x86/fixmap.h --- a/xen/include/asm-x86/fixmap.h Tue Apr 05 13:02:57 2011 +0100 +++ b/xen/include/asm-x86/fixmap.h Tue Apr 05 13:03:29 2011 +0100 @@ -16,6 +16,7 @@ #include <asm/apicdef.h> #include <asm/acpi.h> #include <asm/page.h> +#include <xen/pfn.h> #include <xen/kexec.h> #include <xen/iommu.h> #include <asm/amd-iommu.h> @@ -34,6 +35,12 @@ #ifdef __i386__ FIX_PAE_HIGHMEM_0, FIX_PAE_HIGHMEM_END = FIX_PAE_HIGHMEM_0 + NR_CPUS-1, +#define FIX_VGC_END FIX_PAE_HIGHMEM_0 +#define FIX_VGC_BEGIN FIX_PAE_HIGHMEM_END +#else + FIX_VGC_END, + FIX_VGC_BEGIN = FIX_VGC_END + + PFN_UP(sizeof(struct vcpu_guest_context)) * NR_CPUS - 1, #endif FIX_APIC_BASE, FIX_IO_APIC_BASE_0, diff -r 37c4f7d492a4 -r 4fe0442aa5b7 xen/include/xen/domain.h --- a/xen/include/xen/domain.h Tue Apr 05 13:02:57 2011 +0100 +++ b/xen/include/xen/domain.h Tue Apr 05 13:03:29 2011 +0100 @@ -32,6 +32,12 @@ struct vcpu *alloc_vcpu_struct(void); void free_vcpu_struct(struct vcpu *v); +/* Allocate/free a vcpu_guest_context structure. */ +#ifndef alloc_vcpu_guest_context +struct vcpu_guest_context *alloc_vcpu_guest_context(void); +void free_vcpu_guest_context(struct vcpu_guest_context *); +#endif + /* * Initialise/destroy arch-specific details of a VCPU. * - vcpu_initialise() is called after the basic generic fields of the _______________________________________________ Xen-changelog mailing list Xen-changelog@xxxxxxxxxxxxxxxxxxx http://lists.xensource.com/xen-changelog
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |