[Xen-changelog] [xen-unstable] [XEN][POWERPC] allocate HTAB using shadow calls
# HG changeset patch
# User Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
# Node ID e07281779b8805ef05957150774d44830b3cb2b6
# Parent  bc349d862a5d6759eee86a3ebdd5c014f75781ff
[XEN][POWERPC] allocate HTAB using shadow calls

This patch uses the shadow_ops call to allocate the domain's hashed
page table (HTAB).  This allows the management tools to customize its
size for the domain.

Warning! The management tools have yet to be updated to use this
properly and to update the devtree accordingly, so the code will assume
64M of RAM and a 1M HTAB while the shadow op continues to receive a
request of 0.

Signed-off-by: Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
Signed-off-by: Hollis Blanchard <hollisb@xxxxxxxxxx>
---
 xen/arch/powerpc/htab.c          |   68 ---------------------
 xen/arch/powerpc/Makefile        |    1
 xen/arch/powerpc/domain.c        |   29 ++++-----
 xen/arch/powerpc/domain_build.c  |   32 ++++------
 xen/arch/powerpc/shadow.c        |  122 ++++++++++++++++++++++++++++++++++---
 xen/include/asm-powerpc/htab.h   |    4 -
 xen/include/asm-powerpc/shadow.h |    9 ++
 7 files changed, 152 insertions(+), 113 deletions(-)

diff -r bc349d862a5d -r e07281779b88 xen/arch/powerpc/Makefile
--- a/xen/arch/powerpc/Makefile Sun Aug 27 16:12:00 2006 -0400
+++ b/xen/arch/powerpc/Makefile Sun Aug 27 20:48:06 2006 -0400
@@ -20,7 +20,6 @@ obj-y += external.o
 obj-y += external.o
 obj-y += float.o
 obj-y += hcalls.o
-obj-y += htab.o
 obj-y += iommu.o
 obj-y += irq.o
 obj-y += mambo.o
diff -r bc349d862a5d -r e07281779b88 xen/arch/powerpc/domain.c
--- a/xen/arch/powerpc/domain.c Sun Aug 27 16:12:00 2006 -0400
+++ b/xen/arch/powerpc/domain.c Sun Aug 27 20:48:06 2006 -0400
@@ -27,6 +27,7 @@
 #include <xen/domain.h>
 #include <xen/console.h>
 #include <xen/shutdown.h>
+#include <xen/shadow.h>
 #include <xen/mm.h>
 #include <asm/htab.h>
 #include <asm/current.h>
@@ -77,7 +78,6 @@ int arch_domain_create(struct domain *d)
     unsigned long rma_base;
     unsigned long rma_sz;
     uint rma_order_pages;
-    uint htab_order_pages;
     int rc;
 
     if (d->domain_id == IDLE_DOMAIN_ID) {
@@ -104,16 +104,6 @@ int arch_domain_create(struct domain *d)
     d->arch.large_page_sizes = cpu_large_page_orders(
         d->arch.large_page_order, ARRAY_SIZE(d->arch.large_page_order));
 
-    /* FIXME: we need to the the maximum addressible memory for this
-     * domain to calculate this correctly.  It should probably be set
-     * by the managment tools */
-    htab_order_pages = rma_order_pages - 6; /* (1/64) */
-    if (test_bit(_DOMF_privileged, &d->domain_flags)) {
-        /* bump the htab size of privleged domains */
-        ++htab_order_pages;
-    }
-    htab_alloc(d, htab_order_pages);
-
     INIT_LIST_HEAD(&d->arch.extent_list);
 
     return 0;
@@ -121,7 +111,7 @@ int arch_domain_create(struct domain *d)
 
 void arch_domain_destroy(struct domain *d)
 {
-    htab_free(d);
+    shadow_teardown(d);
 }
 
 void machine_halt(void)
@@ -162,6 +152,16 @@ int arch_set_info_guest(struct vcpu *v,
 int arch_set_info_guest(struct vcpu *v, vcpu_guest_context_t *c)
 {
     memcpy(&v->arch.ctxt, &c->user_regs, sizeof(c->user_regs));
+
+    printf("Domain[%d].%d: initializing\n",
+           v->domain->domain_id, v->vcpu_id);
+
+    if (v->domain->arch.htab.order == 0)
+        panic("Page table never allocated for Domain: %d\n",
+              v->domain->domain_id);
+    if (v->domain->arch.rma_order == 0)
+        panic("RMA never allocated for Domain: %d\n",
+              v->domain->domain_id);
 
     set_bit(_VCPUF_initialised, &v->vcpu_flags);
 
@@ -253,12 +253,13 @@ void continue_running(struct vcpu *same)
 void continue_running(struct vcpu *same)
 {
     /* nothing to do */
+    return;
 }
 
 void sync_vcpu_execstate(struct vcpu *v)
 {
-    /* XXX for now, for domain destruction, make this non-fatal */
-    printf("%s: called\n", __func__);
+    /* do nothing */
+    return;
 }
 
 void domain_relinquish_resources(struct domain *d)
diff -r bc349d862a5d -r e07281779b88 xen/arch/powerpc/domain_build.c
--- a/xen/arch/powerpc/domain_build.c   Sun Aug 27 16:12:00 2006 -0400
+++ b/xen/arch/powerpc/domain_build.c   Sun Aug 27 20:48:06 2006 -0400
@@ -25,12 +25,11 @@
 #include <xen/init.h>
 #include <xen/ctype.h>
 #include <xen/iocap.h>
+#include <xen/shadow.h>
 #include <xen/version.h>
 #include <asm/processor.h>
 #include <asm/papr.h>
 #include "oftree.h"
-
-#define log2(x) ffz(~(x))
 
 extern int parseelfimage_32(struct domain_setup_info *dsi);
 extern int loadelfimage_32(struct domain_setup_info *dsi);
@@ -114,10 +113,10 @@ int construct_dom0(struct domain *d,
     uint rma_nrpages = 1 << d->arch.rma_order;
     ulong rma_sz = rma_size(d->arch.rma_order);
     ulong rma = page_to_maddr(d->arch.rma_page);
-    uint htab_order;
     start_info_t *si;
     ulong eomem;
     int am64 = 1;
+    int preempt = 0;
     ulong msr;
     ulong pc;
     ulong r2;
@@ -170,20 +169,19 @@ int construct_dom0(struct domain *d,
 
     dom0_nrpages = allocate_extents(d, dom0_nrpages, rma_nrpages);
     d->tot_pages = dom0_nrpages;
-    ASSERT(d->tot_pages > 0);
-
-    htab_order = log2(d->tot_pages) - 6;
-    if (d->arch.htab.order > 0) {
-        /* we incorrectly allocate this too early so lets adjust if
-         * necessary */
-        printk("WARNING: htab allocated to early\n");
-        if (d->arch.htab.order < htab_order) {
-            printk("WARNING: htab reallocated for more memory: 0x%x\n",
-                   htab_order);
-            htab_free(d);
-            htab_alloc(d, htab_order);
-        }
-    }
+    ASSERT(d->tot_pages >= rma_nrpages);
+
+    if (opt_dom0_shadow == 0) {
+        /* 1/64 of memory */
+        opt_dom0_shadow = (d->tot_pages >> 6) >> (20 - PAGE_SHIFT);
+    }
+
+    do {
+        shadow_set_allocation(d, opt_dom0_shadow, &preempt);
+    } while (preempt);
+    if (shadow_get_allocation(d) == 0)
+        panic("shadow allocation failed 0x%x < 0x%x\n",
+              shadow_get_allocation(d), opt_dom0_shadow);
 
     ASSERT( image_len < rma_sz );
 
diff -r bc349d862a5d -r e07281779b88 xen/arch/powerpc/shadow.c
--- a/xen/arch/powerpc/shadow.c Sun Aug 27 16:12:00 2006 -0400
+++ b/xen/arch/powerpc/shadow.c Sun Aug 27 20:48:06 2006 -0400
@@ -23,6 +23,99 @@
 #include <xen/shadow.h>
 #include <public/dom0_ops.h>
 
+static ulong htab_calc_sdr1(ulong htab_addr, ulong log_htab_size)
+{
+    ulong sdr1_htabsize;
+
+    ASSERT((htab_addr & ((1UL << log_htab_size) - 1)) == 0);
+    ASSERT(log_htab_size <= SDR1_HTABSIZE_MAX);
+    ASSERT(log_htab_size >= HTAB_MIN_LOG_SIZE);
+
+    sdr1_htabsize = log_htab_size - LOG_PTEG_SIZE - SDR1_HTABSIZE_BASEBITS;
+
+    return (htab_addr | (sdr1_htabsize & SDR1_HTABSIZE_MASK));
+}
+
+static ulong htab_alloc(struct domain *d, uint order)
+{
+    ulong htab_raddr;
+    uint log_htab_bytes = order + PAGE_SHIFT;
+    uint htab_bytes = 1UL << log_htab_bytes;
+
+    /* we use xenheap pages to keep domheap pages usefull for domains */
+
+    if (order < 6)
+        order = 6;              /* architectural minimum is 2^18 */
+    if (order < 34)
+        order = 34;             /* architectural minimum is 2^46 */
+
+    htab_raddr = (ulong)alloc_xenheap_pages(order);
+    if (htab_raddr > 0) {
+        ASSERT((htab_raddr & (htab_bytes - 1)) == 0);
+
+        d->arch.htab.order = order;
+        d->arch.htab.log_num_ptes = log_htab_bytes - LOG_PTE_SIZE;
+        d->arch.htab.sdr1 = htab_calc_sdr1(htab_raddr, log_htab_bytes);
+        d->arch.htab.map = (union pte *)htab_raddr;
+    }
+    return htab_raddr;
+}
+
+static void htab_free(struct domain *d)
+{
+    ulong htab_raddr = GET_HTAB(d);
+
+    free_xenheap_pages((void *)htab_raddr, d->arch.htab.order);
+}
+
+
+unsigned int shadow_teardown(struct domain *d)
+{
+    htab_free(d);
+    return 0;
+}
+
+unsigned int shadow_set_allocation(struct domain *d,
+                                   unsigned int megabytes,
+                                   int *preempted)
+{
+    unsigned int rc;
+    uint pages;
+    uint p;
+    uint order;
+    ulong addr;
+
+
+    if (d->arch.htab.order)
+        return -EBUSY;
+
+    if (megabytes == 0) {
+        /* old management tools */
+        megabytes = 1;          /* 1/64th of 64M */
+        printk("%s: Fix management tools to set and get shadow/htab values\n"
+               "    using %d MiB htab\n",
+               __func__, megabytes);
+    }
+    pages = megabytes << (20 - PAGE_SHIFT);
+    order = fls(pages) - 1;     /* log2 truncated */
+    if (pages & ((1 << order) - 1))
+        ++order;                /* round up */
+
+    addr = htab_alloc(d, order);
+
+    printk("%s: ibm,fpt-size should be: 0x%x\n", __func__,
+           d->arch.htab.log_num_ptes + LOG_PTE_SIZE);
+
+    if (addr == 0)
+        return -ENOMEM;
+
+    /* XXX make this a continuation */
+    for (p = 0; p < (1 << order); p++)
+        clear_page((void *)(addr + (p << PAGE_SHIFT)));
+
+    return rc;
+}
+
 int shadow_control_op(struct domain *d,
                       dom0_shadow_control_t *sc,
                       XEN_GUEST_HANDLE(dom0_op_t) u_dom0_op)
@@ -36,17 +129,28 @@ int shadow_control_op(struct domain *d,
     switch ( sc->op )
     {
     case DOM0_SHADOW_CONTROL_OP_OFF:
+        DPRINTK("Shadow is mandatory!\n");
+        return -EINVAL;
+
+    case DOM0_SHADOW2_CONTROL_OP_GET_ALLOCATION:
+        sc->mb = shadow_get_allocation(d);
         return 0;
 
-    case DOM0_SHADOW2_CONTROL_OP_GET_ALLOCATION:
-        sc->mb = 0;
-        return 0;
-    case DOM0_SHADOW2_CONTROL_OP_SET_ALLOCATION:
-        if (sc->mb > 0) {
-            BUG();
-            return -ENOMEM;
-        }
-        return 0;
+    case DOM0_SHADOW2_CONTROL_OP_SET_ALLOCATION: {
+        int rc;
+        int preempted = 0;
+
+        rc = shadow_set_allocation(d, sc->mb, &preempted);
+
+        if (preempted)
+            /* Not finished.  Set up to re-run the call. */
+            rc = hypercall_create_continuation(
+                __HYPERVISOR_dom0_op, "h", u_dom0_op);
+        else
+            /* Finished.  Return the new allocation */
+            sc->mb = shadow_get_allocation(d);
+        return rc;
+    }
 
     default:
         printk("Bad shadow op %u\n", sc->op);
diff -r bc349d862a5d -r e07281779b88 xen/include/asm-powerpc/htab.h
--- a/xen/include/asm-powerpc/htab.h    Sun Aug 27 16:12:00 2006 -0400
+++ b/xen/include/asm-powerpc/htab.h    Sun Aug 27 20:48:06 2006 -0400
@@ -133,8 +133,4 @@ struct domain_htab {
     union pte *map;     /* access the htab like an array */
     ulong *shadow;      /* idx -> logical translation array */
 };
-
-struct domain;
-extern void htab_alloc(struct domain *d, uint order);
-extern void htab_free(struct domain *d);
 #endif
diff -r bc349d862a5d -r e07281779b88 xen/include/asm-powerpc/shadow.h
--- a/xen/include/asm-powerpc/shadow.h  Sun Aug 27 16:12:00 2006 -0400
+++ b/xen/include/asm-powerpc/shadow.h  Sun Aug 27 20:48:06 2006 -0400
@@ -60,4 +60,13 @@ extern int shadow_control_op(struct doma
 extern int shadow_control_op(struct domain *d,
                              dom0_shadow_control_t *sc,
                              XEN_GUEST_HANDLE(dom0_op_t) u_dom0_op);
+extern unsigned int shadow_teardown(struct domain *d);
+extern unsigned int shadow_set_allocation(
+    struct domain *d, unsigned int megabytes, int *preempted);
+
+/* Return the size of the shadow2 pool, rounded up to the nearest MB */
+static inline unsigned int shadow_get_allocation(struct domain *d)
+{
+    return (1ULL << (d->arch.htab.order + PAGE_SHIFT)) >> 20;
+}
 #endif
diff -r bc349d862a5d -r e07281779b88 xen/arch/powerpc/htab.c
--- a/xen/arch/powerpc/htab.c   Sun Aug 27 16:12:00 2006 -0400
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,68 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- *
- * Copyright (C) IBM Corp. 2005
- *
- * Authors: Hollis Blanchard <hollisb@xxxxxxxxxx>
- */
-
-#include <xen/config.h>
-#include <xen/sched.h>
-
-static ulong htab_calc_sdr1(ulong htab_addr, ulong log_htab_size)
-{
-    ulong sdr1_htabsize;
-
-    ASSERT((htab_addr & ((1UL << log_htab_size) - 1)) == 0);
-    ASSERT(log_htab_size <= SDR1_HTABSIZE_MAX);
-    ASSERT(log_htab_size >= HTAB_MIN_LOG_SIZE);
-
-    sdr1_htabsize = log_htab_size - LOG_PTEG_SIZE - SDR1_HTABSIZE_BASEBITS;
-
-    return (htab_addr | (sdr1_htabsize & SDR1_HTABSIZE_MASK));
-}
-
-void htab_alloc(struct domain *d, uint order)
-{
-    ulong htab_raddr;
-    ulong log_htab_bytes = order + PAGE_SHIFT;
-    ulong htab_bytes = 1UL << log_htab_bytes;
-
-    /* XXX use alloc_domheap_pages instead? */
-    htab_raddr = (ulong)alloc_xenheap_pages(order);
-    ASSERT(htab_raddr != 0);
-    /* XXX check alignment guarantees */
-    ASSERT((htab_raddr & (htab_bytes - 1)) == 0);
-
-    /* XXX slow. move memset out to service partition? */
-    memset((void *)htab_raddr, 0, htab_bytes);
-
-    d->arch.htab.order = order;
-    d->arch.htab.log_num_ptes = log_htab_bytes - LOG_PTE_SIZE;
-    d->arch.htab.sdr1 = htab_calc_sdr1(htab_raddr, log_htab_bytes);
-    d->arch.htab.map = (union pte *)htab_raddr;
-    d->arch.htab.shadow = xmalloc_array(ulong,
-                                        1UL << d->arch.htab.log_num_ptes);
-    ASSERT(d->arch.htab.shadow != NULL);
-}
-
-void htab_free(struct domain *d)
-{
-    ulong htab_raddr = GET_HTAB(d);
-
-    free_xenheap_pages((void *)htab_raddr, d->arch.htab.order);
-    xfree(d->arch.htab.shadow);
-}
-
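
For reference, the sizing rule the patch puts in place works out as
follows: construct_dom0() requests 1/64th of the domain's memory in
MiB, shadow_set_allocation() rounds that up to a power-of-two number of
pages, and htab_calc_sdr1() folds the resulting size into the SDR1
value. The standalone sketch below reproduces that arithmetic for the
64M/1M case mentioned in the commit message; it is illustrative only,
and the constant values (4 KiB pages, 128-byte PTEGs, SDR1 HTABSIZE ==
log2(bytes) - 18) are assumptions here, not values copied from the Xen
headers.

    /* Illustrative sketch only: mirrors the HTAB sizing arithmetic added
     * by this patch.  PAGE_SHIFT, LOG_PTEG_SIZE and the SDR1 "base" of 18
     * bits are assumed values (4 KiB pages, 128-byte PTEGs), not copied
     * from the Xen headers. */
    #include <stdio.h>

    #define PAGE_SHIFT    12
    #define LOG_PTEG_SIZE  7            /* a PTEG is 128 bytes */
    #define SDR1_BASEBITS 11            /* HTABSIZE == log2(bytes) - 18 */

    static unsigned int log2_roundup(unsigned int x)
    {
        unsigned int order = 0;
        while ((1u << order) < x)
            ++order;
        return order;
    }

    int main(void)
    {
        unsigned int dom_mb = 64;       /* domain memory, as in the warning */
        unsigned int dom_pages = dom_mb << (20 - PAGE_SHIFT);

        /* 1/64 of memory in MiB, as construct_dom0() now requests */
        unsigned int htab_mb = (dom_pages >> 6) >> (20 - PAGE_SHIFT);
        if (htab_mb == 0)
            htab_mb = 1;                /* old-tools fallback in the patch */

        /* MiB -> pages -> allocation order, as shadow_set_allocation() does */
        unsigned int htab_pages = htab_mb << (20 - PAGE_SHIFT);
        unsigned int order = log2_roundup(htab_pages);

        unsigned int log_htab_bytes = order + PAGE_SHIFT;
        unsigned int htabsize = log_htab_bytes - LOG_PTEG_SIZE - SDR1_BASEBITS;

        printf("%u MiB domain -> %u MiB HTAB (order %u)\n",
               dom_mb, htab_mb, order);            /* 64 -> 1 (order 8) */
        printf("SDR1 HTABSIZE %u, ibm,fpt-size 0x%x\n",
               htabsize, log_htab_bytes);          /* 2 and 0x14        */
        return 0;
    }

The log2 of the table size printed at the end (0x14 for a 1 MiB HTAB)
is the same value the new printk in shadow_set_allocation() suggests
for the ibm,fpt-size device-tree property.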
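On the tools side, the intent is that the management tools size the
HTAB through the existing shadow-control dom0_op rather than a
PowerPC-specific interface, and then derive ibm,fpt-size for the
devtree from the value the hypervisor reports back. The sketch below is
a hypothetical illustration of that call sequence: the
DOM0_SHADOW2_CONTROL_OP_* names and the meaning of the op/mb fields
come from the patch, but the structure layout, the numeric values and
the issue_dom0_op() helper are stand-ins, not the real dom0_ops/libxc
declarations.

    /* Hypothetical tool-side sketch.  Only the op/mb semantics and the
     * DOM0_SHADOW2_CONTROL_OP_* names are taken from the patch; the
     * struct layout, numeric values and issue_dom0_op() are stand-ins
     * for the real dom0_ops/libxc interface. */
    #include <stdint.h>
    #include <stdio.h>

    enum {
        SHADOW2_OP_GET_ALLOCATION = 30, /* stand-in numeric values */
        SHADOW2_OP_SET_ALLOCATION = 31,
    };

    struct shadow_control_stub {
        uint32_t domain;                /* target domain id        */
        uint32_t op;                    /* which shadow operation  */
        uint32_t mb;                    /* allocation size in MiB  */
    };

    /* A real tool would marshal this into a dom0_op and issue the
     * __HYPERVISOR_dom0_op hypercall through libxc/privcmd. */
    static int issue_dom0_op(struct shadow_control_stub *sc)
    {
        printf("dom0_op: domain %u op %u mb %u\n",
               sc->domain, sc->op, sc->mb);
        return 0;
    }

    int main(void)
    {
        unsigned int dom_mb = 64;       /* domain memory in MiB */
        struct shadow_control_stub sc = {
            .domain = 1,
            .op     = SHADOW2_OP_SET_ALLOCATION,
            .mb     = (dom_mb / 64) ? (dom_mb / 64) : 1,  /* 1/64th, min 1 */
        };

        if (issue_dom0_op(&sc))         /* size the HTAB */
            return 1;

        sc.op = SHADOW2_OP_GET_ALLOCATION;
        sc.mb = 0;
        if (issue_dom0_op(&sc))         /* read back the rounded-up size */
            return 1;

        /* sc.mb would now hold the granted size; ibm,fpt-size in the
         * devtree should be derived from it (log2 of the size in bytes). */
        return 0;
    }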
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog