[Xen-changelog] Add -Wdeclaration-after-statement to Xen and tools build.

# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID c7508abc5b6b1aac2f8ee63fe56922f43c457cc3
# Parent  f7bee3cb1bf1d4ff5a63153948effaa35b0f6f01
Add -Wdeclaration-after-statement to Xen and tools build.

Fix the compile errors that result from this.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
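For context: -Wdeclaration-after-statement makes gcc warn whenever a
declaration follows a statement inside a block, enforcing C90 placement
rules even when compiling in C99 mode. A minimal sketch of the pattern the
rest of this patch fixes (hypothetical code, not taken from the tree):

    int scale(int x)
    {
        x += 1;
        int y = x * 2;  /* warning: ISO C90 forbids mixed declarations and code */
        return y;
    }

    int scale_fixed(int x)
    {
        int y;          /* declaration hoisted to the top of the block */

        x += 1;
        y = x * 2;
        return y;
    }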
diff -r f7bee3cb1bf1 -r c7508abc5b6b Config.mk
--- a/Config.mk Tue Nov 22 17:21:22 2005
+++ b/Config.mk Tue Nov 22 17:44:08 2005
@@ -8,6 +8,7 @@
 # Tools to run on system hosting the build
 HOSTCC     = gcc
 HOSTCFLAGS = -Wall -Werror -Wstrict-prototypes -O2 -fomit-frame-pointer
+HOSTCFLAGS += -Wdeclaration-after-statement

 AS         = $(CROSS_COMPILE)as
 LD         = $(CROSS_COMPILE)ld
@@ -38,6 +39,8 @@
 EXTRA_LIB += $(EXTRA_PREFIX)/$(LIBDIR)
 endif

+CFLAGS += -Wdeclaration-after-statement
+
 LDFLAGS += $(foreach i, $(EXTRA_LIB), -L$(i))
 CFLAGS  += $(foreach i, $(EXTRA_INCLUDES), -I$(i))

diff -r f7bee3cb1bf1 -r c7508abc5b6b tools/libxc/xc_private.h
--- a/tools/libxc/xc_private.h Tue Nov 22 17:21:22 2005
+++ b/tools/libxc/xc_private.h Tue Nov 22 17:44:08 2005
@@ -21,9 +21,8 @@
    reason, we must zero the privcmd_hypercall_t or dom0_op_t instance
    before a call, if using valgrind.  */
 #ifdef VALGRIND
-#define DECLARE_HYPERCALL privcmd_hypercall_t hypercall; \
-    memset(&hypercall, 0, sizeof(hypercall))
-#define DECLARE_DOM0_OP dom0_op_t op; memset(&op, 0, sizeof(op))
+#define DECLARE_HYPERCALL privcmd_hypercall_t hypercall = { 0 }
+#define DECLARE_DOM0_OP dom0_op_t op = { 0 }
 #else
 #define DECLARE_HYPERCALL privcmd_hypercall_t hypercall
 #define DECLARE_DOM0_OP dom0_op_t op

diff -r f7bee3cb1bf1 -r c7508abc5b6b tools/libxc/xg_private.h
--- a/tools/libxc/xg_private.h Tue Nov 22 17:21:22 2005
+++ b/tools/libxc/xg_private.h Tue Nov 22 17:44:08 2005
@@ -20,7 +20,7 @@
    reason, we must zero the dom0_op_t instance before a call, if
    using valgrind.  */
 #ifdef VALGRIND
-#define DECLARE_DOM0_OP dom0_op_t op; memset(&op, 0, sizeof(op))
+#define DECLARE_DOM0_OP dom0_op_t op = { 0 }
 #else
 #define DECLARE_DOM0_OP dom0_op_t op
 #endif
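The macro change above is needed because the old DECLARE_HYPERCALL and
DECLARE_DOM0_OP expanded to a declaration *plus* a memset() statement, so
any declaration written after the macro use tripped the new warning. An
initializer keeps the expansion a pure declaration while still zeroing the
object for valgrind (= { 0 } zero-initializes every member; padding bytes
are not strictly guaranteed). Illustrative sketch with a hypothetical
stand-in type:

    #include <string.h>

    struct op { int cmd; long arg; };   /* stand-in for dom0_op_t */

    void old_style(void)
    {
        struct op op; memset(&op, 0, sizeof(op)); /* declaration + statement */
        int fd = -1;    /* warning: declaration after a statement */
        (void)op; (void)fd;
    }

    void new_style(void)
    {
        struct op op = { 0 };   /* pure declaration, still fully zeroed */
        int fd = -1;            /* fine */
        (void)op; (void)fd;
    }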
diff -r f7bee3cb1bf1 -r c7508abc5b6b tools/xenstore/xenstore_client.c
--- a/tools/xenstore/xenstore_client.c Tue Nov 22 17:21:22 2005
+++ b/tools/xenstore/xenstore_client.c Tue Nov 22 17:44:08 2005
@@ -109,7 +109,7 @@
        necessary.
     */
-    char *path = argv[optind];
+    char *slash, *path = argv[optind];

     if (tidy) {
        /* Copy path, because we can't modify argv because we will need it
@@ -123,7 +123,7 @@
            return 1;
        }

-       char *slash = strrchr(p, '/');
+       slash = strrchr(p, '/');
        if (slash) {
            char *val;
            *slash = '\0';

diff -r f7bee3cb1bf1 -r c7508abc5b6b xen/arch/x86/audit.c
--- a/xen/arch/x86/audit.c Tue Nov 22 17:21:22 2005
+++ b/xen/arch/x86/audit.c Tue Nov 22 17:44:08 2005
@@ -55,10 +55,11 @@
     void _adjust(struct pfn_info *page, int adjtype ADJUST_EXTRA_ARGS)
     {
+        int count;
+
         if ( adjtype )
         {
-            // adjust the type count
-            //
+            /* adjust the type count */
             int tcount = page->u.inuse.type_info & PGT_count_mask;
             tcount += dir;
             ttot++;
@@ -92,10 +93,8 @@
             page->u.inuse.type_info += dir;
         }

-        // adjust the general count
-        //
-        int count = page->count_info & PGC_count_mask;
-        count += dir;
+        /* adjust the general count */
+        count = (page->count_info & PGC_count_mask) + dir;
         ctot++;

         if ( count < 0 )
@@ -124,6 +123,7 @@
     {
         unsigned long *pt = map_domain_page(mfn);
         int i;
+        u32 page_type;

         for ( i = 0; i < l2limit; i++ )
         {
@@ -147,8 +147,7 @@
                     continue;
                 }

-                u32 page_type = l1page->u.inuse.type_info & PGT_type_mask;
-
+                page_type = l1page->u.inuse.type_info & PGT_type_mask;
                 if ( page_type != PGT_l1_shadow )
                 {
                     printk("Audit %d: [Shadow L2 mfn=%lx i=%x] "
@@ -174,8 +173,7 @@
                     continue;
                 }

-                u32 page_type = l1page->u.inuse.type_info & PGT_type_mask;
-
+                page_type = l1page->u.inuse.type_info & PGT_type_mask;
                 if ( page_type == PGT_l2_page_table )
                 {
                     printk("Audit %d: [%x] Found %s Linear PT "
@@ -741,6 +739,7 @@
     while ( list_ent != &d->page_list )
     {
         u32 page_type;
+        unsigned long pfn;

         page = list_entry(list_ent, struct pfn_info, list);
         mfn = page_to_pfn(page);
@@ -797,7 +796,7 @@
             printk("out of sync page mfn=%lx is not a page table\n", mfn);
             errors++;
         }
-        unsigned long pfn = __mfn_to_gpfn(d, mfn);
+        pfn = __mfn_to_gpfn(d, mfn);
         if ( !__shadow_status(d, pfn, PGT_snapshot) )
         {
             printk("out of sync page mfn=%lx doesn't have a snapshot\n",

diff -r f7bee3cb1bf1 -r c7508abc5b6b xen/arch/x86/dm/vmx_vioapic.c
--- a/xen/arch/x86/dm/vmx_vioapic.c Tue Nov 22 17:21:22 2005
+++ b/xen/arch/x86/dm/vmx_vioapic.c Tue Nov 22 17:44:08 2005
@@ -52,8 +52,6 @@

 static void ioapic_dump_redir(vmx_vioapic_t *s, uint8_t entry)
 {
-    ASSERT(s);
-
     RedirStatus redir = s->redirtbl[entry];

     VMX_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_dump_redir "

diff -r f7bee3cb1bf1 -r c7508abc5b6b xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Tue Nov 22 17:21:22 2005
+++ b/xen/arch/x86/mm.c Tue Nov 22 17:44:08 2005
@@ -521,9 +521,9 @@
     l3_pgentry_t l3e, unsigned long pfn,
     struct domain *d, unsigned long vaddr)
 {
-    ASSERT( !shadow_mode_refcounts(d) );
-
     int rc;
+
+    ASSERT(!shadow_mode_refcounts(d));

     if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
         return 1;
@@ -1880,19 +1880,18 @@

     case MMUEXT_SET_LDT:
     {
+        unsigned long ptr  = op.arg1.linear_addr;
+        unsigned long ents = op.arg2.nr_ents;
+
         if ( shadow_mode_external(d) )
         {
             MEM_LOG("ignoring SET_LDT hypercall from external "
                     "domain %u", d->domain_id);
             okay = 0;
-            break;
         }
-
-        unsigned long ptr  = op.arg1.linear_addr;
-        unsigned long ents = op.arg2.nr_ents;
-        if ( ((ptr & (PAGE_SIZE-1)) != 0) ||
-             (ents > 8192) ||
-             !array_access_ok(ptr, ents, LDT_ENTRY_SIZE) )
+        else if ( ((ptr & (PAGE_SIZE-1)) != 0) ||
+                  (ents > 8192) ||
+                  !array_access_ok(ptr, ents, LDT_ENTRY_SIZE) )
         {
             okay = 0;
             MEM_LOG("Bad args to SET_LDT: ptr=%lx, ents=%lx", ptr, ents);
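Note the control-flow change that comes with the hoisting in the
MMUEXT_SET_LDT hunk above: ptr and ents are now read before the
shadow-mode check, and the early break becomes an else-if so the argument
validation is still skipped for external domains. A reduced sketch of the
shape (hypothetical names, not the real hypercall code):

    int set_ldt(unsigned long arg1, unsigned long arg2, int external)
    {
        int okay = 1;
        unsigned long ptr  = arg1;   /* hoisted above the first statement */
        unsigned long ents = arg2;

        if ( external )
            okay = 0;                          /* was: okay = 0; break; */
        else if ( (ptr & 0xfff) || (ents > 8192) )
            okay = 0;                          /* bad arguments */
        else
            /* install the LDT here */;

        return okay;
    }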
diff -r f7bee3cb1bf1 -r c7508abc5b6b xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c Tue Nov 22 17:21:22 2005
+++ b/xen/arch/x86/shadow.c Tue Nov 22 17:44:08 2005
@@ -207,6 +207,7 @@
     struct pfn_info *page;
     unsigned long smfn;
     int pin = 0;
+    void *l1, *lp;

     // Currently, we only keep pre-zero'ed pages around for use as L1's...
     // This will change. Soon.
@@ -232,19 +233,19 @@
         if (!page)
             goto no_shadow_page;

-        void *l1_0 = map_domain_page(page_to_pfn(page));
-        memset(l1_0, 0, PAGE_SIZE);
-        unmap_domain_page(l1_0);
-
-        void *l1_1 = map_domain_page(page_to_pfn(page+1));
-        memset(l1_1, 0, PAGE_SIZE);
-        unmap_domain_page(l1_1);
+        l1 = map_domain_page(page_to_pfn(page));
+        memset(l1, 0, PAGE_SIZE);
+        unmap_domain_page(l1);
+
+        l1 = map_domain_page(page_to_pfn(page+1));
+        memset(l1, 0, PAGE_SIZE);
+        unmap_domain_page(l1);
 #else
         page = alloc_domheap_page(NULL);
         if (!page)
             goto no_shadow_page;

-        void *l1 = map_domain_page(page_to_pfn(page));
+        l1 = map_domain_page(page_to_pfn(page));
         memset(l1, 0, PAGE_SIZE);
         unmap_domain_page(l1);
 #endif
@@ -255,7 +256,7 @@
         if (!page)
             goto no_shadow_page;

-        void *l1 = map_domain_page(page_to_pfn(page));
+        l1 = map_domain_page(page_to_pfn(page));
         memset(l1, 0, PAGE_SIZE);
         unmap_domain_page(l1);
     }
@@ -279,7 +280,7 @@
         if (!page)
             goto no_shadow_page;

-        void *lp = map_domain_page(page_to_pfn(page));
+        lp = map_domain_page(page_to_pfn(page));
         memset(lp, 0, PAGE_SIZE);
         unmap_domain_page(lp);
     }
@@ -588,9 +589,11 @@
     }

 #ifndef NDEBUG
-    l2_pgentry_t old_sl2e;
-    __shadow_get_l2e(v, va, &old_sl2e);
-    ASSERT( !(l2e_get_flags(old_sl2e) & _PAGE_PRESENT) );
+    {
+        l2_pgentry_t old_sl2e;
+        __shadow_get_l2e(v, va, &old_sl2e);
+        ASSERT(!(l2e_get_flags(old_sl2e) & _PAGE_PRESENT));
+    }
 #endif

 #if CONFIG_PAGING_LEVELS >=3
@@ -952,14 +955,16 @@
     ASSERT(pfn_valid(mfn));

 #ifndef NDEBUG
-    u32 type = page->u.inuse.type_info & PGT_type_mask;
-    if ( shadow_mode_refcounts(d) )
-    {
-        ASSERT(type == PGT_writable_page);
-    }
-    else
-    {
-        ASSERT(type && (type < PGT_l4_page_table));
+    {
+        u32 type = page->u.inuse.type_info & PGT_type_mask;
+        if ( shadow_mode_refcounts(d) )
+        {
+            ASSERT(type == PGT_writable_page);
+        }
+        else
+        {
+            ASSERT(type && (type < PGT_l4_page_table));
+        }
     }
 #endif
@@ -1438,6 +1443,8 @@
     int need_flush = 0, external = shadow_mode_external(d);
     int unshadow;
     int changed;
+    u32 min_max_shadow, min_max_snapshot;
+    int min_shadow, max_shadow, min_snapshot, max_snapshot;

     ASSERT(shadow_lock_is_acquired(d));
@@ -1466,7 +1473,7 @@
             continue;
         }

-        FSH_LOG("resyncing t=%08x gpfn=%lx gmfn=%lx smfn=%lx snapshot_mfn=%lx",
+        FSH_LOG("resyncing t=%08x gpfn=%lx gmfn=%lx smfn=%lx snapshot_mfn=%lx",
                 stype, entry->gpfn, entry->gmfn, smfn, entry->snapshot_mfn);

         // Compare guest's new contents to its snapshot, validating
@@ -1482,16 +1489,16 @@
         unshadow = 0;

-        u32 min_max_shadow = pfn_to_page(smfn)->tlbflush_timestamp;
-        int min_shadow = SHADOW_MIN(min_max_shadow);
-        int max_shadow = SHADOW_MAX(min_max_shadow);
-
-        u32 min_max_snapshot =
-            pfn_to_page(entry->snapshot_mfn)->tlbflush_timestamp;
-        int min_snapshot = SHADOW_MIN(min_max_snapshot);
-        int max_snapshot = SHADOW_MAX(min_max_snapshot);
-
-        switch ( stype ) {
+        min_max_shadow = pfn_to_page(smfn)->tlbflush_timestamp;
+        min_shadow = SHADOW_MIN(min_max_shadow);
+        max_shadow = SHADOW_MAX(min_max_shadow);
+
+        min_max_snapshot= pfn_to_page(entry->snapshot_mfn)->tlbflush_timestamp;
+        min_snapshot = SHADOW_MIN(min_max_snapshot);
+        max_snapshot = SHADOW_MAX(min_max_snapshot);
+
+        switch ( stype )
+        {
         case PGT_l1_shadow:
         {
             guest_l1_pgentry_t *guest1 = guest;
@@ -1680,9 +1687,9 @@
         changed = 0;
         for ( i = 0; i < GUEST_ROOT_PAGETABLE_ENTRIES; i++ )
         {
+            guest_root_pgentry_t new_root_e = guest_root[i];
             if ( !is_guest_l4_slot(i) && !external )
                 continue;
-            guest_root_pgentry_t new_root_e = guest_root[i];
             if ( root_entry_has_changed(
                 new_root_e, snapshot_root[i], PAGE_FLAG_MASK))
             {
@@ -1749,6 +1756,7 @@
 {
     struct out_of_sync_entry *entry;
     int need_flush = 0;
+    l1_pgentry_t *ppte, opte, npte;

     perfc_incrc(shadow_sync_all);
@@ -1764,11 +1772,10 @@
         if ( entry->writable_pl1e & (sizeof(l1_pgentry_t)-1) )
             continue;

-        l1_pgentry_t *ppte = (l1_pgentry_t *)(
+        ppte = (l1_pgentry_t *)(
             (char *)map_domain_page(entry->writable_pl1e >> PAGE_SHIFT) +
             (entry->writable_pl1e & ~PAGE_MASK));
-        l1_pgentry_t opte = *ppte;
-        l1_pgentry_t npte = opte;
+        opte = npte = *ppte;
         l1e_remove_flags(npte, _PAGE_RW);

         if ( (l1e_get_flags(npte) & _PAGE_PRESENT) &&
@@ -2821,6 +2828,7 @@
     unsigned int count;
     unsigned long sl2mfn;
     struct pfn_info *page;
+    void *l2;

     memset(spl4e, 0, PAGE_SIZE);
@@ -2835,7 +2843,7 @@
     for (count = 0; count < PDP_ENTRIES; count++)
     {
         sl2mfn = page_to_pfn(page+count);
-        void *l2 = map_domain_page(sl2mfn);
+        l2 = map_domain_page(sl2mfn);
         memset(l2, 0, PAGE_SIZE);
         unmap_domain_page(l2);
         spl4e[count] = l4e_from_pfn(sl2mfn, _PAGE_PRESENT);
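Several hunks above use the same trick for debug-only locals: rather than
hoist them into the function scope, they are wrapped in a bare { } block,
so the declaration sits at the top of *a* block even though statements
precede it in the function. A compilable sketch with hypothetical helpers:

    #include <assert.h>

    static unsigned long table[16];

    void set_entry(unsigned long va, unsigned long val)
    {
        va %= 16;              /* statements first... */

    #ifndef NDEBUG
        {                      /* ...then a brace block opens a fresh
                                * declaration section */
            unsigned long old = table[va];
            assert(old == 0);
        }
    #endif

        table[va] = val;
    }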
diff -r f7bee3cb1bf1 -r c7508abc5b6b xen/arch/x86/shadow32.c
--- a/xen/arch/x86/shadow32.c Tue Nov 22 17:21:22 2005
+++ b/xen/arch/x86/shadow32.c Tue Nov 22 17:44:08 2005
@@ -208,6 +208,7 @@
     struct pfn_info *page;
     unsigned long smfn;
     int pin = 0;
+    void *l1;

     // Currently, we only keep pre-zero'ed pages around for use as L1's...
     // This will change. Soon.
@@ -224,7 +225,7 @@
     else
     {
         page = alloc_domheap_page(NULL);
-        void *l1 = map_domain_page(page_to_pfn(page));
+        l1 = map_domain_page(page_to_pfn(page));
         memset(l1, 0, PAGE_SIZE);
         unmap_domain_page(l1);
     }
@@ -558,6 +559,7 @@
     int i;
     struct shadow_status *x;
     struct vcpu *v;
+    struct list_head *list_ent, *tmp;

     /*
      * WARNING! The shadow page table must not currently be in use!
@@ -697,15 +699,14 @@
         xfree(mfn_list);
     }

-    // Now free the pre-zero'ed pages from the domain
-    //
-    struct list_head *list_ent, *tmp;
+    /* Now free the pre-zero'ed pages from the domain */
     list_for_each_safe(list_ent, tmp, &d->arch.free_shadow_frames)
     {
+        struct pfn_info *page = list_entry(list_ent, struct pfn_info, list);
+
         list_del(list_ent);
         perfc_decr(free_l1_pages);
-        struct pfn_info *page = list_entry(list_ent, struct pfn_info, list);
         free_domheap_page(page);
     }
@@ -1218,6 +1219,11 @@

 void __shadow_mode_disable(struct domain *d)
 {
+    struct vcpu *v;
+#ifndef NDEBUG
+    int i;
+#endif
+
     if ( unlikely(!shadow_mode_enabled(d)) )
         return;
@@ -1225,7 +1231,6 @@
     free_writable_pte_predictions(d);

 #ifndef NDEBUG
-    int i;
     for ( i = 0; i < shadow_ht_buckets; i++ )
     {
         if ( d->arch.shadow_ht[i].gpfn_and_flags != 0 )
@@ -1242,11 +1247,8 @@
     free_shadow_ht_entries(d);
     free_out_of_sync_entries(d);

-    struct vcpu *v;
     for_each_vcpu(d, v)
-    {
         update_pagetables(v);
-    }
 }

 static int shadow_mode_table_op(
@@ -1423,14 +1425,18 @@

 unsigned long gpfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
 {
-    ASSERT( shadow_mode_translate(d) );
+    unsigned long va, tabpfn;
+    l1_pgentry_t *l1, l1e;
+    l2_pgentry_t *l2, l2e;
+
+    ASSERT(shadow_mode_translate(d));

     perfc_incrc(gpfn_to_mfn_foreign);

-    unsigned long va = gpfn << PAGE_SHIFT;
-    unsigned long tabpfn = pagetable_get_pfn(d->arch.phys_table);
-    l2_pgentry_t *l2 = map_domain_page(tabpfn);
-    l2_pgentry_t l2e = l2[l2_table_offset(va)];
+    va = gpfn << PAGE_SHIFT;
+    tabpfn = pagetable_get_pfn(d->arch.phys_table);
+    l2 = map_domain_page(tabpfn);
+    l2e = l2[l2_table_offset(va)];
     unmap_domain_page(l2);
     if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
     {
@@ -1438,8 +1444,8 @@
                d->domain_id, gpfn, l2e_get_intpte(l2e));
         return INVALID_MFN;
     }
-    l1_pgentry_t *l1 = map_domain_page(l2e_get_pfn(l2e));
-    l1_pgentry_t l1e = l1[l1_table_offset(va)];
+    l1 = map_domain_page(l2e_get_pfn(l2e));
+    l1e = l1[l1_table_offset(va)];
     unmap_domain_page(l1);

 #if 0
@@ -1634,9 +1640,11 @@
     }

 #ifndef NDEBUG
-    l2_pgentry_t old_sl2e;
-    __shadow_get_l2e(v, va, &old_sl2e);
-    ASSERT( !(l2e_get_flags(old_sl2e) & _PAGE_PRESENT) );
+    {
+        l2_pgentry_t old_sl2e;
+        __shadow_get_l2e(v, va, &old_sl2e);
+        ASSERT( !(l2e_get_flags(old_sl2e) & _PAGE_PRESENT) );
+    }
 #endif

     if ( !get_shadow_ref(sl1mfn) )
@@ -1840,14 +1848,16 @@
     ASSERT(pfn_valid(mfn));

 #ifndef NDEBUG
-    u32 type = page->u.inuse.type_info & PGT_type_mask;
-    if ( shadow_mode_refcounts(d) )
-    {
-        ASSERT(type == PGT_writable_page);
-    }
-    else
-    {
-        ASSERT(type && (type < PGT_l4_page_table));
+    {
+        u32 type = page->u.inuse.type_info & PGT_type_mask;
+        if ( shadow_mode_refcounts(d) )
+        {
+            ASSERT(type == PGT_writable_page);
+        }
+        else
+        {
+            ASSERT(type && (type < PGT_l4_page_table));
+        }
     }
 #endif
@@ -2329,6 +2339,8 @@
     int need_flush = 0, external = shadow_mode_external(d);
     int unshadow;
     int changed;
+    u32 min_max_shadow, min_max_snapshot;
+    int min_shadow, max_shadow, min_snapshot, max_snapshot;

     ASSERT(shadow_lock_is_acquired(d));
@@ -2388,14 +2400,14 @@
             if ( !smfn )
                 break;

-            u32 min_max_shadow = pfn_to_page(smfn)->tlbflush_timestamp;
-            int min_shadow = SHADOW_MIN(min_max_shadow);
-            int max_shadow = SHADOW_MAX(min_max_shadow);
-
-            u32 min_max_snapshot =
+            min_max_shadow = pfn_to_page(smfn)->tlbflush_timestamp;
+            min_shadow = SHADOW_MIN(min_max_shadow);
+            max_shadow = SHADOW_MAX(min_max_shadow);
+
+            min_max_snapshot =
                 pfn_to_page(entry->snapshot_mfn)->tlbflush_timestamp;
-            int min_snapshot = SHADOW_MIN(min_max_snapshot);
-            int max_snapshot = SHADOW_MAX(min_max_snapshot);
+            min_snapshot = SHADOW_MIN(min_max_snapshot);
+            max_snapshot = SHADOW_MAX(min_max_snapshot);

             changed = 0;
@@ -2454,13 +2466,11 @@
         changed = 0;
         for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
         {
-#if CONFIG_X86_PAE
-            BUG();  /* FIXME: need type_info */
-#endif
+            l2_pgentry_t new_pde = guest2[i];
+
             if ( !is_guest_l2_slot(0,i) && !external )
                 continue;

-            l2_pgentry_t new_pde = guest2[i];
             if ( l2e_has_changed(new_pde, snapshot2[i], PAGE_FLAG_MASK))
             {
                 need_flush |= validate_pde_change(d, new_pde, &shadow2[i]);
@@ -2500,13 +2510,11 @@
         changed = 0;
         for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
        {
-#if CONFIG_X86_PAE
-            BUG();  /* FIXME: need type_info */
-#endif
+            l2_pgentry_t new_pde = guest2[i];
+
             if ( !is_guest_l2_slot(0, i) && !external )
                 continue;

-            l2_pgentry_t new_pde = guest2[i];
             if ( l2e_has_changed(new_pde, snapshot2[i], PAGE_FLAG_MASK) )
             {
                 need_flush |= validate_hl2e_change(d, new_pde, &shadow2[i]);
@@ -2554,6 +2562,7 @@
 {
     struct out_of_sync_entry *entry;
     int need_flush = 0;
+    l1_pgentry_t *ppte, opte, npte;

     perfc_incrc(shadow_sync_all);
@@ -2569,11 +2578,10 @@
         if ( entry->writable_pl1e & (sizeof(l1_pgentry_t)-1) )
             continue;

-        l1_pgentry_t *ppte = (l1_pgentry_t *)(
+        ppte = (l1_pgentry_t *)(
             (char *)map_domain_page(entry->writable_pl1e >> PAGE_SHIFT) +
             (entry->writable_pl1e & ~PAGE_MASK));
-        l1_pgentry_t opte = *ppte;
-        l1_pgentry_t npte = opte;
+        opte = npte = *ppte;
         l1e_remove_flags(npte, _PAGE_RW);

         if ( (l1e_get_flags(npte) & _PAGE_PRESENT) &&
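The shadow32.c hunks repeat a pattern from shadow.c: instead of declaring a
fresh pointer (l1_0, l1_1, ...) at each map/zero/unmap site, one pointer is
declared up front and reused for every cycle. A self-contained sketch, with
static buffers standing in for Xen's map_domain_page()/unmap_domain_page():

    #include <string.h>

    #define PAGE_SIZE 4096

    static char page0[PAGE_SIZE], page1[PAGE_SIZE];
    static void *map_page(int i)     { return i ? page1 : page0; }
    static void unmap_page(void *p)  { (void)p; }

    void zero_two_pages(void)
    {
        void *l1;               /* one hoisted declaration, reused below */

        l1 = map_page(0);
        memset(l1, 0, PAGE_SIZE);
        unmap_page(l1);

        l1 = map_page(1);       /* reuse instead of a second declaration */
        memset(l1, 0, PAGE_SIZE);
        unmap_page(l1);
    }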
diff -r f7bee3cb1bf1 -r c7508abc5b6b xen/arch/x86/shadow_public.c
--- a/xen/arch/x86/shadow_public.c Tue Nov 22 17:21:22 2005
+++ b/xen/arch/x86/shadow_public.c Tue Nov 22 17:44:08 2005
@@ -786,6 +786,7 @@
     int i;
     struct shadow_status *x;
     struct vcpu *v;
+    struct list_head *list_ent, *tmp;

     /*
      * WARNING! The shadow page table must not currently be in use!
@@ -884,15 +885,14 @@
         xfree(mfn_list);
     }

-    // Now free the pre-zero'ed pages from the domain
-    //
-    struct list_head *list_ent, *tmp;
+    /* Now free the pre-zero'ed pages from the domain. */
     list_for_each_safe(list_ent, tmp, &d->arch.free_shadow_frames)
     {
+        struct pfn_info *page = list_entry(list_ent, struct pfn_info, list);
+
         list_del(list_ent);
         perfc_decr(free_l1_pages);
-        struct pfn_info *page = list_entry(list_ent, struct pfn_info, list);
         if (d->arch.ops->guest_paging_levels == PAGING_L2)
         {
 #if CONFIG_PAGING_LEVELS >=4
@@ -912,6 +912,11 @@

 void __shadow_mode_disable(struct domain *d)
 {
+    struct vcpu *v;
+#ifndef NDEBUG
+    int i;
+#endif
+
     if ( unlikely(!shadow_mode_enabled(d)) )
         return;
@@ -919,7 +924,6 @@
     free_writable_pte_predictions(d);

 #ifndef NDEBUG
-    int i;
     for ( i = 0; i < shadow_ht_buckets; i++ )
     {
         if ( d->arch.shadow_ht[i].gpfn_and_flags != 0 )
@@ -936,11 +940,8 @@
     free_shadow_ht_entries(d);
     free_out_of_sync_entries(d);

-    struct vcpu *v;
     for_each_vcpu(d, v)
-    {
         update_pagetables(v);
-    }
 }

@@ -1608,14 +1609,18 @@

 unsigned long gpfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
 {
-    ASSERT( shadow_mode_translate(d) );
+    unsigned long va, tabpfn;
+    l1_pgentry_t *l1, l1e;
+    l2_pgentry_t *l2, l2e;
+
+    ASSERT(shadow_mode_translate(d));

     perfc_incrc(gpfn_to_mfn_foreign);

-    unsigned long va = gpfn << PAGE_SHIFT;
-    unsigned long tabpfn = pagetable_get_pfn(d->arch.phys_table);
-    l2_pgentry_t *l2 = map_domain_page(tabpfn);
-    l2_pgentry_t l2e = l2[l2_table_offset(va)];
+    va = gpfn << PAGE_SHIFT;
+    tabpfn = pagetable_get_pfn(d->arch.phys_table);
+    l2 = map_domain_page(tabpfn);
+    l2e = l2[l2_table_offset(va)];
     unmap_domain_page(l2);
     if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
     {
@@ -1623,8 +1628,8 @@
                d->domain_id, gpfn, l2e_get_intpte(l2e));
         return INVALID_MFN;
     }
-    l1_pgentry_t *l1 = map_domain_page(l2e_get_pfn(l2e));
-    l1_pgentry_t l1e = l1[l1_table_offset(va)];
+    l1 = map_domain_page(l2e_get_pfn(l2e));
+    l1e = l1[l1_table_offset(va)];
     unmap_domain_page(l1);

 #if 0
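__shadow_mode_disable() shows a wrinkle of the hoisting: when a local is
used only inside an #ifndef NDEBUG region, the hoisted declaration needs
the same guard, or release builds would warn about an unused variable.
Reduced sketch with hypothetical helpers:

    #define BUCKETS 4

    static int bucket_used[BUCKETS];

    void mode_disable(void)
    {
    #ifndef NDEBUG
        int i;                  /* guarded to match its only use below */
    #endif

        /* unconditional teardown work goes here */

    #ifndef NDEBUG
        for ( i = 0; i < BUCKETS; i++ )
            bucket_used[i] = 0;
    #endif
    }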
diff -r f7bee3cb1bf1 -r c7508abc5b6b xen/arch/x86/vmx.c
--- a/xen/arch/x86/vmx.c Tue Nov 22 17:21:22 2005
+++ b/xen/arch/x86/vmx.c Tue Nov 22 17:44:08 2005
@@ -129,15 +129,14 @@
  */
 void vmx_load_msrs(struct vcpu *n)
 {
-    struct msr_state *host_state;
-    host_state = &percpu_msr[smp_processor_id()];
+    struct msr_state *host_state = &percpu_msr[smp_processor_id()];
+    int i;

     if ( !vmx_switch_on )
         return;

-    while (host_state->flags){
-        int i;
-
+    while ( host_state->flags )
+    {
         i = find_first_set_bit(host_state->flags);
         wrmsrl(msr_data_index[i], host_state->msr_items[i]);
         clear_bit(i, &host_state->flags);
@@ -146,11 +145,10 @@

 static void vmx_save_init_msrs(void)
 {
-    struct msr_state *host_state;
-    host_state = &percpu_msr[smp_processor_id()];
+    struct msr_state *host_state = &percpu_msr[smp_processor_id()];
     int i;

-    for (i = 0; i < VMX_MSR_COUNT; i++)
+    for ( i = 0; i < VMX_MSR_COUNT; i++ )
         rdmsrl(msr_data_index[i], host_state->msr_items[i]);
 }
@@ -516,23 +514,20 @@

     cpuid(input, &eax, &ebx, &ecx, &edx);

-    if (input == 1) {
+    if ( input == 1 )
+    {
         if ( vmx_apic_support(v->domain) &&
-                !vlapic_global_enabled((VLAPIC(v))) )
+             !vlapic_global_enabled((VLAPIC(v))) )
             clear_bit(X86_FEATURE_APIC, &edx);
-#ifdef __i386__
-        clear_bit(X86_FEATURE_PSE, &edx);
-        clear_bit(X86_FEATURE_PAE, &edx);
-        clear_bit(X86_FEATURE_PSE36, &edx);
-#else
-        struct vcpu *v = current;
-        if (v->domain->arch.ops->guest_paging_levels == PAGING_L2)
+
+#ifdef __x86_64__
+        if ( v->domain->arch.ops->guest_paging_levels == PAGING_L2 )
+#endif
         {
             clear_bit(X86_FEATURE_PSE, &edx);
             clear_bit(X86_FEATURE_PAE, &edx);
             clear_bit(X86_FEATURE_PSE36, &edx);
         }
-#endif

         /* Unsupportable for virtualised CPUs. */
         clear_bit(X86_FEATURE_VMXE & 31, &ecx);
@@ -1084,6 +1079,7 @@
     unsigned long eip;
     int paging_enabled;
     unsigned long vm_entry_value;
+
     /*
      * CR0: We don't want to lose PE and PG.
      */
@@ -1140,14 +1136,17 @@
 #endif
         }

-        unsigned long crn;
-        /* update CR4's PAE if needed */
-        __vmread(GUEST_CR4, &crn);
-        if ( (!(crn & X86_CR4_PAE)) &&
-             test_bit(VMX_CPU_STATE_PAE_ENABLED,
-                      &v->arch.arch_vmx.cpu_state)){
-            VMX_DBG_LOG(DBG_LEVEL_1, "enable PAE on cr4\n");
-            __vmwrite(GUEST_CR4, crn | X86_CR4_PAE);
+        {
+            unsigned long crn;
+            /* update CR4's PAE if needed */
+            __vmread(GUEST_CR4, &crn);
+            if ( (!(crn & X86_CR4_PAE)) &&
+                 test_bit(VMX_CPU_STATE_PAE_ENABLED,
+                          &v->arch.arch_vmx.cpu_state) )
+            {
+                VMX_DBG_LOG(DBG_LEVEL_1, "enable PAE on cr4\n");
+                __vmwrite(GUEST_CR4, crn | X86_CR4_PAE);
+            }
         }
 #endif
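The CPUID hunk above also deduplicates the old __i386__/__x86_64__ branches
with a preprocessor trick: on 64-bit builds the block is conditional on the
guest using 2-level paging, while on 32-bit builds the #ifdef removes the
"if" so the block always runs. A sketch with illustrative bit positions
(PSE=3, PAE=6, PSE36=17 in CPUID leaf 1 EDX):

    #define PAGING_L2 2

    void mask_paging_features(unsigned int *edx, int guest_paging_levels)
    {
    #ifdef __x86_64__
        if ( guest_paging_levels == PAGING_L2 )
    #endif
        {
            *edx &= ~(1u << 3);    /* PSE */
            *edx &= ~(1u << 6);    /* PAE */
            *edx &= ~(1u << 17);   /* PSE36 */
        }
    }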
diff -r f7bee3cb1bf1 -r c7508abc5b6b xen/arch/x86/x86_32/traps.c
--- a/xen/arch/x86/x86_32/traps.c Tue Nov 22 17:21:22 2005
+++ b/xen/arch/x86/x86_32/traps.c Tue Nov 22 17:44:08 2005
@@ -167,6 +167,7 @@

 void __init percpu_traps_init(void)
 {
+    struct tss_struct *tss = &doublefault_tss;
     asmlinkage int hypercall(void);

     if ( smp_processor_id() != 0 )
@@ -184,7 +185,6 @@
      * Make a separate task for double faults. This will get us debug output if
      * we blow the kernel stack.
      */
-    struct tss_struct *tss = &doublefault_tss;
     memset(tss, 0, sizeof(*tss));
     tss->ds = __HYPERVISOR_DS;
     tss->es = __HYPERVISOR_DS;

diff -r f7bee3cb1bf1 -r c7508abc5b6b xen/common/sched_sedf.c
--- a/xen/common/sched_sedf.c Tue Nov 22 17:21:22 2005
+++ b/xen/common/sched_sedf.c Tue Nov 22 17:44:08 2005
@@ -704,11 +704,12 @@
     struct list_head      *waitq    = WAITQ(cpu);
 #if (EXTRA > EXTRA_OFF)
     struct sedf_vcpu_info *inf      = EDOM_INFO(current);
-    struct list_head      *extraq[] = {EXTRAQ(cpu, EXTRA_PEN_Q),
-                                       EXTRAQ(cpu, EXTRA_UTIL_Q)};
-#endif
-    struct task_slice      ret;
-    /*int i = 0;*/
+    struct list_head *extraq[] = {
+        EXTRAQ(cpu, EXTRA_PEN_Q), EXTRAQ(cpu, EXTRA_UTIL_Q)};
+#endif
+    struct sedf_vcpu_info *runinf, *waitinf;
+    struct task_slice      ret;
+
     /*idle tasks don't need any of the following stuf*/
     if (is_idle_task(current->domain))
         goto check_waitq;
@@ -737,7 +738,6 @@
     /*now simply pick the first domain from the runqueue, which has the
       earliest deadline, because the list is sorted*/
-    struct sedf_vcpu_info *runinf, *waitinf;
     if (!list_empty(runq)) {
         runinf   = list_entry(runq->next,struct sedf_vcpu_info,list);
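In the sched_sedf.c hunk above, runinf/waitinf were declared halfway down
the function, after statements and past the goto check_waitq path; hoisting
them into the opening declaration block is the C90-clean placement. Reduced
sketch of the shape (hypothetical names):

    int pick_next(int idle)
    {
        int runq_best = 0, waitq_best;   /* hoisted: used only further down */

        if ( idle )
            goto check_waitq;            /* skips the run-queue scan */

        runq_best = 1;                   /* scan run queue */

    check_waitq:
        waitq_best = 2;                  /* scan wait queue */
        return idle ? waitq_best : runq_best;
    }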
diff -r f7bee3cb1bf1 -r c7508abc5b6b xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h Tue Nov 22 17:21:22 2005
+++ b/xen/include/asm-x86/shadow.h Tue Nov 22 17:44:08 2005
@@ -173,11 +173,12 @@
 static inline int page_is_page_table(struct pfn_info *page)
 {
     struct domain *owner = page_get_owner(page);
+    u32 type_info;

     if ( owner && shadow_mode_refcounts(owner) )
         return page->count_info & PGC_page_table;

-    u32 type_info = page->u.inuse.type_info & PGT_type_mask;
+    type_info = page->u.inuse.type_info & PGT_type_mask;
     return type_info && (type_info <= PGT_l4_page_table);
 }

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog