diff -r 7ee8bb40200a xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c	Thu Apr 15 19:11:16 2010 +0100
+++ b/xen/arch/x86/mm/p2m.c	Fri Apr 16 19:10:27 2010 +0200
@@ -113,9 +113,9 @@ static void audit_p2m(struct domain *d);
 # define audit_p2m(_d) do { (void)(_d); } while(0)
 #endif /* P2M_AUDIT */
 
-// Find the next level's P2M entry, checking for out-of-range gfn's...
-// Returns NULL on error.
-//
+/* Find the next level's P2M entry, checking for out-of-range gfn's...
+ * Returns NULL on error.
+ */
 static l1_pgentry_t *
 p2m_find_entry(void *table, unsigned long *gfn_remainder,
                unsigned long gfn, u32 shift, u32 max)
@@ -150,9 +150,9 @@ p2m_alloc_ptp(struct domain *d, unsigned
     return pg;
 }
 
-// Walk one level of the P2M table, allocating a new table if required.
-// Returns 0 on error.
-//
+/* Walk one level of the P2M table, allocating a new table if required.
+ * Returns 0 on error.
+ */
 static int
 p2m_next_level(struct domain *d, mfn_t *table_mfn, void **table,
                unsigned long *gfn_remainder, unsigned long gfn, u32 shift,
@@ -305,7 +305,7 @@ p2m_pod_cache_add(struct domain *d,
         return -1;
     }
 
-    for(i=0; i < 1 << order ; i++) {
+    for(i=0; i < 1 << order; i++) {
         struct domain * od;
 
         p = mfn_to_page(_mfn(mfn_x(mfn) + i));
@@ -337,7 +337,7 @@ p2m_pod_cache_add(struct domain *d,
     spin_lock(&d->page_alloc_lock);
 
     /* First, take all pages off the domain list */
-    for(i=0; i < 1 << order ; i++)
+    for(i=0; i < 1 << order; i++)
     {
         p = page + i;
         page_list_del(p, &d->page_list);
@@ -418,7 +418,7 @@ static struct page_info * p2m_pod_cache_
     }
 
     /* Put the pages back on the domain page_list */
-    for ( i = 0 ; i < (1 << order) ; i++ )
+    for ( i = 0; i < (1 << order); i++ )
     {
         BUG_ON(page_get_owner(p + i) != d);
         page_list_add_tail(p + i, &d->page_list);
@@ -489,7 +489,7 @@ p2m_pod_set_cache_target(struct domain *
         spin_unlock(&d->page_alloc_lock);
 
         /* Then free them */
-        for ( i = 0 ; i < (1 << order) ; i++ )
+        for ( i = 0; i < (1 << order); i++ )
         {
             /* Copied from common/memory.c:guest_remove_page() */
             if ( unlikely(!get_page(page+i, d)) )
@@ -607,7 +607,7 @@ p2m_pod_empty_cache(struct domain *d)
     {
         int i;
 
-        for ( i = 0 ; i < SUPERPAGE_PAGES ; i++ )
+        for ( i = 0; i < SUPERPAGE_PAGES; i++ )
         {
             BUG_ON(page_get_owner(page + i) != d);
             page_list_add_tail(page + i, &d->page_list);
@@ -1028,7 +1028,7 @@ p2m_pod_emergency_sweep_super(struct dom
     start = p2md->pod.reclaim_super;
     limit = (start > POD_SWEEP_LIMIT) ? (start - POD_SWEEP_LIMIT) : 0;
 
-    for ( i=p2md->pod.reclaim_super ; i > 0 ; i-=SUPERPAGE_PAGES )
+    for ( i = p2md->pod.reclaim_super; i > 0; i -= SUPERPAGE_PAGES )
     {
         p2m_pod_zero_check_superpage(d, i);
         /* Stop if we're past our limit and we have found *something*.
@@ -1061,7 +1061,7 @@ p2m_pod_emergency_sweep(struct domain *d
     limit = (start > POD_SWEEP_LIMIT) ? (start - POD_SWEEP_LIMIT) : 0;
 
     /* FIXME: Figure out how to avoid superpages */
-    for ( i=p2md->pod.reclaim_single ; i > 0 ; i-- )
+    for ( i = p2md->pod.reclaim_single; i > 0; i-- )
     {
         gfn_to_mfn_query(d, i, &t );
         if ( p2m_is_ram(t) )
@@ -1247,12 +1247,12 @@ static int p2m_pod_check_and_populate(st
     return r;
 }
 
-// Returns 0 on error (out of memory)
+/* Returns 0 on error (out of memory) */
 static int
 p2m_set_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
               unsigned int page_order, p2m_type_t p2mt)
 {
-    // XXX -- this might be able to be faster iff current->domain == d
+    /* XXX -- this might be able to be faster iff current->domain == d */
     mfn_t table_mfn = pagetable_get_mfn(d->arch.phys_table);
     void *table =map_domain_page(mfn_x(table_mfn));
     unsigned long i, gfn_remainder = gfn;
@@ -1779,16 +1779,16 @@ int set_p2m_entry(struct domain *d, unsi
     return rc;
 }
 
-// Allocate a new p2m table for a domain.
-//
-// The structure of the p2m table is that of a pagetable for xen (i.e. it is
-// controlled by CONFIG_PAGING_LEVELS).
-//
-// The alloc_page and free_page functions will be used to get memory to
-// build the p2m, and to release it again at the end of day.
-//
-// Returns 0 for success or -errno.
-//
+/* Allocate a new p2m table for a domain.
+ *
+ * The structure of the p2m table is that of a pagetable for xen (i.e. it is
+ * controlled by CONFIG_PAGING_LEVELS).
+ *
+ * The alloc_page and free_page functions will be used to get memory to
+ * build the p2m, and to release it again at the end of day.
+ *
+ * Returns 0 for success or -errno.
+ */
 int p2m_alloc_table(struct domain *d,
                     struct page_info * (*alloc_page)(struct domain *d),
                     void (*free_page)(struct domain *d, struct page_info *pg))
@@ -2000,18 +2000,17 @@ static void audit_p2m(struct domain *d)
     /* Audit part two: walk the domain's p2m table, checking the entries. */
     if ( pagetable_get_pfn(d->arch.phys_table) != 0 )
     {
+        l3_pgentry_t *l3e;
         l2_pgentry_t *l2e;
         l1_pgentry_t *l1e;
-        int i1, i2;
+        int i1, i2, i3;
 
 #if CONFIG_PAGING_LEVELS == 4
         l4_pgentry_t *l4e;
-        l3_pgentry_t *l3e;
-        int i3, i4;
+        int i4;
         l4e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
 #else /* CONFIG_PAGING_LEVELS == 3 */
-        l3_pgentry_t *l3e;
-        int i3;
         l3e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
 #endif