[Xen-changelog] [xen-unstable] Code cleanups after page offline patch.

# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1236871896 0
# Node ID dd3219cd019a173b3f393ab4a719df0068a818ee
# Parent c9a35fb19e75ada4296c7c4adac06927313e5720
Code cleanups after page offline patch.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/common/page_alloc.c  |  103 ++++++++++++++++++++---------------------
 xen/common/sysctl.c      |   18 ++++----
 xen/include/asm-x86/mm.h |   31 ++++++--------
 3 files changed, 71 insertions(+), 81 deletions(-)

diff -r c9a35fb19e75 -r dd3219cd019a xen/common/page_alloc.c
--- a/xen/common/page_alloc.c	Thu Mar 12 15:08:08 2009 +0000
+++ b/xen/common/page_alloc.c	Thu Mar 12 15:31:36 2009 +0000
@@ -75,11 +75,11 @@ PAGE_LIST_HEAD(page_scrub_list);
 PAGE_LIST_HEAD(page_scrub_list);
 static unsigned long scrub_pages;
 
-/* Offlined page list, protected by heap_lock */
+/* Offlined page list, protected by heap_lock. */
 PAGE_LIST_HEAD(page_offlined_list);
-
-/* Broken page list, protected by heap_lock */
+/* Broken page list, protected by heap_lock. */
 PAGE_LIST_HEAD(page_broken_list);
+
 /*********************
  * ALLOCATION BITMAP
  *  One bit per page of memory. Bit set => page is allocated.
@@ -427,9 +427,7 @@ static struct page_info *alloc_heap_page
     return pg;
 }
 
-/*
- * Remove any offlined page in the buddy poined by head
- */
+/* Remove any offlined page in the buddy pointed to by head. */
 static int reserve_offlined_page(struct page_info *head)
 {
     unsigned int node = phys_to_nid(page_to_maddr(head));
@@ -448,7 +446,7 @@ static int reserve_offlined_page(struct 
         struct page_info *pg;
         int next_order;
 
-        if (test_bit(_PGC_offlined, &cur_head->count_info))
+        if ( test_bit(_PGC_offlined, &cur_head->count_info) )
         {
             cur_head++;
             continue;
@@ -456,29 +454,27 @@ static int reserve_offlined_page(struct 
 
         next_order = cur_order = 0;
 
-        while (cur_order < head_order)
+        while ( cur_order < head_order )
         {
             next_order = cur_order + 1;
 
-            if ( (cur_head + (1 << next_order)) >= (head + ( 1 << head_order)))
+            if ( (cur_head + (1 << next_order)) >= (head + ( 1 << head_order)) )
                 goto merge;
 
-            for (i = (1 << cur_order), pg = cur_head + (1 << cur_order);
-                 i < (1 << next_order);
-                 i++, pg ++)
-                if (test_bit(_PGC_offlined, &pg->count_info))
+            for ( i = (1 << cur_order), pg = cur_head + (1 << cur_order );
+                  i < (1 << next_order);
+                  i++, pg++ )
+                if ( test_bit(_PGC_offlined, &pg->count_info) )
                     break;
 
-            if (i == ( 1 << next_order))
+            if ( i == ( 1 << next_order) )
             {
                 cur_order = next_order;
                 continue;
             }
             else
             {
-                /*
-                 * We don't need considering merge outside the head_order
-                 */
-merge:
+            merge:
+                /* We don't consider merging outside the head_order. */
                 page_list_add_tail(cur_head, &heap(node, zone, cur_order));
                 PFN_ORDER(cur_head) = cur_order;
                 cur_head += (1 << cur_order);
@@ -487,21 +483,20 @@ merge:
         }
     }
 
-    for (cur_head = head; cur_head < head + ( 1UL << head_order); cur_head++)
-    {
-        if (!test_bit(_PGC_offlined, &cur_head->count_info))
+    for ( cur_head = head; cur_head < head + ( 1UL << head_order); cur_head++ )
+    {
+        if ( !test_bit(_PGC_offlined, &cur_head->count_info) )
             continue;
 
-        avail[node][zone] --;
+        avail[node][zone]--;
         map_alloc(page_to_mfn(cur_head), 1);
 
-        if (test_bit(_PGC_broken, &cur_head->count_info))
-            page_list_add_tail(cur_head, &page_broken_list);
-        else
-            page_list_add_tail(cur_head, &page_offlined_list);
-
-        count ++;
+        page_list_add_tail(cur_head,
+                           test_bit(_PGC_broken, &cur_head->count_info) ?
+                           &page_broken_list : &page_offlined_list);
+
+        count++;
     }
 
     return count;
@@ -534,7 +529,7 @@ static void free_heap_pages(
          */
         ASSERT(!(pg[i].count_info & PGC_offlined));
         pg[i].count_info &= PGC_offlining | PGC_broken;
-        if (pg[i].count_info & PGC_offlining)
+        if ( pg[i].count_info & PGC_offlining )
        {
             pg[i].count_info &= ~PGC_offlining;
             pg[i].count_info |= PGC_offlined;
@@ -584,7 +579,7 @@ static void free_heap_pages(
     PFN_ORDER(pg) = order;
     page_list_add_tail(pg, &heap(node, zone, order));
 
-    if (tainted)
+    if ( tainted )
         reserve_offlined_page(pg);
 
     spin_unlock(&heap_lock);
@@ -607,9 +602,6 @@ static unsigned long mark_page_offline(s
     unsigned long nx, x, y = pg->count_info;
 
     ASSERT(page_is_ram_type(page_to_mfn(pg), RAM_TYPE_CONVENTIONAL));
-    /*
-     * Caller gurantee the page will not be reassigned during this process
-     */
     ASSERT(spin_is_locked(&heap_lock));
 
     do {
@@ -617,21 +609,23 @@ static unsigned long mark_page_offline(s
         if ( ((x & PGC_offlined_broken) == PGC_offlined_broken) )
             return y;
 
-        /* PGC_offlined means it is free pages */
-        if (x & PGC_offlined)
-        {
-            if (broken && !(nx & PGC_broken))
+
+        if ( x & PGC_offlined )
+        {
+            /* PGC_offlined means it is a free page. */
+            if ( broken && !(nx & PGC_broken) )
                 nx |= PGC_broken;
            else
                 return y;
         }
-        /* It is not offlined, not reserved page */
-        else if ( allocated_in_map(page_to_mfn(pg)) )
-            nx |= PGC_offlining;
        else
-            nx |= PGC_offlined;
-
-        if (broken)
+        {
+            /* It is not offlined, not reserved page */
+            nx |= (allocated_in_map(page_to_mfn(pg)) ?
+                   PGC_offlining : PGC_offlined);
+        }
+
+        if ( broken )
             nx |= PGC_broken;
     } while ( (y = cmpxchg(&pg->count_info, x, nx)) != x );
@@ -644,7 +638,6 @@ static int reserve_heap_page(struct page
     unsigned int i, node = phys_to_nid(page_to_maddr(pg));
     unsigned int zone = page_to_zone(pg);
 
-    /* get the header */
     for ( i = 0; i <= MAX_ORDER; i++ )
     {
         struct page_info *tmp;
@@ -652,7 +645,7 @@ static int reserve_heap_page(struct page
         if ( page_list_empty(&heap(node, zone, i)) )
             continue;
 
-        page_list_for_each_safe(head, tmp, &heap(node, zone, i))
+        page_list_for_each_safe ( head, tmp, &heap(node, zone, i) )
         {
             if ( (head <= pg) &&
                  (head + (1UL << i) > pg) )
@@ -664,9 +657,6 @@ static int reserve_heap_page(struct page
 
 }
 
-/*
- * offline one page
- */
 int offline_page(unsigned long mfn, int broken, uint32_t *status)
 {
     unsigned long old_info = 0;
@@ -674,7 +664,7 @@ int offline_page(unsigned long mfn, int 
     int ret = 0;
     struct page_info *pg;
 
-    if (mfn > max_page)
+    if ( mfn > max_page )
     {
         dprintk(XENLOG_WARNING,
                 "try to offline page out of range %lx\n", mfn);
@@ -683,7 +673,6 @@ int offline_page(unsigned long mfn, int 
 
     *status = 0;
     pg = mfn_to_page(mfn);
-
 
 #if defined(__x86_64__)
     /* Xen's txt mfn in x86_64 is reserved in e820 */
@@ -701,7 +690,7 @@ int offline_page(unsigned long mfn, int 
      * N.B. xen's txt in x86_64 is marked reserved and handled already
      * Also kexec range is reserved
      */
-    if (!page_is_ram_type(mfn, RAM_TYPE_CONVENTIONAL))
+    if ( !page_is_ram_type(mfn, RAM_TYPE_CONVENTIONAL) )
     {
         *status = PG_OFFLINE_FAILED | PG_OFFLINE_NOT_CONV_RAM;
         return -EINVAL;
@@ -717,11 +706,11 @@ int offline_page(unsigned long mfn, int 
         reserve_heap_page(pg);
         *status = PG_OFFLINE_OFFLINED;
     }
-    else if (test_bit(_PGC_offlined, &pg->count_info))
+    else if ( test_bit(_PGC_offlined, &pg->count_info) )
     {
         *status = PG_OFFLINE_OFFLINED;
     }
-    else if ((owner = page_get_owner_and_reference(pg)))
+    else if ( (owner = page_get_owner_and_reference(pg)) )
     {
         *status = PG_OFFLINE_OWNED | PG_OFFLINE_PENDING |
                   (owner->domain_id << PG_OFFLINE_OWNER_SHIFT);
@@ -747,7 +736,7 @@ int offline_page(unsigned long mfn, int 
                   (DOMID_INVALID << PG_OFFLINE_OWNER_SHIFT );
     }
 
-    if (broken)
+    if ( broken )
         *status |= PG_OFFLINE_BROKEN;
 
     spin_unlock(&heap_lock);
@@ -782,21 +771,21 @@ unsigned int online_page(unsigned long m
         ret = -EINVAL;
         *status = PG_ONLINE_FAILED |PG_ONLINE_BROKEN;
     }
-    else if (pg->count_info & PGC_offlined)
+    else if ( pg->count_info & PGC_offlined )
     {
         clear_bit(_PGC_offlined, &pg->count_info);
         page_list_del(pg, &page_offlined_list);
         *status = PG_ONLINE_ONLINED;
         free = 1;
     }
-    else if (pg->count_info & PGC_offlining)
+    else if ( pg->count_info & PGC_offlining )
     {
         clear_bit(_PGC_offlining, &pg->count_info);
         *status = PG_ONLINE_ONLINED;
     }
 
     spin_unlock(&heap_lock);
 
-    if (free)
+    if ( free )
         free_heap_pages(pg, 0);
 
     return ret;
diff -r c9a35fb19e75 -r dd3219cd019a xen/common/sysctl.c
--- a/xen/common/sysctl.c	Thu Mar 12 15:08:08 2009 +0000
+++ b/xen/common/sysctl.c	Thu Mar 12 15:31:36 2009 +0000
@@ -241,7 +241,7 @@ long do_sysctl(XEN_GUEST_HANDLE(xen_sysc
         ptr = status = xmalloc_bytes( sizeof(uint32_t) *
             (op->u.page_offline.end - op->u.page_offline.start + 1));
 
-        if (!status)
+        if ( !status )
         {
             dprintk(XENLOG_WARNING, "Out of memory for page offline op\n");
             ret = -ENOMEM;
@@ -255,7 +255,7 @@ long do_sysctl(XEN_GUEST_HANDLE(xen_sysc
               pfn <= op->u.page_offline.end;
               pfn ++ )
         {
-            switch (op->u.page_offline.cmd)
+            switch ( op->u.page_offline.cmd )
             {
             /* Shall revert her if failed, or leave caller do it? */
             case sysctl_page_offline:
@@ -278,12 +278,14 @@ long do_sysctl(XEN_GUEST_HANDLE(xen_sysc
                 break;
             }
 
-        if (copy_to_guest(op->u.page_offline.status, status,
-                op->u.page_offline.end - op->u.page_offline.start + 1))
-        {
-            ret = -EFAULT;
-            break;
-        }
+        if ( copy_to_guest(
+            op->u.page_offline.status, status,
+            op->u.page_offline.end - op->u.page_offline.start + 1) )
+        {
+            ret = -EFAULT;
+            break;
+        }
+
         xfree(status);
     }
     break;
diff -r c9a35fb19e75 -r dd3219cd019a xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h	Thu Mar 12 15:08:08 2009 +0000
+++ b/xen/include/asm-x86/mm.h	Thu Mar 12 15:31:36 2009 +0000
@@ -198,26 +198,25 @@ struct page_info
  /* 3-bit PAT/PCD/PWT cache-attribute hint. */
 #define PGC_cacheattr_base PG_shift(6)
 #define PGC_cacheattr_mask PG_mask(7, 6)
-
- /* Page is broken? */
- #define _PGC_broken      PG_shift(7)
- #define PGC_broken       PG_mask(1, 7)
- /* Page is offline pending ? */
- #define _PGC_offlining   PG_shift(8)
- #define PGC_offlining    PG_mask(1, 8)
- /* Page is offlined */
- #define _PGC_offlined    PG_shift(9)
- #define PGC_offlined     PG_mask(1, 9)
- #define PGC_offlined_broken (PGC_offlined | PGC_broken)
-
- #define is_page_offlining(page) ((page)->count_info & PGC_offlining)
- #define is_page_offlined(page)  ((page)->count_info & PGC_offlined)
- #define is_page_broken(page)    ((page)->count_info & PGC_broken)
- #define is_page_online(page)    (!is_page_offlined(page))
+ /* Page is broken? */
+#define _PGC_broken       PG_shift(7)
+#define PGC_broken        PG_mask(1, 7)
+ /* Page is offline pending ? */
+#define _PGC_offlining    PG_shift(8)
+#define PGC_offlining     PG_mask(1, 8)
+ /* Page is offlined */
+#define _PGC_offlined     PG_shift(9)
+#define PGC_offlined      PG_mask(1, 9)
+#define PGC_offlined_broken (PGC_offlined | PGC_broken)
  /* Count of references to this frame. */
 #define PGC_count_width   PG_shift(9)
 #define PGC_count_mask    ((1UL<<PGC_count_width)-1)
+
+#define is_page_offlining(page) ((page)->count_info & PGC_offlining)
+#define is_page_offlined(page)  ((page)->count_info & PGC_offlined)
+#define is_page_broken(page)    ((page)->count_info & PGC_broken)
+#define is_page_online(page)    (!is_page_offlined(page))
 
 #if defined(__i386__)
 #define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page))

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
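
Editor's illustrative sketch (not part of the changeset above): it shows how the
PGC_offlining/PGC_offlined/PGC_broken bits and the is_page_*() helpers moved by
this patch map onto the PG_OFFLINE_* status bits that offline_page() reports to
the tools. The function page_offline_status() is hypothetical, and the snippet
assumes the same Xen headers that xen/common/page_alloc.c already includes.

    /* Sketch only: compose a PG_OFFLINE_* status word from a page's state,
     * mirroring the logic of offline_page() in the patch above. */
    static uint32_t page_offline_status(struct page_info *pg, int broken)
    {
        uint32_t status = 0;

        if ( is_page_offlined(pg) )        /* PGC_offlined: page already freed and parked */
            status |= PG_OFFLINE_OFFLINED;
        else if ( is_page_offlining(pg) )  /* PGC_offlining: still owned, offline pending */
            status |= PG_OFFLINE_PENDING;

        if ( broken || is_page_broken(pg) ) /* PGC_broken: hardware reported the page bad */
            status |= PG_OFFLINE_BROKEN;

        return status;
    }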