[Xen-devel] [PATCH 1/2] xen/mm: fold PGC_broken into PGC_state bits
From: David Woodhouse <dwmw@xxxxxxxxxxxx>

Only PGC_state_offlining and PGC_state_offlined are valid in conjunction
with PGC_broken. The other two states (free and inuse) were never valid
for a broken page.

By folding PGC_broken in, we can have three bits for PGC_state, which
allows up to 8 states, of which 6 are currently used and 2 are available
for new use cases.

Signed-off-by: David Woodhouse <dwmw@xxxxxxxxxxxx>
---
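For illustration while reviewing (not meant to be applied), here is a
minimal standalone sketch of the new encoding. BITS_PER_LONG, PG_shift()
and PG_mask() below are simplified stand-ins rather than the real Xen
definitions, and main() is just a throwaway harness; the PGC_state_*
values and pgc_is*() helpers mirror the ones this patch introduces.

/*
 * Standalone sketch of the folded page-state encoding.  The mock
 * definitions below stand in for the real Xen macros purely so this
 * compiles on its own; the PGC_state_* values and pgc_is*() helpers
 * mirror the ones added by this patch.
 */
#include <assert.h>
#include <stdio.h>

#define BITS_PER_LONG   64
#define PG_shift(idx)   (BITS_PER_LONG - (idx))
#define PG_mask(x, idx) ((unsigned long)(x) << PG_shift(idx))

/* Three bits of state: eight possible encodings, six of them used. */
#define PGC_state                  PG_mask(7, 9)
#define PGC_state_inuse            PG_mask(0, 9)
#define PGC_state_offlining        PG_mask(1, 9)
#define PGC_state_offlined         PG_mask(2, 9)
#define PGC_state_free             PG_mask(3, 9)
#define PGC_state_broken_offlining PG_mask(4, 9) /* Broken and offlining */
#define PGC_state_broken           PG_mask(5, 9) /* Broken and offlined */

#define pgc_is(pgc, st)        (((pgc) & PGC_state) == PGC_state_##st)
#define pgc_is_broken(pgc)     (pgc_is(pgc, broken) || \
                                pgc_is(pgc, broken_offlining))
#define pgc_is_offlined(pgc)   (pgc_is(pgc, offlined) || \
                                pgc_is(pgc, broken))
#define pgc_is_offlining(pgc)  (pgc_is(pgc, offlining) || \
                                pgc_is(pgc, broken_offlining))

int main(void)
{
    /* A page that breaks while still in use becomes broken_offlining... */
    unsigned long count_info = PGC_state_broken_offlining;

    assert(pgc_is_broken(count_info));
    assert(pgc_is_offlining(count_info));
    assert(!pgc_is_offlined(count_info));

    /* ...and moves to broken (i.e. broken + offlined) once offlined. */
    count_info = (count_info & ~PGC_state) | PGC_state_broken;

    assert(pgc_is_broken(count_info));
    assert(pgc_is_offlined(count_info));
    assert(!pgc_is_offlining(count_info));

    printf("folded state encoding behaves as described\n");
    return 0;
}

This should build and run cleanly with a stock C compiler; the point is
that "broken" is now part of the single 3-bit state field rather than an
orthogonal flag, which is what leaves two of the eight encodings free for
new use cases.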
 xen/arch/x86/domctl.c    |  2 +-
 xen/common/page_alloc.c  | 66 ++++++++++++++++++++++------------------
 xen/include/asm-arm/mm.h | 38 +++++++++++++++--------
 xen/include/asm-x86/mm.h | 36 ++++++++++++++++------
 4 files changed, 89 insertions(+), 53 deletions(-)

diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index ed86762fa6..a411f64afa 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -422,7 +422,7 @@ long arch_do_domctl(
             if ( page->u.inuse.type_info & PGT_pinned )
                 type |= XEN_DOMCTL_PFINFO_LPINTAB;
 
-            if ( page->count_info & PGC_broken )
+            if ( page_is_broken(page) )
                 type = XEN_DOMCTL_PFINFO_BROKEN;
         }
 
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 76d37226df..8d72a64f4e 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1093,7 +1093,7 @@ static int reserve_offlined_page(struct page_info *head)
         struct page_info *pg;
         int next_order;
 
-        if ( page_state_is(cur_head, offlined) )
+        if ( page_is_offlined(cur_head) )
         {
             cur_head++;
             if ( first_dirty != INVALID_DIRTY_IDX && first_dirty )
@@ -1113,7 +1113,7 @@ static int reserve_offlined_page(struct page_info *head)
             for ( i = (1 << cur_order), pg = cur_head + (1 << cur_order );
                   i < (1 << next_order);
                   i++, pg++ )
-                if ( page_state_is(pg, offlined) )
+                if ( page_is_offlined(pg) )
                     break;
             if ( i == ( 1 << next_order) )
             {
@@ -1145,16 +1145,20 @@ static int reserve_offlined_page(struct page_info *head)
 
     for ( cur_head = head; cur_head < head + ( 1UL << head_order); cur_head++ )
     {
-        if ( !page_state_is(cur_head, offlined) )
+        struct page_list_head *list;
+
+        if ( page_state_is(cur_head, offlined) )
+            list = &page_offlined_list;
+        else if ( page_state_is(cur_head, broken) )
+            list = &page_broken_list;
+        else
             continue;
 
         avail[node][zone]--;
         total_avail_pages--;
         ASSERT(total_avail_pages >= 0);
 
-        page_list_add_tail(cur_head,
-                           test_bit(_PGC_broken, &cur_head->count_info) ?
-                           &page_broken_list : &page_offlined_list);
+        page_list_add_tail(cur_head, list);
 
         count++;
     }
@@ -1404,13 +1408,16 @@ static void free_heap_pages(
         switch ( pg[i].count_info & PGC_state )
        {
         case PGC_state_inuse:
-            BUG_ON(pg[i].count_info & PGC_broken);
             pg[i].count_info = PGC_state_free;
             break;
 
         case PGC_state_offlining:
-            pg[i].count_info = (pg[i].count_info & PGC_broken) |
-                               PGC_state_offlined;
+            pg[i].count_info = PGC_state_offlined;
             tainted = 1;
             break;
+
+        case PGC_state_broken_offlining:
+            pg[i].count_info = PGC_state_broken;
+            tainted = 1;
+            break;
 
@@ -1527,16 +1534,16 @@ static unsigned long mark_page_offline(struct page_info *pg, int broken)
     do {
         nx = x = y;
 
-        if ( ((x & PGC_state) != PGC_state_offlined) &&
-             ((x & PGC_state) != PGC_state_offlining) )
-        {
-            nx &= ~PGC_state;
-            nx |= (((x & PGC_state) == PGC_state_free)
-                   ? PGC_state_offlined : PGC_state_offlining);
-        }
+        nx &= ~PGC_state;
 
-        if ( broken )
-            nx |= PGC_broken;
+        /* If it was already broken, it stays broken */
+        if ( pgc_is_broken(x) )
+            broken = 1;
+
+        if ( pgc_is_offlined(x) || pgc_is(x, free) )
+            nx |= broken ? PGC_state_broken : PGC_state_offlined;
+        else
+            nx |= broken ? PGC_state_broken_offlining : PGC_state_offlining;
 
         if ( x == nx )
             break;
@@ -1609,7 +1616,7 @@ int offline_page(mfn_t mfn, int broken, uint32_t *status)
      * need to prevent malicious guest access the broken page again.
      * Under such case, hypervisor shutdown guest, preventing recursive mce.
      */
-    if ( (pg->count_info & PGC_broken) && (owner = page_get_owner(pg)) )
+    if ( page_is_broken(pg) && (owner = page_get_owner(pg)) )
     {
         *status = PG_OFFLINE_AGAIN;
         domain_crash(owner);
@@ -1620,7 +1627,7 @@ int offline_page(mfn_t mfn, int broken, uint32_t *status)
 
     old_info = mark_page_offline(pg, broken);
 
-    if ( page_state_is(pg, offlined) )
+    if ( page_is_offlined(pg) )
     {
         reserve_heap_page(pg);
 
@@ -1699,19 +1706,18 @@ unsigned int online_page(mfn_t mfn, uint32_t *status)
     do {
         ret = *status = 0;
 
-        if ( y & PGC_broken )
+        if ( pgc_is_broken(y) )
         {
             ret = -EINVAL;
-            *status = PG_ONLINE_FAILED |PG_ONLINE_BROKEN;
+            *status = PG_ONLINE_FAILED | PG_ONLINE_BROKEN;
             break;
         }
-
-        if ( (y & PGC_state) == PGC_state_offlined )
+        else if ( pgc_is(y, offlined) )
        {
             page_list_del(pg, &page_offlined_list);
             *status = PG_ONLINE_ONLINED;
         }
-        else if ( (y & PGC_state) == PGC_state_offlining )
+        else if ( pgc_is(y, offlining) )
         {
             *status = PG_ONLINE_ONLINED;
         }
@@ -1726,7 +1732,7 @@ unsigned int online_page(mfn_t mfn, uint32_t *status)
 
     spin_unlock(&heap_lock);
 
-    if ( (y & PGC_state) == PGC_state_offlined )
+    if ( pgc_is(y, offlined) )
         free_heap_pages(pg, 0, false);
 
     return ret;
@@ -1747,11 +1753,11 @@ int query_page_offline(mfn_t mfn, uint32_t *status)
 
     pg = mfn_to_page(mfn);
 
-    if ( page_state_is(pg, offlining) )
+    if ( page_is_offlining(pg) )
         *status |= PG_OFFLINE_STATUS_OFFLINE_PENDING;
-    if ( pg->count_info & PGC_broken )
+    if ( page_is_broken(pg) )
         *status |= PG_OFFLINE_STATUS_BROKEN;
-    if ( page_state_is(pg, offlined) )
+    if ( page_is_offlined(pg) )
         *status |= PG_OFFLINE_STATUS_OFFLINED;
 
     spin_unlock(&heap_lock);
@@ -2519,7 +2525,7 @@ __initcall(pagealloc_keyhandler_init);
 
 void scrub_one_page(struct page_info *pg)
 {
-    if ( unlikely(pg->count_info & PGC_broken) )
+    if ( unlikely(page_is_broken(pg)) )
         return;
 
 #ifndef NDEBUG
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index 7df91280bc..a877791d1c 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -108,21 +108,35 @@ struct page_info
 /* Page is Xen heap? */
 #define _PGC_xen_heap     PG_shift(2)
 #define PGC_xen_heap      PG_mask(1, 2)
-/* ... */
-/* Page is broken? */
-#define _PGC_broken       PG_shift(7)
-#define PGC_broken        PG_mask(1, 7)
- /* Mutually-exclusive page states: { inuse, offlining, offlined, free }. */
-#define PGC_state         PG_mask(3, 9)
-#define PGC_state_inuse   PG_mask(0, 9)
-#define PGC_state_offlining PG_mask(1, 9)
-#define PGC_state_offlined PG_mask(2, 9)
-#define PGC_state_free    PG_mask(3, 9)
-#define page_state_is(pg, st) (((pg)->count_info&PGC_state) == PGC_state_##st)
+ /*
+  * Mutually-exclusive page states:
+  * { inuse, offlining, offlined, free, broken_offlining, broken }
+  */
+#define PGC_state                  PG_mask(7, 9)
+#define PGC_state_inuse            PG_mask(0, 9)
+#define PGC_state_offlining        PG_mask(1, 9)
+#define PGC_state_offlined         PG_mask(2, 9)
+#define PGC_state_free             PG_mask(3, 9)
+#define PGC_state_broken_offlining PG_mask(4, 9) /* Broken and offlining */
+#define PGC_state_broken           PG_mask(5, 9) /* Broken and offlined */
+
+#define pgc_is(pgc, st)            (((pgc) & PGC_state) == PGC_state_##st)
+#define page_state_is(pg, st)      pgc_is((pg)->count_info, st)
+
+#define pgc_is_broken(pgc)     (pgc_is(pgc, broken) || \
+                                pgc_is(pgc, broken_offlining))
+#define pgc_is_offlined(pgc)   (pgc_is(pgc, offlined) || \
+                                pgc_is(pgc, broken))
+#define pgc_is_offlining(pgc)  (pgc_is(pgc, offlining) || \
+                                pgc_is(pgc, broken_offlining))
+
+#define page_is_broken(pg)     (pgc_is_broken((pg)->count_info))
+#define page_is_offlined(pg)   (pgc_is_offlined((pg)->count_info))
+#define page_is_offlining(pg)  (pgc_is_offlining((pg)->count_info))
+
 /* Page is not reference counted */
 #define _PGC_extra        PG_shift(10)
 #define PGC_extra         PG_mask(1, 10)
-
  /* Count of references to this frame. */
 #define PGC_count_width   PG_shift(10)
 #define PGC_count_mask    ((1UL<<PGC_count_width)-1)
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index a06b2fb81f..1203f1b179 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -67,16 +67,32 @@
 /* 3-bit PAT/PCD/PWT cache-attribute hint. */
 #define PGC_cacheattr_base PG_shift(6)
 #define PGC_cacheattr_mask PG_mask(7, 6)
- /* Page is broken? */
-#define _PGC_broken       PG_shift(7)
-#define PGC_broken        PG_mask(1, 7)
- /* Mutually-exclusive page states: { inuse, offlining, offlined, free }. */
-#define PGC_state         PG_mask(3, 9)
-#define PGC_state_inuse   PG_mask(0, 9)
-#define PGC_state_offlining PG_mask(1, 9)
-#define PGC_state_offlined PG_mask(2, 9)
-#define PGC_state_free    PG_mask(3, 9)
-#define page_state_is(pg, st) (((pg)->count_info&PGC_state) == PGC_state_##st)
+ /*
+  * Mutually-exclusive page states:
+  * { inuse, offlining, offlined, free, broken_offlining, broken }
+  */
+#define PGC_state                  PG_mask(7, 9)
+#define PGC_state_inuse            PG_mask(0, 9)
+#define PGC_state_offlining        PG_mask(1, 9)
+#define PGC_state_offlined         PG_mask(2, 9)
+#define PGC_state_free             PG_mask(3, 9)
+#define PGC_state_broken_offlining PG_mask(4, 9) /* Broken and offlining */
+#define PGC_state_broken           PG_mask(5, 9) /* Broken and offlined */
+
+#define pgc_is(pgc, st)            (((pgc) & PGC_state) == PGC_state_##st)
+#define page_state_is(pg, st)      pgc_is((pg)->count_info, st)
+
+#define pgc_is_broken(pgc)     (pgc_is(pgc, broken) || \
+                                pgc_is(pgc, broken_offlining))
+#define pgc_is_offlined(pgc)   (pgc_is(pgc, offlined) || \
+                                pgc_is(pgc, broken))
+#define pgc_is_offlining(pgc)  (pgc_is(pgc, offlining) || \
+                                pgc_is(pgc, broken_offlining))
+
+#define page_is_broken(pg)     (pgc_is_broken((pg)->count_info))
+#define page_is_offlined(pg)   (pgc_is_offlined((pg)->count_info))
+#define page_is_offlining(pg)  (pgc_is_offlining((pg)->count_info))
+
 /* Page is not reference counted */
 #define _PGC_extra        PG_shift(10)
 #define PGC_extra         PG_mask(1, 10)
-- 
2.21.0


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel