[PATCH v1 1/3] mm: pass meminit_context to __free_pages_core()
In preparation for further changes, let's teach __free_pages_core() about
the differences of memory hotplug handling.

Move the memory hotplug specific handling from generic_online_page() to
__free_pages_core(), use adjust_managed_page_count() on the memory
hotplug path, and spell out why memory freed via memblock cannot
currently use adjust_managed_page_count().

Signed-off-by: David Hildenbrand <david@xxxxxxxxxx>
---
 mm/internal.h       |  3 ++-
 mm/kmsan/init.c     |  2 +-
 mm/memory_hotplug.c |  9 +--------
 mm/mm_init.c        |  4 ++--
 mm/page_alloc.c     | 17 +++++++++++++++--
 5 files changed, 21 insertions(+), 14 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
index 12e95fdf61e90..3fdee779205ab 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -604,7 +604,8 @@ extern void __putback_isolated_page(struct page *page, unsigned int order,
 					int mt);
 extern void memblock_free_pages(struct page *page, unsigned long pfn,
 					unsigned int order);
-extern void __free_pages_core(struct page *page, unsigned int order);
+extern void __free_pages_core(struct page *page, unsigned int order,
+		enum meminit_context);
 
 /*
  * This will have no effect, other than possibly generating a warning, if the
diff --git a/mm/kmsan/init.c b/mm/kmsan/init.c
index 3ac3b8921d36f..ca79636f858e5 100644
--- a/mm/kmsan/init.c
+++ b/mm/kmsan/init.c
@@ -172,7 +172,7 @@ static void do_collection(void)
 		shadow = smallstack_pop(&collect);
 		origin = smallstack_pop(&collect);
 		kmsan_setup_meta(page, shadow, origin, collect.order);
-		__free_pages_core(page, collect.order);
+		__free_pages_core(page, collect.order, MEMINIT_EARLY);
 	}
 }
 
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 171ad975c7cfd..27e3be75edcf7 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -630,14 +630,7 @@ EXPORT_SYMBOL_GPL(restore_online_page_callback);
 
 void generic_online_page(struct page *page, unsigned int order)
 {
-	/*
-	 * Freeing the page with debug_pagealloc enabled will try to unmap it,
-	 * so we should map it first. This is better than introducing a special
-	 * case in page freeing fast path.
-	 */
-	debug_pagealloc_map_pages(page, 1 << order);
-	__free_pages_core(page, order);
-	totalram_pages_add(1UL << order);
+	__free_pages_core(page, order, MEMINIT_HOTPLUG);
 }
 EXPORT_SYMBOL_GPL(generic_online_page);
 
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 019193b0d8703..feb5b6e8c8875 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -1938,7 +1938,7 @@ static void __init deferred_free_range(unsigned long pfn,
 	for (i = 0; i < nr_pages; i++, page++, pfn++) {
 		if (pageblock_aligned(pfn))
 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-		__free_pages_core(page, 0);
+		__free_pages_core(page, 0, MEMINIT_EARLY);
 	}
 }
 
@@ -2513,7 +2513,7 @@ void __init memblock_free_pages(struct page *page, unsigned long pfn,
 		}
 	}
 
-	__free_pages_core(page, order);
+	__free_pages_core(page, order, MEMINIT_EARLY);
 }
 
 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2224965ada468..e0c8a8354be36 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1214,7 +1214,8 @@ static void __free_pages_ok(struct page *page, unsigned int order,
 	__count_vm_events(PGFREE, 1 << order);
 }
 
-void __free_pages_core(struct page *page, unsigned int order)
+void __free_pages_core(struct page *page, unsigned int order,
+		enum meminit_context context)
 {
 	unsigned int nr_pages = 1 << order;
 	struct page *p = page;
@@ -1234,7 +1235,19 @@ void __free_pages_core(struct page *page, unsigned int order)
 	__ClearPageReserved(p);
 	set_page_count(p, 0);
 
-	atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
+	if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG) &&
+	    unlikely(context == MEMINIT_HOTPLUG)) {
+		/*
+		 * Freeing the page with debug_pagealloc enabled will try to
+		 * unmap it; some archs don't like double-unmappings, so
+		 * map it first.
+		 */
+		debug_pagealloc_map_pages(page, nr_pages);
+		adjust_managed_page_count(page, nr_pages);
+	} else {
+		/* memblock adjusts totalram_pages() ahead of time. */
+		atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
+	}
 
 	if (page_contains_unaccepted(page, order)) {
 		if (order == MAX_PAGE_ORDER && __free_unaccepted(page))
-- 
2.45.1
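
For illustration only (not part of the patch): a small standalone C model of the accounting split that the new meminit_context argument encodes. The counters and helpers below are simplified stand-ins for the kernel's zone managed_pages, totalram_pages and adjust_managed_page_count(); only the control flow mirrors __free_pages_core().

/* Standalone model of the two accounting paths in __free_pages_core().
 * Not kernel code: counters and helpers are simplified stand-ins.
 */
#include <stdio.h>

enum meminit_context { MEMINIT_EARLY, MEMINIT_HOTPLUG };

static long managed_pages;  /* stand-in for the zone's managed page count */
static long totalram_pages; /* stand-in for the global RAM page count */

/* Hotplug path: one helper bumps both counters at free time. */
static void adjust_managed_page_count(long nr_pages)
{
	managed_pages += nr_pages;
	totalram_pages += nr_pages;
}

static void free_pages_core(long nr_pages, enum meminit_context context)
{
	if (context == MEMINIT_HOTPLUG) {
		adjust_managed_page_count(nr_pages);
	} else {
		/* Early/memblock path: totalram was already raised ahead of
		 * time, so only the managed count moves here. */
		managed_pages += nr_pages;
	}
}

int main(void)
{
	/* Boot: memblock accounted 512 pages in totalram up front. */
	totalram_pages = 512;
	free_pages_core(512, MEMINIT_EARLY);

	/* Later: a hotplugged block of 128 pages is onlined. */
	free_pages_core(128, MEMINIT_HOTPLUG);

	printf("managed=%ld totalram=%ld\n", managed_pages, totalram_pages);
	return 0;
}

Running the model prints managed=640 totalram=640: both paths converge on the same totals, but the early path relies on memblock having adjusted totalram ahead of time, which is why it cannot simply call adjust_managed_page_count().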