[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH v4 20/48] mm: thp: dynamically allocate the thp-related shrinkers



Use new APIs to dynamically allocate the thp-zero and thp-deferred_split
shrinkers.

Signed-off-by: Qi Zheng <zhengqi.arch@xxxxxxxxxxxxx>
---
 mm/huge_memory.c | 69 +++++++++++++++++++++++++++++++-----------------
 1 file changed, 45 insertions(+), 24 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 947001a7cd42..5d0c7a0b651c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -65,7 +65,11 @@ unsigned long transparent_hugepage_flags __read_mostly =
        (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
        (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
 
-static struct shrinker deferred_split_shrinker;
+static struct shrinker *deferred_split_shrinker;
+static unsigned long deferred_split_count(struct shrinker *shrink,
+                                         struct shrink_control *sc);
+static unsigned long deferred_split_scan(struct shrinker *shrink,
+                                        struct shrink_control *sc);
 
 static atomic_t huge_zero_refcount;
 struct page *huge_zero_page __read_mostly;
@@ -229,11 +233,7 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
        return 0;
 }
 
-static struct shrinker huge_zero_page_shrinker = {
-       .count_objects = shrink_huge_zero_page_count,
-       .scan_objects = shrink_huge_zero_page_scan,
-       .seeks = DEFAULT_SEEKS,
-};
+static struct shrinker *huge_zero_page_shrinker;
 
 #ifdef CONFIG_SYSFS
 static ssize_t enabled_show(struct kobject *kobj,
@@ -454,6 +454,40 @@ static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
 }
 #endif /* CONFIG_SYSFS */
 
+static int __init thp_shrinker_init(void)
+{
+       huge_zero_page_shrinker = shrinker_alloc(0, "thp-zero");
+       if (!huge_zero_page_shrinker)
+               return -ENOMEM;
+
+       deferred_split_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
+                                                SHRINKER_MEMCG_AWARE |
+                                                SHRINKER_NONSLAB,
+                                                "thp-deferred_split");
+       if (!deferred_split_shrinker) {
+               shrinker_free(huge_zero_page_shrinker);
+               return -ENOMEM;
+       }
+
+       huge_zero_page_shrinker->count_objects = shrink_huge_zero_page_count;
+       huge_zero_page_shrinker->scan_objects = shrink_huge_zero_page_scan;
+       huge_zero_page_shrinker->seeks = DEFAULT_SEEKS;
+       shrinker_register(huge_zero_page_shrinker);
+
+       deferred_split_shrinker->count_objects = deferred_split_count;
+       deferred_split_shrinker->scan_objects = deferred_split_scan;
+       deferred_split_shrinker->seeks = DEFAULT_SEEKS;
+       shrinker_register(deferred_split_shrinker);
+
+       return 0;
+}
+
+static void __init thp_shrinker_exit(void)
+{
+       shrinker_free(huge_zero_page_shrinker);
+       shrinker_free(deferred_split_shrinker);
+}
+
 static int __init hugepage_init(void)
 {
        int err;
@@ -482,12 +516,9 @@ static int __init hugepage_init(void)
        if (err)
                goto err_slab;
 
-       err = register_shrinker(&huge_zero_page_shrinker, "thp-zero");
-       if (err)
-               goto err_hzp_shrinker;
-       err = register_shrinker(&deferred_split_shrinker, "thp-deferred_split");
+       err = thp_shrinker_init();
        if (err)
-               goto err_split_shrinker;
+               goto err_shrinker;
 
        /*
         * By default disable transparent hugepages on smaller systems,
@@ -505,10 +536,8 @@ static int __init hugepage_init(void)
 
        return 0;
 err_khugepaged:
-       unregister_shrinker(&deferred_split_shrinker);
-err_split_shrinker:
-       unregister_shrinker(&huge_zero_page_shrinker);
-err_hzp_shrinker:
+       thp_shrinker_exit();
+err_shrinker:
        khugepaged_destroy();
 err_slab:
        hugepage_exit_sysfs(hugepage_kobj);
@@ -2834,7 +2863,7 @@ void deferred_split_folio(struct folio *folio)
 #ifdef CONFIG_MEMCG
                if (memcg)
                        set_shrinker_bit(memcg, folio_nid(folio),
-                                        deferred_split_shrinker.id);
+                                        deferred_split_shrinker->id);
 #endif
        }
        spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
@@ -2908,14 +2937,6 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
        return split;
 }
 
-static struct shrinker deferred_split_shrinker = {
-       .count_objects = deferred_split_count,
-       .scan_objects = deferred_split_scan,
-       .seeks = DEFAULT_SEEKS,
-       .flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE |
-                SHRINKER_NONSLAB,
-};
-
 #ifdef CONFIG_DEBUG_FS
 static void split_huge_pages_all(void)
 {
-- 
2.30.2




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.