
[PATCH v3 33/49] mbcache: dynamically allocate the mbcache shrinker



In preparation for implementing lockless slab shrink, use the new APIs to
dynamically allocate the mbcache shrinker, so that it can be freed
asynchronously via kfree_rcu(). Then, releasing the struct mb_cache no longer
needs to wait for an RCU read-side critical section to finish.
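
For reference, below is a minimal sketch of the dynamic shrinker pattern this
patch switches to, written for a hypothetical foo_cache; the foo_* names are
illustrative only, while shrinker_alloc(), shrinker_register(), shrinker_free()
and ->private_data are the interfaces actually used in the diff:

#include <linux/shrinker.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct foo_cache {
        unsigned long nr_objects;
        struct shrinker *shrink;        /* a pointer now, allocated at runtime */
};

static unsigned long foo_count(struct shrinker *shrink,
                               struct shrink_control *sc)
{
        /* The owner is reached through ->private_data instead of container_of(). */
        struct foo_cache *cache = shrink->private_data;

        return cache->nr_objects;
}

static unsigned long foo_scan(struct shrinker *shrink,
                              struct shrink_control *sc)
{
        /* Reclaim up to sc->nr_to_scan objects; this sketch frees nothing. */
        return 0;
}

static int foo_cache_init(struct foo_cache *cache)
{
        cache->shrink = shrinker_alloc(0, "foo-cache-shrinker");
        if (!cache->shrink)
                return -ENOMEM;

        cache->shrink->count_objects = foo_count;
        cache->shrink->scan_objects = foo_scan;
        cache->shrink->seeks = DEFAULT_SEEKS;
        cache->shrink->private_data = cache;

        /* The shrinker only becomes visible to reclaim once registered. */
        shrinker_register(cache->shrink);
        return 0;
}

static void foo_cache_exit(struct foo_cache *cache)
{
        /* Unregisters and then frees the shrinker, replacing unregister_shrinker(). */
        shrinker_free(cache->shrink);
}

Compared with an embedded struct shrinker plus container_of(), the separately
allocated shrinker can be torn down via RCU after shrinker_free(), which is
what the lockless slab shrink work relies on.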

Signed-off-by: Qi Zheng <zhengqi.arch@xxxxxxxxxxxxx>
Reviewed-by: Muchun Song <songmuchun@xxxxxxxxxxxxx>
---
 fs/mbcache.c | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)

diff --git a/fs/mbcache.c b/fs/mbcache.c
index 2a4b8b549e93..0d1e24e9a5e3 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -37,7 +37,7 @@ struct mb_cache {
        struct list_head        c_list;
        /* Number of entries in cache */
        unsigned long           c_entry_count;
-       struct shrinker         c_shrink;
+       struct shrinker         *c_shrink;
        /* Work for shrinking when the cache has too many entries */
        struct work_struct      c_shrink_work;
 };
@@ -293,8 +293,7 @@ EXPORT_SYMBOL(mb_cache_entry_touch);
 static unsigned long mb_cache_count(struct shrinker *shrink,
                                    struct shrink_control *sc)
 {
-       struct mb_cache *cache = container_of(shrink, struct mb_cache,
-                                             c_shrink);
+       struct mb_cache *cache = shrink->private_data;
 
        return cache->c_entry_count;
 }
@@ -333,8 +332,7 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
 static unsigned long mb_cache_scan(struct shrinker *shrink,
                                   struct shrink_control *sc)
 {
-       struct mb_cache *cache = container_of(shrink, struct mb_cache,
-                                             c_shrink);
+       struct mb_cache *cache = shrink->private_data;
        return mb_cache_shrink(cache, sc->nr_to_scan);
 }
 
@@ -377,15 +375,20 @@ struct mb_cache *mb_cache_create(int bucket_bits)
        for (i = 0; i < bucket_count; i++)
                INIT_HLIST_BL_HEAD(&cache->c_hash[i]);
 
-       cache->c_shrink.count_objects = mb_cache_count;
-       cache->c_shrink.scan_objects = mb_cache_scan;
-       cache->c_shrink.seeks = DEFAULT_SEEKS;
-       if (register_shrinker(&cache->c_shrink, "mbcache-shrinker")) {
+       cache->c_shrink = shrinker_alloc(0, "mbcache-shrinker");
+       if (!cache->c_shrink) {
                kfree(cache->c_hash);
                kfree(cache);
                goto err_out;
        }
 
+       cache->c_shrink->count_objects = mb_cache_count;
+       cache->c_shrink->scan_objects = mb_cache_scan;
+       cache->c_shrink->seeks = DEFAULT_SEEKS;
+       cache->c_shrink->private_data = cache;
+
+       shrinker_register(cache->c_shrink);
+
        INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);
 
        return cache;
@@ -406,7 +409,7 @@ void mb_cache_destroy(struct mb_cache *cache)
 {
        struct mb_cache_entry *entry, *next;
 
-       unregister_shrinker(&cache->c_shrink);
+       shrinker_free(cache->c_shrink);
 
        /*
         * We don't bother with any locking. Cache must not be used at this
-- 
2.30.2