[PATCH v4 27/48] dm zoned: dynamically allocate the dm-zoned-meta shrinker

In preparation for implementing lockless slab shrink, use the new APIs
to dynamically allocate the dm-zoned-meta shrinker, so that it can be
freed asynchronously via kfree_rcu(). This way, it no longer needs to
wait for an RCU read-side critical section when releasing the struct
dmz_metadata.
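
For context, here is a minimal sketch of the conversion pattern this
patch follows (assuming the shrinker_alloc()/shrinker_register()/
shrinker_free() interfaces introduced earlier in this series; the
my_count/my_scan/ctx names are illustrative, not taken from dm-zoned):

        struct shrinker *s;

        /* Allocate a shrinker instead of embedding struct shrinker. */
        s = shrinker_alloc(0, "my-shrinker:(%u:%u)", major, minor);
        if (!s)
                return -ENOMEM;

        /* Set up callbacks after allocation, before registration. */
        s->count_objects = my_count;
        s->scan_objects = my_scan;
        s->seeks = DEFAULT_SEEKS;
        s->private_data = ctx;  /* replaces container_of() lookups */

        shrinker_register(s);

        /* On teardown: unregister, then free via kfree_rcu(). */
        shrinker_free(s);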

Signed-off-by: Qi Zheng <zhengqi.arch@xxxxxxxxxxxxx>
Reviewed-by: Muchun Song <songmuchun@xxxxxxxxxxxxx>
---
 drivers/md/dm-zoned-metadata.c | 29 +++++++++++++++++------------
 1 file changed, 17 insertions(+), 12 deletions(-)

diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 9d3cca8e3dc9..bbb0e69a7908 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -187,7 +187,7 @@ struct dmz_metadata {
        struct rb_root          mblk_rbtree;
        struct list_head        mblk_lru_list;
        struct list_head        mblk_dirty_list;
-       struct shrinker         mblk_shrinker;
+       struct shrinker         *mblk_shrinker;
 
        /* Zone allocation management */
        struct mutex            map_lock;
@@ -615,7 +615,7 @@ static unsigned long dmz_shrink_mblock_cache(struct dmz_metadata *zmd,
 static unsigned long dmz_mblock_shrinker_count(struct shrinker *shrink,
                                               struct shrink_control *sc)
 {
-       struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, mblk_shrinker);
+       struct dmz_metadata *zmd = shrink->private_data;
 
        return atomic_read(&zmd->nr_mblks);
 }
@@ -626,7 +626,7 @@ static unsigned long dmz_mblock_shrinker_count(struct shrinker *shrink,
 static unsigned long dmz_mblock_shrinker_scan(struct shrinker *shrink,
                                              struct shrink_control *sc)
 {
-       struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, mblk_shrinker);
+       struct dmz_metadata *zmd = shrink->private_data;
        unsigned long count;
 
        spin_lock(&zmd->mblk_lock);
@@ -2936,19 +2936,24 @@ int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
         */
        zmd->min_nr_mblks = 2 + zmd->nr_map_blocks + zmd->zone_nr_bitmap_blocks * 16;
        zmd->max_nr_mblks = zmd->min_nr_mblks + 512;
-       zmd->mblk_shrinker.count_objects = dmz_mblock_shrinker_count;
-       zmd->mblk_shrinker.scan_objects = dmz_mblock_shrinker_scan;
-       zmd->mblk_shrinker.seeks = DEFAULT_SEEKS;
 
        /* Metadata cache shrinker */
-       ret = register_shrinker(&zmd->mblk_shrinker, "dm-zoned-meta:(%u:%u)",
-                               MAJOR(dev->bdev->bd_dev),
-                               MINOR(dev->bdev->bd_dev));
-       if (ret) {
-               dmz_zmd_err(zmd, "Register metadata cache shrinker failed");
+       zmd->mblk_shrinker = shrinker_alloc(0, "dm-zoned-meta:(%u:%u)",
+                                           MAJOR(dev->bdev->bd_dev),
+                                           MINOR(dev->bdev->bd_dev));
+       if (!zmd->mblk_shrinker) {
+               ret = -ENOMEM;
+               dmz_zmd_err(zmd, "Allocate metadata cache shrinker failed");
                goto err;
        }
 
+       zmd->mblk_shrinker->count_objects = dmz_mblock_shrinker_count;
+       zmd->mblk_shrinker->scan_objects = dmz_mblock_shrinker_scan;
+       zmd->mblk_shrinker->seeks = DEFAULT_SEEKS;
+       zmd->mblk_shrinker->private_data = zmd;
+
+       shrinker_register(zmd->mblk_shrinker);
+
        dmz_zmd_info(zmd, "DM-Zoned metadata version %d", zmd->sb_version);
        for (i = 0; i < zmd->nr_devs; i++)
                dmz_print_dev(zmd, i);
@@ -2995,7 +3000,7 @@ int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
  */
 void dmz_dtr_metadata(struct dmz_metadata *zmd)
 {
-       unregister_shrinker(&zmd->mblk_shrinker);
+       shrinker_free(zmd->mblk_shrinker);
        dmz_cleanup_metadata(zmd);
        kfree(zmd);
 }
-- 
2.30.2