
Re: [PATCH v3 29/49] md/raid5: dynamically allocate the md-raid5 shrinker





On 2023/7/27 16:04, Qi Zheng wrote:
In preparation for implementing lockless slab shrink, use the new APIs to
dynamically allocate the md-raid5 shrinker, so that it can be freed
asynchronously using kfree_rcu(). Then it doesn't need to wait for an RCU
read-side critical section when releasing the struct r5conf.

Signed-off-by: Qi Zheng <zhengqi.arch@xxxxxxxxxxxxx>
Reviewed-by: Muchun Song <songmuchun@xxxxxxxxxxxxx>
---
  drivers/md/raid5.c | 25 ++++++++++++++-----------
  drivers/md/raid5.h |  2 +-
  2 files changed, 15 insertions(+), 12 deletions(-)

diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 85b3004594e0..fbb4e6f5ff43 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7414,7 +7414,7 @@ static void free_conf(struct r5conf *conf)
        log_exit(conf);
-       unregister_shrinker(&conf->shrinker);
+       shrinker_free(conf->shrinker);
        free_thread_groups(conf);
        shrink_stripes(conf);
        raid5_free_percpu(conf);
@@ -7462,7 +7462,7 @@ static int raid5_alloc_percpu(struct r5conf *conf)
  static unsigned long raid5_cache_scan(struct shrinker *shrink,
                                      struct shrink_control *sc)
  {
-       struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
+       struct r5conf *conf = shrink->private_data;
        unsigned long ret = SHRINK_STOP;

        if (mutex_trylock(&conf->cache_size_mutex)) {
@@ -7483,7 +7483,7 @@ static unsigned long raid5_cache_scan(struct shrinker *shrink,
  static unsigned long raid5_cache_count(struct shrinker *shrink,
                                       struct shrink_control *sc)
  {
-       struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
+       struct r5conf *conf = shrink->private_data;

        if (conf->max_nr_stripes < conf->min_nr_stripes)
                /* unlikely, but not impossible */
@@ -7718,18 +7718,21 @@ static struct r5conf *setup_conf(struct mddev *mddev)
         * it reduces the queue depth and so can hurt throughput.
         * So set it rather large, scaled by number of devices.
         */
-       conf->shrinker.seeks = DEFAULT_SEEKS * conf->raid_disks * 4;
-       conf->shrinker.scan_objects = raid5_cache_scan;
-       conf->shrinker.count_objects = raid5_cache_count;
-       conf->shrinker.batch = 128;
-       conf->shrinker.flags = 0;
-       ret = register_shrinker(&conf->shrinker, "md-raid5:%s", mdname(mddev));
-       if (ret) {
-               pr_warn("md/raid:%s: couldn't register shrinker.\n",
+       conf->shrinker = shrinker_alloc(0, "md-raid5:%s", mdname(mddev));
+       if (!conf->shrinker) {

ret should be set to -ENOMEM here, will fix.
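For reference, a minimal sketch of what the fixed error path could look like
(reusing the existing local variable ret in setup_conf(), as the other failure
paths there do):

        conf->shrinker = shrinker_alloc(0, "md-raid5:%s", mdname(mddev));
        if (!conf->shrinker) {
                ret = -ENOMEM;  /* propagate the allocation failure to the abort path */
                pr_warn("md/raid:%s: couldn't allocate shrinker.\n",
                        mdname(mddev));
                goto abort;
        }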

+               pr_warn("md/raid:%s: couldn't allocate shrinker.\n",
                        mdname(mddev));
                goto abort;
        }
+
+       conf->shrinker->seeks = DEFAULT_SEEKS * conf->raid_disks * 4;
+       conf->shrinker->scan_objects = raid5_cache_scan;
+       conf->shrinker->count_objects = raid5_cache_count;
+       conf->shrinker->batch = 128;
+       conf->shrinker->private_data = conf;
+
+       shrinker_register(conf->shrinker);
+
        sprintf(pers_name, "raid%d", mddev->new_level);
        rcu_assign_pointer(conf->thread,
                           md_register_thread(raid5d, mddev, pers_name));
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 97a795979a35..22bea20eccbd 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -670,7 +670,7 @@ struct r5conf {
        wait_queue_head_t       wait_for_stripe;
        wait_queue_head_t       wait_for_overlap;
        unsigned long           cache_state;
-       struct shrinker         shrinker;
+       struct shrinker         *shrinker;
        int                     pool_size; /* number of disks in stripeheads in pool */
        spinlock_t              device_lock;
        struct disk_info        *disks;



 

