[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH v2 36/47] xfs: dynamically allocate the xfs-buf shrinker



In preparation for implementing lockless slab shrink, use the new APIs to
dynamically allocate the xfs-buf shrinker, so that it can be freed
asynchronously via kfree_rcu(). This way, we don't need to wait for an RCU
read-side critical section when releasing the struct xfs_buftarg.

Signed-off-by: Qi Zheng <zhengqi.arch@xxxxxxxxxxxxx>
---
 fs/xfs/xfs_buf.c | 25 ++++++++++++++-----------
 fs/xfs/xfs_buf.h |  2 +-
 2 files changed, 15 insertions(+), 12 deletions(-)

diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 15d1e5a7c2d3..19a0bf6ce115 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1906,8 +1906,7 @@ xfs_buftarg_shrink_scan(
        struct shrinker         *shrink,
        struct shrink_control   *sc)
 {
-       struct xfs_buftarg      *btp = container_of(shrink,
-                                       struct xfs_buftarg, bt_shrinker);
+       struct xfs_buftarg      *btp = shrink->private_data;
        LIST_HEAD(dispose);
        unsigned long           freed;
 
@@ -1929,8 +1928,7 @@ xfs_buftarg_shrink_count(
        struct shrinker         *shrink,
        struct shrink_control   *sc)
 {
-       struct xfs_buftarg      *btp = container_of(shrink,
-                                       struct xfs_buftarg, bt_shrinker);
+       struct xfs_buftarg      *btp = shrink->private_data;
        return list_lru_shrink_count(&btp->bt_lru, sc);
 }
 
@@ -1938,7 +1936,7 @@ void
 xfs_free_buftarg(
        struct xfs_buftarg      *btp)
 {
-       unregister_shrinker(&btp->bt_shrinker);
+       shrinker_unregister(btp->bt_shrinker);
        ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0);
        percpu_counter_destroy(&btp->bt_io_count);
        list_lru_destroy(&btp->bt_lru);
@@ -2021,13 +2019,18 @@ xfs_alloc_buftarg(
        if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
                goto error_lru;
 
-       btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
-       btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
-       btp->bt_shrinker.seeks = DEFAULT_SEEKS;
-       btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
-       if (register_shrinker(&btp->bt_shrinker, "xfs-buf:%s",
-                             mp->m_super->s_id))
+       btp->bt_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "xfs-buf:%s",
+                                         mp->m_super->s_id);
+       if (!btp->bt_shrinker)
                goto error_pcpu;
+
+       btp->bt_shrinker->count_objects = xfs_buftarg_shrink_count;
+       btp->bt_shrinker->scan_objects = xfs_buftarg_shrink_scan;
+       btp->bt_shrinker->seeks = DEFAULT_SEEKS;
+       btp->bt_shrinker->private_data = btp;
+
+       shrinker_register(btp->bt_shrinker);
+
        return btp;
 
 error_pcpu:
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 549c60942208..4e6969a675f7 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -102,7 +102,7 @@ typedef struct xfs_buftarg {
        size_t                  bt_logical_sectormask;
 
        /* LRU control structures */
-       struct shrinker         bt_shrinker;
+       struct shrinker         *bt_shrinker;
        struct list_lru         bt_lru;
 
        struct percpu_counter   bt_io_count;
-- 
2.30.2




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, which monitors our
servers 24x7x365 and backs them with RackSpace's Fanatical Support®.