
[PATCH 16/30] aoe: use blk_mq_alloc_disk and blk_cleanup_disk



Use blk_mq_alloc_disk and blk_cleanup_disk to simplify the gendisk and
request_queue allocation and teardown.
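
For reference, the allocation/teardown pairing this conversion moves to
looks roughly like the sketch below. This is not aoe code: the "mydev"
structure, function names, and labels are made up for illustration, and
the tag_set is assumed to be filled in by the caller as the driver
already does.

	#include <linux/blk-mq.h>
	#include <linux/blkdev.h>

	/* hypothetical example device, not part of the aoe driver */
	struct mydev {
		struct blk_mq_tag_set tag_set;	/* assumed pre-initialized */
		struct gendisk *gd;
	};

	static int mydev_alloc(struct mydev *d)
	{
		struct gendisk *gd;
		int err;

		err = blk_mq_alloc_tag_set(&d->tag_set);
		if (err)
			return err;

		/*
		 * Allocates the gendisk together with its request_queue and
		 * stores @d as the queue's queuedata, so the separate
		 * blk_mq_init_queue()/alloc_disk() calls and the explicit
		 * q->queuedata assignment go away.
		 */
		gd = blk_mq_alloc_disk(&d->tag_set, d);
		if (IS_ERR(gd)) {
			err = PTR_ERR(gd);
			goto out_free_tag_set;
		}
		d->gd = gd;
		return 0;

	out_free_tag_set:
		blk_mq_free_tag_set(&d->tag_set);
		return err;
	}

	static void mydev_free(struct mydev *d)
	{
		del_gendisk(d->gd);
		/* puts the gendisk and cleans up the queue it owns */
		blk_cleanup_disk(d->gd);
		blk_mq_free_tag_set(&d->tag_set);
	}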

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 drivers/block/aoe/aoeblk.c | 33 ++++++++++++---------------------
 drivers/block/aoe/aoedev.c |  3 +--
 2 files changed, 13 insertions(+), 23 deletions(-)

diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index c34e71b0c4a9..06b360f7123a 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -338,14 +338,13 @@ static const struct blk_mq_ops aoeblk_mq_ops = {
        .queue_rq       = aoeblk_queue_rq,
 };
 
-/* alloc_disk and add_disk can sleep */
+/* blk_mq_alloc_disk and add_disk can sleep */
 void
 aoeblk_gdalloc(void *vp)
 {
        struct aoedev *d = vp;
        struct gendisk *gd;
        mempool_t *mp;
-       struct request_queue *q;
        struct blk_mq_tag_set *set;
        ulong flags;
        int late = 0;
@@ -362,19 +361,12 @@ aoeblk_gdalloc(void *vp)
        if (late)
                return;
 
-       gd = alloc_disk(AOE_PARTITIONS);
-       if (gd == NULL) {
-               pr_err("aoe: cannot allocate disk structure for %ld.%d\n",
-                       d->aoemajor, d->aoeminor);
-               goto err;
-       }
-
        mp = mempool_create(MIN_BUFS, mempool_alloc_slab, mempool_free_slab,
                buf_pool_cache);
        if (mp == NULL) {
                printk(KERN_ERR "aoe: cannot allocate bufpool for %ld.%d\n",
                        d->aoemajor, d->aoeminor);
-               goto err_disk;
+               goto err;
        }
 
        set = &d->tag_set;
@@ -391,12 +383,11 @@ aoeblk_gdalloc(void *vp)
                goto err_mempool;
        }
 
-       q = blk_mq_init_queue(set);
-       if (IS_ERR(q)) {
+       gd = blk_mq_alloc_disk(set, d);
+       if (IS_ERR(gd)) {
                pr_err("aoe: cannot allocate block queue for %ld.%d\n",
                        d->aoemajor, d->aoeminor);
-               blk_mq_free_tag_set(set);
-               goto err_mempool;
+               goto err_tagset;
        }
 
        spin_lock_irqsave(&d->lock, flags);
@@ -405,16 +396,16 @@ aoeblk_gdalloc(void *vp)
        WARN_ON(d->flags & DEVFL_TKILL);
        WARN_ON(d->gd);
        WARN_ON(d->flags & DEVFL_UP);
-       blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
-       blk_queue_io_opt(q, SZ_2M);
+       blk_queue_max_hw_sectors(gd->queue, BLK_DEF_MAX_SECTORS);
+       blk_queue_io_opt(gd->queue, SZ_2M);
        d->bufpool = mp;
-       d->blkq = gd->queue = q;
-       q->queuedata = d;
+       d->blkq = gd->queue;
        d->gd = gd;
        if (aoe_maxsectors)
-               blk_queue_max_hw_sectors(q, aoe_maxsectors);
+               blk_queue_max_hw_sectors(gd->queue, aoe_maxsectors);
        gd->major = AOE_MAJOR;
        gd->first_minor = d->sysminor;
+       gd->minors = AOE_PARTITIONS;
        gd->fops = &aoe_bdops;
        gd->private_data = d;
        set_capacity(gd, d->ssize);
@@ -435,10 +426,10 @@ aoeblk_gdalloc(void *vp)
        spin_unlock_irqrestore(&d->lock, flags);
        return;
 
+err_tagset:
+       blk_mq_free_tag_set(set);
 err_mempool:
        mempool_destroy(mp);
-err_disk:
-       put_disk(gd);
 err:
        spin_lock_irqsave(&d->lock, flags);
        d->flags &= ~DEVFL_GD_NOW;
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index e2ea2356da06..c5753c6bfe80 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -277,9 +277,8 @@ freedev(struct aoedev *d)
        if (d->gd) {
                aoedisk_rm_debugfs(d);
                del_gendisk(d->gd);
-               put_disk(d->gd);
+               blk_cleanup_disk(d->gd);
                blk_mq_free_tag_set(&d->tag_set);
-               blk_cleanup_queue(d->blkq);
        }
        t = d->targets;
        e = t + d->ntargets;
-- 
2.30.2
