Re: [PATCH 06/45] zram: remove the claim mechanism
On Tue 24-11-20 14:27:12, Christoph Hellwig wrote:
> The zram claim mechanism was added to ensure no new opens come in
> during teardown. But the proper way to archive that is to call
                                         ^^^ achieve
> del_gendisk first, which takes care of all that. Once del_gendisk
> is called in the right place, the reset side can also be simplified
> as no I/O can be outstanding on a block device that is not open.
>
> Signed-off-by: Christoph Hellwig <hch@xxxxxx>

Otherwise I didn't find anything obviously wrong with the patch but I
don't feel confident enough with zram to really give you my reviewed-by
on this one.

                                                                Honza

> ---
>  drivers/block/zram/zram_drv.c | 72 ++++++++------------------------------
>  1 file changed, 15 insertions(+), 57 deletions(-)
>
> diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
> index 6d15d51cee2b7e..2e6d75ec1afddb 100644
> --- a/drivers/block/zram/zram_drv.c
> +++ b/drivers/block/zram/zram_drv.c
> @@ -1756,64 +1756,33 @@ static ssize_t disksize_store(struct device *dev,
>  static ssize_t reset_store(struct device *dev,
>                  struct device_attribute *attr, const char *buf, size_t len)
>  {
> -        int ret;
> -        unsigned short do_reset;
> -        struct zram *zram;
> +        struct zram *zram = dev_to_zram(dev);
>          struct block_device *bdev;
> +        unsigned short do_reset;
> +        int ret = 0;
>
>          ret = kstrtou16(buf, 10, &do_reset);
>          if (ret)
>                  return ret;
> -
>          if (!do_reset)
>                  return -EINVAL;
>
> -        zram = dev_to_zram(dev);
>          bdev = bdget_disk(zram->disk, 0);
>          if (!bdev)
>                  return -ENOMEM;
>
>          mutex_lock(&bdev->bd_mutex);
> -        /* Do not reset an active device or claimed device */
> -        if (bdev->bd_openers || zram->claim) {
> -                mutex_unlock(&bdev->bd_mutex);
> -                bdput(bdev);
> -                return -EBUSY;
> -        }
> -
> -        /* From now on, anyone can't open /dev/zram[0-9] */
> -        zram->claim = true;
> +        if (bdev->bd_openers)
> +                ret = -EBUSY;
> +        else
> +                zram_reset_device(zram);
>          mutex_unlock(&bdev->bd_mutex);
> -
> -        /* Make sure all the pending I/O are finished */
> -        fsync_bdev(bdev);
> -        zram_reset_device(zram);
>          bdput(bdev);
>
> -        mutex_lock(&bdev->bd_mutex);
> -        zram->claim = false;
> -        mutex_unlock(&bdev->bd_mutex);
> -
> -        return len;
> -}
> -
> -static int zram_open(struct block_device *bdev, fmode_t mode)
> -{
> -        int ret = 0;
> -        struct zram *zram;
> -
> -        WARN_ON(!mutex_is_locked(&bdev->bd_mutex));
> -
> -        zram = bdev->bd_disk->private_data;
> -        /* zram was claimed to reset so open request fails */
> -        if (zram->claim)
> -                ret = -EBUSY;
> -
> -        return ret;
> +        return ret ? ret : len;
>  }
>
>  static const struct block_device_operations zram_devops = {
> -        .open = zram_open,
>          .submit_bio = zram_submit_bio,
>          .swap_slot_free_notify = zram_slot_free_notify,
>          .rw_page = zram_rw_page,
> @@ -1821,7 +1790,6 @@ static const struct block_device_operations zram_devops = {
>  };
>
>  static const struct block_device_operations zram_wb_devops = {
> -        .open = zram_open,
>          .submit_bio = zram_submit_bio,
>          .swap_slot_free_notify = zram_slot_free_notify,
>          .owner = THIS_MODULE
> @@ -1974,32 +1942,22 @@ static int zram_add(void)
>
>  static int zram_remove(struct zram *zram)
>  {
> -        struct block_device *bdev;
> -
> -        bdev = bdget_disk(zram->disk, 0);
> -        if (!bdev)
> -                return -ENOMEM;
> +        struct block_device *bdev = bdget_disk(zram->disk, 0);
>
> -        mutex_lock(&bdev->bd_mutex);
> -        if (bdev->bd_openers || zram->claim) {
> -                mutex_unlock(&bdev->bd_mutex);
> +        if (bdev) {
> +                if (bdev->bd_openers) {
> +                        bdput(bdev);
> +                        return -EBUSY;
> +                }
>                  bdput(bdev);
> -                return -EBUSY;
>          }
>
> -        zram->claim = true;
> -        mutex_unlock(&bdev->bd_mutex);
> -
> +        del_gendisk(zram->disk);
>          zram_debugfs_unregister(zram);
> -
> -        /* Make sure all the pending I/O are finished */
> -        fsync_bdev(bdev);
>          zram_reset_device(zram);
> -        bdput(bdev);
>
>          pr_info("Removed device: %s\n", zram->disk->disk_name);
>
> -        del_gendisk(zram->disk);
>          blk_cleanup_queue(zram->disk->queue);
>          put_disk(zram->disk);
>          kfree(zram);
> --
> 2.29.2

-- 
Jan Kara <jack@xxxxxxxx>
SUSE Labs, CR
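[Editor's note] For readers skimming the quoted hunks, the remove path with this
patch applied boils down to roughly the following C sketch. It is reconstructed
from the diff above rather than copied from the tree; struct zram and the
zram_* helpers come from drivers/block/zram/zram_drv.{c,h}, and the trailing
return 0 is assumed to be unchanged context outside the hunks.

/*
 * Minimal sketch of zram_remove() as it reads with this patch applied,
 * reconstructed from the hunks quoted above (not taken from the tree).
 * All calls are kernel-internal APIs of that era (v5.10-rc): bdget_disk(),
 * bdput(), del_gendisk(), blk_cleanup_queue(), put_disk().
 */
#include <linux/blkdev.h>
#include <linux/genhd.h>

#include "zram_drv.h"           /* struct zram, zram_* helpers */

static int zram_remove(struct zram *zram)
{
        struct block_device *bdev = bdget_disk(zram->disk, 0);

        /* Refuse to remove a device that is still open somewhere. */
        if (bdev) {
                if (bdev->bd_openers) {
                        bdput(bdev);
                        return -EBUSY;
                }
                bdput(bdev);
        }

        /*
         * del_gendisk() comes first: per the commit message, once the disk
         * is unregistered no new opens can arrive, so the old zram->claim
         * flag is unnecessary and the reset below cannot race with new I/O.
         */
        del_gendisk(zram->disk);
        zram_debugfs_unregister(zram);
        zram_reset_device(zram);

        pr_info("Removed device: %s\n", zram->disk->disk_name);

        blk_cleanup_queue(zram->disk->queue);
        put_disk(zram->disk);
        kfree(zram);

        return 0;       /* assumed unchanged tail, outside the quoted hunks */
}

The same ordering is what lets reset_store() drop the claim handshake: with
the disk still registered it only needs to check bdev->bd_openers under
bd_mutex before resetting.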