[PATCH 13/27] block: add a bdev_fua helper
Add a helper to check the FUA flag based on the block_device instead of
having to poke into the block layer internal request_queue.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
Reviewed-by: Martin K. Petersen <martin.petersen@xxxxxxxxxx>
---
 drivers/block/rnbd/rnbd-srv.c       | 3 +--
 drivers/target/target_core_iblock.c | 3 +--
 fs/iomap/direct-io.c                | 3 +--
 include/linux/blkdev.h              | 6 +++++-
 4 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index f8cc3c5fecb4b..beaef43a67b9d 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -533,7 +533,6 @@ static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
 					struct rnbd_srv_sess_dev *sess_dev)
 {
 	struct rnbd_dev *rnbd_dev = sess_dev->rnbd_dev;
-	struct request_queue *q = bdev_get_queue(rnbd_dev->bdev);
 
 	rsp->hdr.type = cpu_to_le16(RNBD_MSG_OPEN_RSP);
 	rsp->device_id =
@@ -560,7 +559,7 @@ static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
 	rsp->cache_policy = 0;
 	if (bdev_write_cache(rnbd_dev->bdev))
 		rsp->cache_policy |= RNBD_WRITEBACK;
-	if (blk_queue_fua(q))
+	if (bdev_fua(rnbd_dev->bdev))
 		rsp->cache_policy |= RNBD_FUA;
 }
 
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 03013e85ffc03..c4a903b8a47fc 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -727,14 +727,13 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
 
 	if (data_direction == DMA_TO_DEVICE) {
 		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
-		struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
 		/*
 		 * Force writethrough using REQ_FUA if a volatile write cache
 		 * is not enabled, or if initiator set the Force Unit Access bit.
 		 */
 		opf = REQ_OP_WRITE;
 		miter_dir = SG_MITER_TO_SG;
-		if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
+		if (bdev_fua(ib_dev->ibd_bd)) {
 			if (cmd->se_cmd_flags & SCF_FUA)
 				opf |= REQ_FUA;
 			else if (!bdev_write_cache(ib_dev->ibd_bd))
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index b08f5dc31780d..62da020d02a11 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -265,8 +265,7 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
 		 * cache flushes on IO completion.
 		 */
 		if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
-		    (dio->flags & IOMAP_DIO_WRITE_FUA) &&
-		    blk_queue_fua(bdev_get_queue(iomap->bdev)))
+		    (dio->flags & IOMAP_DIO_WRITE_FUA) && bdev_fua(iomap->bdev))
 			use_fua = true;
 	}
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 807a49aa5a27a..075b16d4560e7 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -602,7 +602,6 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
 			     REQ_FAILFAST_DRIVER))
 #define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
 #define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
-#define blk_queue_fua(q)	test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
 #define blk_queue_registered(q)	test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
 #define blk_queue_nowait(q)	test_bit(QUEUE_FLAG_NOWAIT, &(q)->queue_flags)
 
@@ -1336,6 +1335,11 @@ static inline bool bdev_write_cache(struct block_device *bdev)
 	return test_bit(QUEUE_FLAG_WC, &bdev_get_queue(bdev)->queue_flags);
 }
 
+static inline bool bdev_fua(struct block_device *bdev)
+{
+	return test_bit(QUEUE_FLAG_FUA, &bdev_get_queue(bdev)->queue_flags);
+}
+
 static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
-- 
2.30.2
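
[Editor's note] For readers wiring this into their own code, a minimal sketch of the calling pattern the patch converts to. The function example_write_opf() and its want_fua parameter are hypothetical; only bdev_fua(), bdev_write_cache(), REQ_OP_WRITE, and REQ_FUA come from the block layer API shown above.

#include <linux/blkdev.h>

/*
 * Hypothetical helper (not part of the patch): choose write flags from
 * the block_device alone, mirroring the converted callers above rather
 * than poking at bdev_get_queue(bdev)->queue_flags directly.
 */
static unsigned int example_write_opf(struct block_device *bdev,
				      bool want_fua)
{
	unsigned int opf = REQ_OP_WRITE;

	/* Only request FUA when the device actually advertises it. */
	if (want_fua && bdev_fua(bdev))
		opf |= REQ_FUA;

	return opf;
}

If bdev_fua() returns false while bdev_write_cache() is true, durability instead needs an explicit flush (e.g. blkdev_issue_flush()) after the write completes; that is the distinction the target_core_iblock.c hunk above encodes.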