Diffstat (limited to 'block/blk-core.c')
-rw-r--r--	block/blk-core.c	80
1 file changed, 17 insertions, 63 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 27fb1357ad4b..3d286a256d3d 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -136,7 +136,7 @@ static const char *const blk_op_name[] = {
  * string format. Useful in the debugging and tracing bio or request. For
  * invalid REQ_OP_XXX it returns string "UNKNOWN".
  */
-inline const char *blk_op_str(unsigned int op)
+inline const char *blk_op_str(enum req_op op)
 {
 	const char *op_str = "UNKNOWN";
 
@@ -285,49 +285,6 @@ void blk_queue_start_drain(struct request_queue *q)
 }
 
 /**
- * blk_cleanup_queue - shutdown a request queue
- * @q: request queue to shutdown
- *
- * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
- * put it. All future requests will be failed immediately with -ENODEV.
- *
- * Context: can sleep
- */
-void blk_cleanup_queue(struct request_queue *q)
-{
-	/* cannot be called from atomic context */
-	might_sleep();
-
-	WARN_ON_ONCE(blk_queue_registered(q));
-
-	/* mark @q DYING, no new request or merges will be allowed afterwards */
-	blk_queue_flag_set(QUEUE_FLAG_DYING, q);
-	blk_queue_start_drain(q);
-
-	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
-	blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
-
-	/*
-	 * Drain all requests queued before DYING marking. Set DEAD flag to
-	 * prevent that blk_mq_run_hw_queues() accesses the hardware queues
-	 * after draining finished.
-	 */
-	blk_freeze_queue(q);
-
-	blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
-
-	blk_sync_queue(q);
-	if (queue_is_mq(q)) {
-		blk_mq_cancel_work_sync(q);
-		blk_mq_exit_queue(q);
-	}
-
-	/* @q is and will stay empty, shutdown and put */
-	blk_put_queue(q);
-}
-EXPORT_SYMBOL(blk_cleanup_queue);
-
-/**
  * blk_queue_enter() - try to increase q->q_usage_counter
  * @q: request queue pointer
  * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
@@ -435,7 +392,7 @@ struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
 
 	q->last_merge = NULL;
 
-	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
+	q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL);
 	if (q->id < 0)
 		goto fail_srcu;
 
@@ -485,7 +442,7 @@ fail_stats:
 fail_split:
 	bioset_exit(&q->bio_split);
 fail_id:
-	ida_simple_remove(&blk_queue_ida, q->id);
+	ida_free(&blk_queue_ida, q->id);
 fail_srcu:
 	if (alloc_srcu)
 		cleanup_srcu_struct(q->srcu);
@@ -504,12 +461,10 @@ fail_q:
  */
 bool blk_get_queue(struct request_queue *q)
 {
-	if (likely(!blk_queue_dying(q))) {
-		__blk_get_queue(q);
-		return true;
-	}
-
-	return false;
+	if (unlikely(blk_queue_dying(q)))
+		return false;
+	kobject_get(&q->kobj);
+	return true;
 }
 EXPORT_SYMBOL(blk_get_queue);
 
@@ -608,16 +563,15 @@ static int blk_partition_remap(struct bio *bio)
 static inline blk_status_t blk_check_zone_append(struct request_queue *q,
 						 struct bio *bio)
 {
-	sector_t pos = bio->bi_iter.bi_sector;
 	int nr_sectors = bio_sectors(bio);
 
 	/* Only applicable to zoned block devices */
-	if (!blk_queue_is_zoned(q))
+	if (!bdev_is_zoned(bio->bi_bdev))
 		return BLK_STS_NOTSUPP;
 
 	/* The bio sector must point to the start of a sequential zone */
-	if (pos & (blk_queue_zone_sectors(q) - 1) ||
-	    !blk_queue_zone_is_seq(q, pos))
+	if (bio->bi_iter.bi_sector & (bdev_zone_sectors(bio->bi_bdev) - 1) ||
+	    !bio_zone_is_seq(bio))
 		return BLK_STS_IOERR;
 
 	/*
@@ -762,7 +716,7 @@ void submit_bio_noacct(struct bio *bio)
 
 	might_sleep();
 
-	plug = blk_mq_plug(q, bio);
+	plug = blk_mq_plug(bio);
 	if (plug && plug->nowait)
 		bio->bi_opf |= REQ_NOWAIT;
 
@@ -818,11 +772,11 @@ void submit_bio_noacct(struct bio *bio)
 	case REQ_OP_ZONE_OPEN:
 	case REQ_OP_ZONE_CLOSE:
 	case REQ_OP_ZONE_FINISH:
-		if (!blk_queue_is_zoned(q))
+		if (!bdev_is_zoned(bio->bi_bdev))
 			goto not_supported;
 		break;
 	case REQ_OP_ZONE_RESET_ALL:
-		if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
+		if (!bdev_is_zoned(bio->bi_bdev) || !blk_queue_zone_resetall(q))
 			goto not_supported;
 		break;
 	case REQ_OP_WRITE_ZEROES:
@@ -987,7 +941,7 @@ void update_io_ticks(struct block_device *part, unsigned long now, bool end)
 again:
 	stamp = READ_ONCE(part->bd_stamp);
 	if (unlikely(time_after(now, stamp))) {
-		if (likely(cmpxchg(&part->bd_stamp, stamp, now) == stamp))
+		if (likely(try_cmpxchg(&part->bd_stamp, &stamp, now)))
 			__part_stat_add(part, io_ticks, end ? now - stamp : 1);
 	}
 	if (part->bd_partno) {
@@ -997,7 +951,7 @@ again:
 }
 
 unsigned long bdev_start_io_acct(struct block_device *bdev,
-				 unsigned int sectors, unsigned int op,
+				 unsigned int sectors, enum req_op op,
				 unsigned long start_time)
 {
 	const int sgrp = op_stat_group(op);
@@ -1038,7 +992,7 @@ unsigned long bio_start_io_acct(struct bio *bio)
 }
 EXPORT_SYMBOL_GPL(bio_start_io_acct);
 
-void bdev_end_io_acct(struct block_device *bdev, unsigned int op,
+void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
 		      unsigned long start_time)
 {
 	const int sgrp = op_stat_group(op);
@@ -1247,7 +1201,7 @@ EXPORT_SYMBOL_GPL(blk_io_schedule);
 
 int __init blk_dev_init(void)
 {
-	BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
+	BUILD_BUG_ON((__force u32)REQ_OP_LAST >= (1 << REQ_OP_BITS));
 	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
 			sizeof_field(struct request, cmd_flags));
 	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
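
Two of the conversions above are generic API migrations that recur across the tree, so they are worth spelling out. The cmpxchg() -> try_cmpxchg() switch in update_io_ticks() relies on try_cmpxchg() returning a bool and, on failure, writing the value it actually found back through its second argument, so a retry loop never needs to re-read the location itself. Below is a minimal userspace sketch of that pattern, modeled on the GCC/Clang __atomic builtins; the helper name try_cmpxchg_ulong is hypothetical, not kernel API.

/*
 * Userspace model of the kernel's try_cmpxchg(): returns true on a
 * successful swap; on failure, *old is updated to the observed value.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned long stamp;	/* stands in for part->bd_stamp */

static bool try_cmpxchg_ulong(unsigned long *ptr, unsigned long *old,
			      unsigned long new)
{
	return __atomic_compare_exchange_n(ptr, old, new, false,
					   __ATOMIC_SEQ_CST,
					   __ATOMIC_SEQ_CST);
}

int main(void)
{
	unsigned long now = 42;
	unsigned long expected = stamp;	/* one explicit read, as in the hunk */

	/*
	 * Old pattern: cmpxchg() returns the value it found, so success is
	 * tested by comparing against the expected value:
	 *
	 *	if (cmpxchg(&stamp, expected, now) == expected) ...
	 *
	 * New pattern: the bool result is the success test, and a failed
	 * attempt has already refreshed "expected" for the next retry.
	 */
	while (!try_cmpxchg_ulong(&stamp, &expected, now))
		;	/* no explicit reload needed here */

	printf("stamp is now %lu\n", stamp);
	return 0;
}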
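
Likewise, the q->id hunks move off the deprecated ida_simple_*() wrappers: ida_alloc(ida, gfp) behaves like ida_simple_get(ida, 0, 0, gfp), returning an ID in [0, INT_MAX] or a negative errno, and ida_free() is the counterpart of ida_simple_remove(). Here is a hedged kernel-module sketch of the same conversion; the module and its identifiers are hypothetical, not part of this commit.

/* Sketch: allocate an ID at load time the way blk_alloc_queue() now does. */
#include <linux/idr.h>
#include <linux/module.h>

static DEFINE_IDA(example_ida);
static int example_id;

static int __init example_init(void)
{
	/* Before: example_id = ida_simple_get(&example_ida, 0, 0, GFP_KERNEL); */
	example_id = ida_alloc(&example_ida, GFP_KERNEL);
	if (example_id < 0)
		return example_id;	/* negative errno on failure */
	pr_info("allocated id %d\n", example_id);
	return 0;
}

static void __exit example_exit(void)
{
	/* Before: ida_simple_remove(&example_ida, example_id); */
	ida_free(&example_ida, example_id);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");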