Skip to content

Commit 4d337ce

Browse files
Ming Lei authored and Jens Axboe committed
blk-mq: avoid to touch q->elevator without any protection
q->elevator is referenced in blk_mq_has_sqsched() without any protection: no .q_usage_counter is held, and no queue srcu or rcu read lock is held, so a potential use-after-free may be triggered. Fix the issue by adding one queue flag for checking if the elevator uses single-queue-style dispatch. Meanwhile, the elevator feature flag ELEVATOR_F_MQ_AWARE isn't needed any more. Cc: Jan Kara <[email protected]> Signed-off-by: Ming Lei <[email protected]> Reviewed-by: Christoph Hellwig <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Jens Axboe <[email protected]>
1 parent 5fd7a84 commit 4d337ce

File tree

6 files changed

+13
-19
lines changed

6 files changed

+13
-19
lines changed

block/bfq-iosched.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7188,6 +7188,9 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
71887188
bfq_init_root_group(bfqd->root_group, bfqd);
71897189
bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
71907190

7191+
/* We dispatch from request queue wide instead of hw queue */
7192+
blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
7193+
71917194
wbt_disable_default(q);
71927195
return 0;
71937196

block/blk-mq-sched.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -564,6 +564,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
564564
int ret;
565565

566566
if (!e) {
567+
blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
567568
q->elevator = NULL;
568569
q->nr_requests = q->tag_set->queue_depth;
569570
return 0;

block/blk-mq.c

Lines changed: 2 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -2142,20 +2142,6 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
21422142
}
21432143
EXPORT_SYMBOL(blk_mq_run_hw_queue);
21442144

2145-
/*
2146-
* Is the request queue handled by an IO scheduler that does not respect
2147-
* hardware queues when dispatching?
2148-
*/
2149-
static bool blk_mq_has_sqsched(struct request_queue *q)
2150-
{
2151-
struct elevator_queue *e = q->elevator;
2152-
2153-
if (e && e->type->ops.dispatch_request &&
2154-
!(e->type->elevator_features & ELEVATOR_F_MQ_AWARE))
2155-
return true;
2156-
return false;
2157-
}
2158-
21592145
/*
21602146
* Return prefered queue to dispatch from (if any) for non-mq aware IO
21612147
* scheduler.
@@ -2188,7 +2174,7 @@ void blk_mq_run_hw_queues(struct request_queue *q, bool async)
21882174
unsigned long i;
21892175

21902176
sq_hctx = NULL;
2191-
if (blk_mq_has_sqsched(q))
2177+
if (blk_queue_sq_sched(q))
21922178
sq_hctx = blk_mq_get_sq_hctx(q);
21932179
queue_for_each_hw_ctx(q, hctx, i) {
21942180
if (blk_mq_hctx_stopped(hctx))
@@ -2216,7 +2202,7 @@ void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
22162202
unsigned long i;
22172203

22182204
sq_hctx = NULL;
2219-
if (blk_mq_has_sqsched(q))
2205+
if (blk_queue_sq_sched(q))
22202206
sq_hctx = blk_mq_get_sq_hctx(q);
22212207
queue_for_each_hw_ctx(q, hctx, i) {
22222208
if (blk_mq_hctx_stopped(hctx))

block/kyber-iosched.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -421,6 +421,8 @@ static int kyber_init_sched(struct request_queue *q, struct elevator_type *e)
421421

422422
blk_stat_enable_accounting(q);
423423

424+
blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
425+
424426
eq->elevator_data = kqd;
425427
q->elevator = eq;
426428

@@ -1033,7 +1035,6 @@ static struct elevator_type kyber_sched = {
10331035
#endif
10341036
.elevator_attrs = kyber_sched_attrs,
10351037
.elevator_name = "kyber",
1036-
.elevator_features = ELEVATOR_F_MQ_AWARE,
10371038
.elevator_owner = THIS_MODULE,
10381039
};
10391040

block/mq-deadline.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -642,6 +642,9 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
642642
spin_lock_init(&dd->lock);
643643
spin_lock_init(&dd->zone_lock);
644644

645+
/* We dispatch from request queue wide instead of hw queue */
646+
blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
647+
645648
q->elevator = eq;
646649
return 0;
647650

include/linux/blkdev.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -575,6 +575,7 @@ struct request_queue {
575575
#define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */
576576
#define QUEUE_FLAG_HCTX_ACTIVE 28 /* at least one blk-mq hctx is active */
577577
#define QUEUE_FLAG_NOWAIT 29 /* device supports NOWAIT */
578+
#define QUEUE_FLAG_SQ_SCHED 30 /* single queue style io dispatch */
578579

579580
#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
580581
(1 << QUEUE_FLAG_SAME_COMP) | \
@@ -616,6 +617,7 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
616617
#define blk_queue_pm_only(q) atomic_read(&(q)->pm_only)
617618
#define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
618619
#define blk_queue_nowait(q) test_bit(QUEUE_FLAG_NOWAIT, &(q)->queue_flags)
620+
#define blk_queue_sq_sched(q) test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
619621

620622
extern void blk_set_pm_only(struct request_queue *q);
621623
extern void blk_clear_pm_only(struct request_queue *q);
@@ -1006,8 +1008,6 @@ void disk_set_independent_access_ranges(struct gendisk *disk,
10061008
*/
10071009
/* Supports zoned block devices sequential write constraint */
10081010
#define ELEVATOR_F_ZBD_SEQ_WRITE (1U << 0)
1009-
/* Supports scheduling on multiple hardware queues */
1010-
#define ELEVATOR_F_MQ_AWARE (1U << 1)
10111011

10121012
extern void blk_queue_required_elevator_features(struct request_queue *q,
10131013
unsigned int features);

0 commit comments

Comments (0)