
Commit 1874198

Christoph Hellwig authored and axboe committed
blk-mq: rework flush sequencing logic
Switch to using a preallocated flush_rq for blk-mq, similar to what's done
with the old request path.  This allows us to set up the request properly
with a tag from the actually allowed range and ->rq_disk as needed by some
drivers.  To make life easier we also switch to dynamic allocation of
->flush_rq for the old path.

This effectively reverts most of "blk-mq: fix for flush deadlock" and
"blk-mq: Don't reserve a tag for flush request".

Signed-off-by: Christoph Hellwig <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
1 parent ce2c350 commit 1874198
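
The core of the change, in isolation: instead of allocating a fresh tagged request from a workqueue every time a flush must be issued, the queue now owns one preallocated flush request, and at kick time it borrows the tag and rq_disk of the first request waiting on the flush, so drivers still see a tag inside the range they configured. The stand-alone C sketch below illustrates only that pattern; the names (sketch_request, sketch_kick_flush, ...) are invented for illustration and do not exist in the kernel, whose real changes are in the diff that follows.

/*
 * Minimal sketch of "preallocate once, reuse the first waiter's tag".
 * Illustrative only; types and names do not match the kernel sources.
 */
#include <stdlib.h>
#include <string.h>

struct sketch_request {
	int tag;                                        /* driver-visible tag */
	void *rq_disk;                                  /* disk the request targets */
	void (*end_io)(struct sketch_request *rq, int error);
};

struct sketch_queue {
	struct sketch_request *flush_rq;                /* preallocated at init time */
};

/* Allocate the flush request once, when the queue is created. */
static int sketch_init_queue(struct sketch_queue *q)
{
	q->flush_rq = calloc(1, sizeof(*q->flush_rq));
	return q->flush_rq ? 0 : -1;
}

/*
 * When a flush must be issued, reuse the preallocated request and borrow
 * the tag of the first request waiting on the flush, so the tag stays
 * inside the range the driver asked for.
 */
static void sketch_kick_flush(struct sketch_queue *q,
			      struct sketch_request *first_rq,
			      void (*end_io)(struct sketch_request *rq, int error))
{
	memset(q->flush_rq, 0, sizeof(*q->flush_rq));
	q->flush_rq->tag = first_rq->tag;
	q->flush_rq->rq_disk = first_rq->rq_disk;
	q->flush_rq->end_io = end_io;
	/* ...insert q->flush_rq at the head of the dispatch list... */
}

int main(void)
{
	struct sketch_queue q;
	struct sketch_request first = { .tag = 7, .rq_disk = NULL, .end_io = NULL };

	if (sketch_init_queue(&q))
		return 1;
	sketch_kick_flush(&q, &first, NULL);
	free(q.flush_rq);
	return 0;
}

Because the flush request is reused rather than freed, flush_end_io() no longer needs to call blk_mq_free_request(), which is why that call disappears in the blk-flush.c hunks below.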

7 files changed: +76, -117 lines changed

block/blk-core.c (12 additions, 3 deletions)

@@ -693,11 +693,20 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 	if (!uninit_q)
 		return NULL;
 
+	uninit_q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
+	if (!uninit_q->flush_rq)
+		goto out_cleanup_queue;
+
 	q = blk_init_allocated_queue(uninit_q, rfn, lock);
 	if (!q)
-		blk_cleanup_queue(uninit_q);
-
+		goto out_free_flush_rq;
 	return q;
+
+out_free_flush_rq:
+	kfree(uninit_q->flush_rq);
+out_cleanup_queue:
+	blk_cleanup_queue(uninit_q);
+	return NULL;
 }
 EXPORT_SYMBOL(blk_init_queue_node);
 
@@ -1127,7 +1136,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
 struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 {
 	if (q->mq_ops)
-		return blk_mq_alloc_request(q, rw, gfp_mask, false);
+		return blk_mq_alloc_request(q, rw, gfp_mask);
 	else
 		return blk_old_get_request(q, rw, gfp_mask);
 }

block/blk-flush.c (38 additions, 67 deletions)

@@ -130,20 +130,26 @@ static void blk_flush_restore_request(struct request *rq)
 	blk_clear_rq_complete(rq);
 }
 
-static void mq_flush_data_run(struct work_struct *work)
+static void mq_flush_run(struct work_struct *work)
 {
 	struct request *rq;
 
-	rq = container_of(work, struct request, mq_flush_data);
+	rq = container_of(work, struct request, mq_flush_work);
 
 	memset(&rq->csd, 0, sizeof(rq->csd));
 	blk_mq_run_request(rq, true, false);
 }
 
-static void blk_mq_flush_data_insert(struct request *rq)
+static bool blk_flush_queue_rq(struct request *rq)
 {
-	INIT_WORK(&rq->mq_flush_data, mq_flush_data_run);
-	kblockd_schedule_work(rq->q, &rq->mq_flush_data);
+	if (rq->q->mq_ops) {
+		INIT_WORK(&rq->mq_flush_work, mq_flush_run);
+		kblockd_schedule_work(rq->q, &rq->mq_flush_work);
+		return false;
+	} else {
+		list_add_tail(&rq->queuelist, &rq->q->queue_head);
+		return true;
+	}
 }
 
 /**
@@ -187,12 +193,7 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
 
 	case REQ_FSEQ_DATA:
 		list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
-		if (q->mq_ops)
-			blk_mq_flush_data_insert(rq);
-		else {
-			list_add(&rq->queuelist, &q->queue_head);
-			queued = true;
-		}
+		queued = blk_flush_queue_rq(rq);
 		break;
 
 	case REQ_FSEQ_DONE:
@@ -216,9 +217,6 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
 	}
 
 	kicked = blk_kick_flush(q);
-	/* blk_mq_run_flush will run queue */
-	if (q->mq_ops)
-		return queued;
 	return kicked | queued;
 }
 
@@ -230,10 +228,9 @@ static void flush_end_io(struct request *flush_rq, int error)
 	struct request *rq, *n;
 	unsigned long flags = 0;
 
-	if (q->mq_ops) {
-		blk_mq_free_request(flush_rq);
+	if (q->mq_ops)
 		spin_lock_irqsave(&q->mq_flush_lock, flags);
-	}
+
 	running = &q->flush_queue[q->flush_running_idx];
 	BUG_ON(q->flush_pending_idx == q->flush_running_idx);
 
@@ -263,48 +260,14 @@ static void flush_end_io(struct request *flush_rq, int error)
 	 * kblockd.
 	 */
 	if (queued || q->flush_queue_delayed) {
-		if (!q->mq_ops)
-			blk_run_queue_async(q);
-		else
-		/*
-		 * This can be optimized to only run queues with requests
-		 * queued if necessary.
-		 */
-			blk_mq_run_queues(q, true);
+		WARN_ON(q->mq_ops);
+		blk_run_queue_async(q);
 	}
 	q->flush_queue_delayed = 0;
 	if (q->mq_ops)
 		spin_unlock_irqrestore(&q->mq_flush_lock, flags);
 }
 
-static void mq_flush_work(struct work_struct *work)
-{
-	struct request_queue *q;
-	struct request *rq;
-
-	q = container_of(work, struct request_queue, mq_flush_work);
-
-	rq = blk_mq_alloc_request(q, WRITE_FLUSH|REQ_FLUSH_SEQ,
-		__GFP_WAIT|GFP_ATOMIC, false);
-	rq->cmd_type = REQ_TYPE_FS;
-	rq->end_io = flush_end_io;
-
-	blk_mq_run_request(rq, true, false);
-}
-
-/*
- * We can't directly use q->flush_rq, because it doesn't have tag and is not in
- * hctx->rqs[]. so we must allocate a new request, since we can't sleep here,
- * so offload the work to workqueue.
- *
- * Note: we assume a flush request finished in any hardware queue will flush
- * the whole disk cache.
- */
-static void mq_run_flush(struct request_queue *q)
-{
-	kblockd_schedule_work(q, &q->mq_flush_work);
-}
-
 /**
  * blk_kick_flush - consider issuing flush request
  * @q: request_queue being kicked
@@ -339,19 +302,31 @@ static bool blk_kick_flush(struct request_queue *q)
 	 * different from running_idx, which means flush is in flight.
 	 */
 	q->flush_pending_idx ^= 1;
+
 	if (q->mq_ops) {
-		mq_run_flush(q);
-		return true;
+		struct blk_mq_ctx *ctx = first_rq->mq_ctx;
+		struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+		blk_mq_rq_init(hctx, q->flush_rq);
+		q->flush_rq->mq_ctx = ctx;
+
+		/*
+		 * Reuse the tag value from the first waiting request,
+		 * with blk-mq the tag is generated during request
+		 * allocation and drivers can rely on it being inside
+		 * the range they asked for.
+		 */
+		q->flush_rq->tag = first_rq->tag;
+	} else {
+		blk_rq_init(q, q->flush_rq);
 	}
 
-	blk_rq_init(q, &q->flush_rq);
-	q->flush_rq.cmd_type = REQ_TYPE_FS;
-	q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
-	q->flush_rq.rq_disk = first_rq->rq_disk;
-	q->flush_rq.end_io = flush_end_io;
+	q->flush_rq->cmd_type = REQ_TYPE_FS;
+	q->flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
+	q->flush_rq->rq_disk = first_rq->rq_disk;
+	q->flush_rq->end_io = flush_end_io;
 
-	list_add_tail(&q->flush_rq.queuelist, &q->queue_head);
-	return true;
+	return blk_flush_queue_rq(q->flush_rq);
 }
 
 static void flush_data_end_io(struct request *rq, int error)
@@ -407,11 +382,8 @@ void blk_insert_flush(struct request *rq)
 	/*
 	 * @policy now records what operations need to be done.  Adjust
 	 * REQ_FLUSH and FUA for the driver.
-	 * We keep REQ_FLUSH for mq to track flush requests. For !FUA,
-	 * we never dispatch the request directly.
 	 */
-	if (rq->cmd_flags & REQ_FUA)
-		rq->cmd_flags &= ~REQ_FLUSH;
+	rq->cmd_flags &= ~REQ_FLUSH;
 	if (!(fflags & REQ_FUA))
 		rq->cmd_flags &= ~REQ_FUA;
 
@@ -560,5 +532,4 @@ EXPORT_SYMBOL(blkdev_issue_flush);
 void blk_mq_init_flush(struct request_queue *q)
 {
 	spin_lock_init(&q->mq_flush_lock);
-	INIT_WORK(&q->mq_flush_work, mq_flush_work);
 }

block/blk-mq.c (19 additions, 35 deletions)

@@ -194,27 +194,9 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 }
 
 static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
-					      gfp_t gfp, bool reserved,
-					      int rw)
+					      gfp_t gfp, bool reserved)
 {
-	struct request *req;
-	bool is_flush = false;
-	/*
-	 * flush need allocate a request, leave at least one request for
-	 * non-flush IO to avoid deadlock
-	 */
-	if ((rw & REQ_FLUSH) && !(rw & REQ_FLUSH_SEQ)) {
-		if (atomic_inc_return(&hctx->pending_flush) >=
-		    hctx->queue_depth - hctx->reserved_tags - 1) {
-			atomic_dec(&hctx->pending_flush);
-			return NULL;
-		}
-		is_flush = true;
-	}
-	req = blk_mq_alloc_rq(hctx, gfp, reserved);
-	if (!req && is_flush)
-		atomic_dec(&hctx->pending_flush);
-	return req;
+	return blk_mq_alloc_rq(hctx, gfp, reserved);
 }
 
 static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
@@ -227,7 +209,7 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
 		struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 		struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-		rq = __blk_mq_alloc_request(hctx, gfp & ~__GFP_WAIT, reserved, rw);
+		rq = __blk_mq_alloc_request(hctx, gfp & ~__GFP_WAIT, reserved);
 		if (rq) {
 			blk_mq_rq_ctx_init(q, ctx, rq, rw);
 			break;
@@ -244,15 +226,14 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
 	return rq;
 }
 
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
-		gfp_t gfp, bool reserved)
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp)
 {
 	struct request *rq;
 
 	if (blk_mq_queue_enter(q))
 		return NULL;
 
-	rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved);
+	rq = blk_mq_alloc_request_pinned(q, rw, gfp, false);
 	if (rq)
 		blk_mq_put_ctx(rq->mq_ctx);
 	return rq;
@@ -276,7 +257,7 @@ EXPORT_SYMBOL(blk_mq_alloc_reserved_request);
 /*
  * Re-init and set pdu, if we have it
  */
-static void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq)
+void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq)
 {
 	blk_rq_init(hctx->queue, rq);
 
@@ -290,9 +271,6 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
 	const int tag = rq->tag;
 	struct request_queue *q = rq->q;
 
-	if ((rq->cmd_flags & REQ_FLUSH) && !(rq->cmd_flags & REQ_FLUSH_SEQ))
-		atomic_dec(&hctx->pending_flush);
-
 	blk_mq_rq_init(hctx, rq);
 	blk_mq_put_tag(hctx->tags, tag);
 
@@ -946,14 +924,14 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
 	trace_block_getrq(q, bio, rw);
-	rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false, bio->bi_rw);
+	rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
 	if (likely(rq))
-		blk_mq_rq_ctx_init(q, ctx, rq, bio->bi_rw);
+		blk_mq_rq_ctx_init(q, ctx, rq, rw);
 	else {
 		blk_mq_put_ctx(ctx);
 		trace_block_sleeprq(q, bio, rw);
-		rq = blk_mq_alloc_request_pinned(q, bio->bi_rw,
-				__GFP_WAIT|GFP_ATOMIC, false);
+		rq = blk_mq_alloc_request_pinned(q, rw, __GFP_WAIT|GFP_ATOMIC,
+				false);
 		ctx = rq->mq_ctx;
 		hctx = q->mq_ops->map_queue(q, ctx->cpu);
 	}
@@ -1230,9 +1208,7 @@ static int blk_mq_init_hw_queues(struct request_queue *q,
 		hctx->queue_num = i;
 		hctx->flags = reg->flags;
 		hctx->queue_depth = reg->queue_depth;
-		hctx->reserved_tags = reg->reserved_tags;
 		hctx->cmd_size = reg->cmd_size;
-		atomic_set(&hctx->pending_flush, 0);
 
 		blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
 						blk_mq_hctx_notify, hctx);
@@ -1412,16 +1388,24 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
 	blk_mq_init_flush(q);
 	blk_mq_init_cpu_queues(q, reg->nr_hw_queues);
 
-	if (blk_mq_init_hw_queues(q, reg, driver_data))
+	q->flush_rq = kzalloc(round_up(sizeof(struct request) + reg->cmd_size,
+				cache_line_size()), GFP_KERNEL);
+	if (!q->flush_rq)
 		goto err_hw;
 
+	if (blk_mq_init_hw_queues(q, reg, driver_data))
+		goto err_flush_rq;
+
 	blk_mq_map_swqueue(q);
 
 	mutex_lock(&all_q_mutex);
 	list_add_tail(&q->all_q_node, &all_q_list);
 	mutex_unlock(&all_q_mutex);
 
 	return q;
+
+err_flush_rq:
+	kfree(q->flush_rq);
 err_hw:
 	kfree(q->mq_map);
 err_map:
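
A note on the allocation in the blk_mq_init_queue() hunk above: the preallocated flush request has to carry the same per-request driver payload (reg->cmd_size) as tag-allocated requests, so its size is rounded up to a cache line before kzalloc(). Below is a minimal stand-alone sketch of that arithmetic; the concrete byte counts are made-up placeholders, not kernel values.

#include <stdio.h>
#include <stddef.h>

/* Round x up to a power-of-two boundary a, like the kernel's round_up(). */
#define ROUND_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
	size_t rq_size   = 320;	/* placeholder for sizeof(struct request) */
	size_t cmd_size  = 24;	/* placeholder for reg->cmd_size (driver pdu) */
	size_t cacheline = 64;	/* placeholder for cache_line_size() */

	/* Mirrors: kzalloc(round_up(sizeof(struct request) + reg->cmd_size,
	 *                           cache_line_size()), GFP_KERNEL) */
	printf("flush_rq allocation: %zu bytes\n",
	       ROUND_UP(rq_size + cmd_size, cacheline));
	return 0;
}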

block/blk-mq.h (1 addition, 0 deletions)

@@ -28,6 +28,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_init_flush(struct request_queue *q);
 void blk_mq_drain_queue(struct request_queue *q);
 void blk_mq_free_queue(struct request_queue *q);
+void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq);
 
 /*
  * CPU hotplug helpers

block/blk-sysfs.c (2 additions, 0 deletions)

@@ -549,6 +549,8 @@ static void blk_release_queue(struct kobject *kobj)
 	if (q->mq_ops)
 		blk_mq_free_queue(q);
 
+	kfree(q->flush_rq);
+
 	blk_trace_shutdown(q);
 
 	bdi_destroy(&q->backing_dev_info);

include/linux/blk-mq.h (1 addition, 4 deletions)

@@ -36,15 +36,12 @@ struct blk_mq_hw_ctx {
 	struct list_head	page_list;
 	struct blk_mq_tags	*tags;
 
-	atomic_t		pending_flush;
-
 	unsigned long		queued;
 	unsigned long		run;
 #define BLK_MQ_MAX_DISPATCH_ORDER	10
 	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
 
 	unsigned int		queue_depth;
-	unsigned int		reserved_tags;
 	unsigned int		numa_node;
 	unsigned int		cmd_size;	/* per-request extra data */
 
@@ -129,7 +126,7 @@ void blk_mq_insert_request(struct request_queue *, struct request *,
 void blk_mq_run_queues(struct request_queue *q, bool async);
 void blk_mq_free_request(struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved);
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp);
 struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
 struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag);
