blk-mq: fix for flush deadlock
The flush state machine takes in a struct request, which is then submitted multiple times to the underlying driver. The old block code reuses the same request for each of those submissions, so it has no issue with tapping into the request pool. The new code, on the other hand, allocates a new request for each of the actual steps of the flush sequence. If we have already allocated all of the tags for I/O, we will fail to allocate the flush request.

Set aside a reserved request just for flushes.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 280d45f6c3
commit 3228f48be2
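For context, a minimal sketch of the allocation pattern this change enables. The wrapper name alloc_flush_rq below is illustrative, not part of the patch; the flags and the final true argument are taken from the blk-flush.c hunk:

	/*
	 * The flush path allocates from the reserved tag set aside at
	 * queue init time, so it can still make progress when all
	 * regular tags are held by in-flight I/O. Normal allocations
	 * pass reserved = false and compete for the shared pool.
	 */
	static struct request *alloc_flush_rq(struct request_queue *q)
	{
		return blk_mq_alloc_request(q, WRITE_FLUSH | REQ_FLUSH_SEQ,
					    __GFP_WAIT | GFP_ATOMIC, true);
	}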
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1102,7 +1102,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
 struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 {
 	if (q->mq_ops)
-		return blk_mq_alloc_request(q, rw, gfp_mask);
+		return blk_mq_alloc_request(q, rw, gfp_mask, false);
 	else
 		return blk_old_get_request(q, rw, gfp_mask);
 }
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -286,7 +286,7 @@ static void mq_flush_work(struct work_struct *work)
 
 	/* We don't need set REQ_FLUSH_SEQ, it's for consistency */
 	rq = blk_mq_alloc_request(q, WRITE_FLUSH|REQ_FLUSH_SEQ,
-		__GFP_WAIT|GFP_ATOMIC);
+		__GFP_WAIT|GFP_ATOMIC, true);
 	rq->cmd_type = REQ_TYPE_FS;
 	rq->end_io = flush_end_io;
 
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -210,14 +210,15 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
 	return rq;
 }
 
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp)
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
+		gfp_t gfp, bool reserved)
 {
 	struct request *rq;
 
 	if (blk_mq_queue_enter(q))
 		return NULL;
 
-	rq = blk_mq_alloc_request_pinned(q, rw, gfp, false);
+	rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved);
 	blk_mq_put_ctx(rq->mq_ctx);
 	return rq;
 }
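One caveat visible in this hunk: blk_mq_alloc_request() returns NULL when blk_mq_queue_enter() fails, so callers must check the result. A hypothetical caller, using only names from this diff:

	struct request *rq;

	/* false = allocate from the shared pool, not the reserved tag */
	rq = blk_mq_alloc_request(q, READ, GFP_KERNEL, false);
	if (!rq)
		return;		/* blk_mq_queue_enter() failed */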
@@ -1327,6 +1328,15 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
 		reg->queue_depth = BLK_MQ_MAX_DEPTH;
 	}
 
+	/*
+	 * Set aside a tag for flush requests. It will only be used while
+	 * another flush request is in progress but outside the driver.
+	 *
+	 * TODO: only allocate if flushes are supported
+	 */
+	reg->queue_depth++;
+	reg->reserved_tags++;
+
 	if (reg->queue_depth < (reg->reserved_tags + BLK_MQ_TAG_MIN))
 		return ERR_PTR(-EINVAL);
 
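A worked example of the new tag accounting, using only the fields this hunk touches (the depth is illustrative):

	struct blk_mq_reg reg = {
		.queue_depth	= 64,	/* tags the driver asked for */
		.reserved_tags	= 0,
	};

	/*
	 * blk_mq_init_queue() now bumps both counters before validating:
	 *   reg.queue_depth   == 65
	 *   reg.reserved_tags == 1
	 * and then still enforces
	 *   queue_depth >= reserved_tags + BLK_MQ_TAG_MIN.
	 */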
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -124,7 +124,7 @@ void blk_mq_insert_request(struct request_queue *, struct request *, bool);
 void blk_mq_run_queues(struct request_queue *q, bool async);
 void blk_mq_free_request(struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp);
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved);
 struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
 struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag);
 