block: collapse blk_alloc_request() into get_request()
Allocation failure handling in get_request() is about to be updated.
To ease the update, collapse blk_alloc_request() into get_request().
This patch doesn't introduce any functional change.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 29e2b09ab5
parent f9fcc2d391
@@ -719,33 +719,6 @@ static inline void blk_free_request(struct request_queue *q, struct request *rq)
         mempool_free(rq, q->rq.rq_pool);
 }
 
-static struct request *
-blk_alloc_request(struct request_queue *q, struct bio *bio, struct io_cq *icq,
-                  unsigned int flags, gfp_t gfp_mask)
-{
-        struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
-
-        if (!rq)
-                return NULL;
-
-        blk_rq_init(q, rq);
-
-        rq->cmd_flags = flags | REQ_ALLOCED;
-
-        if (flags & REQ_ELVPRIV) {
-                rq->elv.icq = icq;
-                if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) {
-                        mempool_free(rq, q->rq.rq_pool);
-                        return NULL;
-                }
-                /* @rq->elv.icq holds on to io_context until @rq is freed */
-                if (icq)
-                        get_io_context(icq->ioc);
-        }
-
-        return rq;
-}
-
 /*
  * ioc_batching returns true if the ioc is a valid batching request and
  * should be given priority access to a request.
@@ -968,10 +941,25 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
                 goto fail_alloc;
         }
 
-        rq = blk_alloc_request(q, bio, icq, rw_flags, gfp_mask);
-        if (unlikely(!rq))
+        /* allocate and init request */
+        rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
+        if (!rq)
                 goto fail_alloc;
 
+        blk_rq_init(q, rq);
+        rq->cmd_flags = rw_flags | REQ_ALLOCED;
+
+        if (rw_flags & REQ_ELVPRIV) {
+                rq->elv.icq = icq;
+                if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) {
+                        mempool_free(rq, q->rq.rq_pool);
+                        goto fail_alloc;
+                }
+                /* @rq->elv.icq holds on to io_context until @rq is freed */
+                if (icq)
+                        get_io_context(icq->ioc);
+        }
+
         /*
          * ioc may be NULL here, and ioc_batching will be false. That's
          * OK, if the queue is under the request limit then requests need
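For context, here is a minimal, self-contained C sketch of the refactoring pattern the patch applies; the struct and the item_alloc()/item_get() names are hypothetical, not kernel code. Once a NULL-returning helper is collapsed into its caller, every failure point in the allocation sequence can branch to the caller's single fail_alloc label, which is what makes the upcoming failure-handling update easier to make.

/*
 * Sketch of "collapse helper into caller" only; not the kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

struct item {
        int flags;
};

/* Before: the helper hides its failure points behind one NULL return,
 * so the caller can only re-check the result after the fact. */
static struct item *item_alloc(int flags)
{
        struct item *it = malloc(sizeof(*it));

        if (!it)
                return NULL;
        it->flags = flags;
        return it;
}

/* After: with the helper collapsed into the caller, each failure point
 * jumps straight to a shared fail_alloc label, and later patches can
 * extend the failure handling in one place. */
static struct item *item_get(int flags)
{
        struct item *it = malloc(sizeof(*it));

        if (!it)
                goto fail_alloc;
        it->flags = flags;
        return it;

fail_alloc:
        /* single place to add retry or fallback logic later */
        return NULL;
}

int main(void)
{
        struct item *a = item_alloc(1);
        struct item *b = item_get(2);

        printf("alloc=%p get=%p\n", (void *)a, (void *)b);
        free(a);
        free(b);
        return 0;
}

The trade-off is a longer caller, but the allocation sequence and any future failure-handling changes now live in exactly one function, which mirrors what the diff above does to get_request().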