blk-mq: unshared timeout handler
Duplicate the (small) timeout handler in blk-mq so that we can pass arguments more easily to the driver timeout handler. This enables the next patch.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
commit 46f92d42ee
parent 81481eb423
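For context, the new blk_mq_rq_timed_out() in the diff below calls the driver's ->timeout callback directly through req->q->mq_ops and acts on its enum blk_eh_timer_return result itself. The following driver-side sketch illustrates that contract only; it is not part of the patch, and every my_* name is invented for the example.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Hypothetical helper: ask the hardware to abort the command. */
static bool my_try_abort(struct request *rq)
{
	return true;
}

/* Hypothetical driver timeout callback, matching the call site added in
 * this patch: ret = ops->timeout(req). */
static enum blk_eh_timer_return my_timeout(struct request *rq)
{
	if (my_try_abort(rq))
		return BLK_EH_HANDLED;		/* blk-mq calls __blk_mq_complete_request() */

	return BLK_EH_RESET_TIMER;		/* blk-mq re-arms the timer and clears COMPLETE */
}

static struct blk_mq_ops my_mq_ops = {
	.timeout	= my_timeout,		/* found via req->q->mq_ops->timeout */
	/* .queue_rq, .map_queue, etc. omitted */
};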
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -525,9 +525,15 @@ struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
 }
 EXPORT_SYMBOL(blk_mq_tag_to_rq);
 
-static enum blk_eh_timer_return blk_mq_rq_timed_out(struct request *rq)
+struct blk_mq_timeout_data {
+	unsigned long next;
+	unsigned int next_set;
+};
+
+static void blk_mq_rq_timed_out(struct request *req)
 {
-	struct request_queue *q = rq->q;
+	struct blk_mq_ops *ops = req->q->mq_ops;
+	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
 
 	/*
 	 * We know that complete is set at this point. If STARTED isn't set
@@ -538,27 +544,43 @@ static enum blk_eh_timer_return blk_mq_rq_timed_out(struct request *rq)
 	 * we both flags will get cleared. So check here again, and ignore
 	 * a timeout event with a request that isn't active.
 	 */
-	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
-		return BLK_EH_NOT_HANDLED;
+	if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
+		return;
 
-	if (!q->mq_ops->timeout)
-		return BLK_EH_RESET_TIMER;
+	if (ops->timeout)
+		ret = ops->timeout(req);
 
-	return q->mq_ops->timeout(rq);
+	switch (ret) {
+	case BLK_EH_HANDLED:
+		__blk_mq_complete_request(req);
+		break;
+	case BLK_EH_RESET_TIMER:
+		blk_add_timer(req);
+		blk_clear_rq_complete(req);
+		break;
+	case BLK_EH_NOT_HANDLED:
+		break;
+	default:
+		printk(KERN_ERR "block: bad eh return: %d\n", ret);
+		break;
+	}
 }
 
-struct blk_mq_timeout_data {
-	unsigned long next;
-	unsigned int next_set;
-};
-
 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 		struct request *rq, void *priv, bool reserved)
 {
 	struct blk_mq_timeout_data *data = priv;
 
-	if (test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
-		blk_rq_check_expired(rq, &data->next, &data->next_set);
+	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+		return;
+
+	if (time_after_eq(jiffies, rq->deadline)) {
+		if (!blk_mark_rq_complete(rq))
+			blk_mq_rq_timed_out(rq);
+	} else if (!data->next_set || time_after(data->next, rq->deadline)) {
+		data->next = rq->deadline;
+		data->next_set = 1;
+	}
 }
 
 static void blk_mq_rq_timer(unsigned long priv)
@@ -1781,7 +1803,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	else
 		blk_queue_make_request(q, blk_sq_make_request);
 
-	blk_queue_rq_timed_out(q, blk_mq_rq_timed_out);
 	if (set->timeout)
 		blk_queue_rq_timeout(q, set->timeout);
 
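The blk_mq_check_expired() hunk above now does the per-request bookkeeping inline: a started request whose deadline has passed is marked complete and handed to blk_mq_rq_timed_out(), while every other started request only updates the soonest future deadline used to re-arm the timer. The small standalone C model below illustrates that bookkeeping under simplifying assumptions (plain integer time instead of jiffies and the wraparound-safe time_after()/time_after_eq() macros, and invented names); it is not kernel code.

#include <stdbool.h>
#include <stdio.h>

struct timeout_data {
	unsigned long next;	/* soonest deadline seen so far */
	bool next_set;
};

static void check_one(unsigned long now, unsigned long deadline,
		      struct timeout_data *data)
{
	if (now >= deadline) {
		/* expired: the kernel would mark the request complete and
		 * call blk_mq_rq_timed_out() here */
		printf("deadline %lu expired\n", deadline);
	} else if (!data->next_set || deadline < data->next) {
		/* still pending: remember the soonest future deadline so
		 * the timer can be re-armed for it */
		data->next = deadline;
		data->next_set = true;
	}
}

int main(void)
{
	struct timeout_data data = { 0, false };
	unsigned long now = 100;
	unsigned long deadlines[] = { 90, 150, 120 };

	for (int i = 0; i < 3; i++)
		check_one(now, deadlines[i], &data);

	if (data.next_set)
		printf("re-arm timer for %lu\n", data.next);	/* prints 120 */
	return 0;
}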
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -7,7 +7,6 @@
 #include <linux/fault-inject.h>
 
 #include "blk.h"
-#include "blk-mq.h"
 
 #ifdef CONFIG_FAIL_IO_TIMEOUT
 
@@ -90,9 +89,6 @@ static void blk_rq_timed_out(struct request *req)
 	switch (ret) {
 	case BLK_EH_HANDLED:
 		/* Can we use req->errors here? */
-		if (q->mq_ops)
-			__blk_mq_complete_request(req);
-		else
-			__blk_complete_request(req);
+		__blk_complete_request(req);
 		break;
 	case BLK_EH_RESET_TIMER:
@@ -113,7 +109,7 @@ static void blk_rq_timed_out(struct request *req)
 	}
 }
 
-void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
+static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
 		unsigned int *next_set)
 {
 	if (time_after_eq(jiffies, rq->deadline)) {
--- a/block/blk.h
+++ b/block/blk.h
@@ -38,8 +38,6 @@ bool __blk_end_bidi_request(struct request *rq, int error,
 			    unsigned int nr_bytes, unsigned int bidi_bytes);
 
 void blk_rq_timed_out_timer(unsigned long data);
-void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
-			  unsigned int *next_set);
 unsigned long blk_rq_timeout(unsigned long timeout);
 void blk_add_timer(struct request *req);
 void blk_delete_timer(struct request *);