io-wq: reorder cancellation pending -> running
Go over all pending lists and cancel works there, and only then try to
match running requests. No functional changes here, just a preparation
for bulk cancellation.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 59960b9deb
commit f4c2665e33

fs/io-wq.c | 54
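For illustration only (not part of the patch): a minimal, self-contained userspace sketch of the ordering this change establishes in io_wq_cancel_cb() — walk every node's pending list first, and only fall back to matching running work if nothing pending matched. All names below (node_queue, cancel_pending, cancel_cb, ...) are made up for the example and do not exist in io-wq.

    /*
     * Illustration only: a userspace model of "cancel pending before
     * running" across several per-node queues. Hypothetical names.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define NR_NODES 2
    #define MAX_WORK 4

    struct work {
            int id;
            bool pending;   /* still queued, not picked up by a worker */
            bool running;   /* currently being executed by a worker */
    };

    struct node_queue {
            struct work works[MAX_WORK];
            int nr;
    };

    enum cancel_result { CANCEL_OK, CANCEL_RUNNING, CANCEL_NOTFOUND };

    /* Phase 1: try to pull a matching work off a node's pending list. */
    static bool cancel_pending(struct node_queue *q, int id)
    {
            for (int i = 0; i < q->nr; i++) {
                    if (q->works[i].pending && q->works[i].id == id) {
                            q->works[i].pending = false;    /* returned as-new */
                            return true;
                    }
            }
            return false;
    }

    /* Phase 2: check whether a matching work is already running. */
    static bool cancel_running(const struct node_queue *q, int id)
    {
            for (int i = 0; i < q->nr; i++) {
                    if (q->works[i].running && q->works[i].id == id)
                            return true;    /* signalled; completion still posted */
            }
            return false;
    }

    /* All pending lists are scanned before any running work is touched. */
    static enum cancel_result cancel_cb(struct node_queue *nodes, int id)
    {
            for (int n = 0; n < NR_NODES; n++)
                    if (cancel_pending(&nodes[n], id))
                            return CANCEL_OK;
            for (int n = 0; n < NR_NODES; n++)
                    if (cancel_running(&nodes[n], id))
                            return CANCEL_RUNNING;
            return CANCEL_NOTFOUND;
    }

    int main(void)
    {
            struct node_queue nodes[NR_NODES] = {
                    { .works = { { .id = 1, .running = true } }, .nr = 1 },
                    { .works = { { .id = 1, .pending = true } }, .nr = 1 },
            };

            /* Prints 0 (CANCEL_OK): the pending copy on node 1 is cancelled
             * even though node 0 already has the same work running. */
            printf("%d\n", cancel_cb(nodes, 1));
            return 0;
    }

In the sketch, as in the patch below, a match found on any pending list short-circuits the search; running work is only consulted once every node's pending list has been checked.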
@@ -927,19 +927,14 @@ static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
 	return ret;
 }
 
-static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
-					    struct io_cb_cancel_data *match)
+static bool io_wqe_cancel_pending_work(struct io_wqe *wqe,
+				       struct io_cb_cancel_data *match)
 {
 	struct io_wq_work_node *node, *prev;
 	struct io_wq_work *work;
 	unsigned long flags;
 	bool found = false;
 
-	/*
-	 * First check pending list, if we're lucky we can just remove it
-	 * from there. CANCEL_OK means that the work is returned as-new,
-	 * no completion will be posted for it.
-	 */
 	spin_lock_irqsave(&wqe->lock, flags);
 	wq_list_for_each(node, prev, &wqe->work_list) {
 		work = container_of(node, struct io_wq_work, list);
@@ -952,21 +947,20 @@ static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
 	}
 	spin_unlock_irqrestore(&wqe->lock, flags);
 
-	if (found) {
+	if (found)
 		io_run_cancel(work, wqe);
-		return IO_WQ_CANCEL_OK;
-	}
+	return found;
+}
+
+static bool io_wqe_cancel_running_work(struct io_wqe *wqe,
+				       struct io_cb_cancel_data *match)
+{
+	bool found;
 
-	/*
-	 * Now check if a free (going busy) or busy worker has the work
-	 * currently running. If we find it there, we'll return CANCEL_RUNNING
-	 * as an indication that we attempt to signal cancellation. The
-	 * completion will run normally in this case.
-	 */
 	rcu_read_lock();
 	found = io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
 	rcu_read_unlock();
-	return found ? IO_WQ_CANCEL_RUNNING : IO_WQ_CANCEL_NOTFOUND;
+	return found;
 }
 
 enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
@@ -976,18 +970,34 @@ enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
 		.fn	= cancel,
 		.data	= data,
 	};
-	enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
 	int node;
 
+	/*
+	 * First check pending list, if we're lucky we can just remove it
+	 * from there. CANCEL_OK means that the work is returned as-new,
+	 * no completion will be posted for it.
+	 */
 	for_each_node(node) {
 		struct io_wqe *wqe = wq->wqes[node];
 
-		ret = io_wqe_cancel_work(wqe, &match);
-		if (ret != IO_WQ_CANCEL_NOTFOUND)
-			break;
+		if (io_wqe_cancel_pending_work(wqe, &match))
+			return IO_WQ_CANCEL_OK;
 	}
 
-	return ret;
+	/*
+	 * Now check if a free (going busy) or busy worker has the work
+	 * currently running. If we find it there, we'll return CANCEL_RUNNING
+	 * as an indication that we attempt to signal cancellation. The
+	 * completion will run normally in this case.
+	 */
+	for_each_node(node) {
+		struct io_wqe *wqe = wq->wqes[node];
+
+		if (io_wqe_cancel_running_work(wqe, &match))
+			return IO_WQ_CANCEL_RUNNING;
+	}
+
+	return IO_WQ_CANCEL_NOTFOUND;
 }
 
 static bool io_wq_io_cb_cancel_data(struct io_wq_work *work, void *data)