io_uring-5.8-2020-07-05
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAl8BDx4QHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpvhzD/4rxzJsn6ukrsxMXFaKIrjZ/hkcRJIMNozz
YWu4PwcDvszvZu66MeAu0tnCttzxlIgP8oCm6cx9ImMQwkYIVbV0q1XJ3wmzUQpZ
pEDW4j0j8hgcLhfZH9ojUAkTP8TnltakxkrwC6egUvnT0vuKDUy5ISbkl4uxWYpH
p4Dq7ASqy8xjtzac/VLTSzBgzhTMSic5NMJY21md9eAaFB1vYBmDyHB3O1bEk4kw
pvWGFm7a4qssnAB61SMfq3nWQ9UA0+XX4a+CWEzJIMqj4H6UpjOCQU23X1AlaLJX
ILeq26PwoZQF8cS4D83tMnmPWz1LqslBgnUuAGCVLsT7omvhDLM75iFBpMzWglLu
No8TlxLZ+Dga04vpjeEptWqSfUS6K879cNJuFGjadBogq06SImIVDHXXTrPhtCGg
B9+uFHkOUlIkjM5h2zqdkmhnbf0sWodowIrx7+aL294QVlqnY0uBR9eh6+CSKT+h
PhJ+FhN+N6B1dTyryaO5hMjyg0h4ZpvIMT3HBpNXtnRVlUT2+OYN3g5HHt6z//Rp
eeJTh7pnY7uT60c8x96kySwQIydXSKBI+7ysLlntgiyvutbzaC5Fq7/f1YTWyNVk
zqM/+FuJUsstu0y/GBEDpglpL1+S9VjNcJUDpUMUKwCAkh7TnI/ATo1rn9GiM1n1
SQZ4HcaCYw==
=Uawr
-----END PGP SIGNATURE-----

Merge tag 'io_uring-5.8-2020-07-05' of git://git.kernel.dk/linux-block

Pull io_uring fix from Jens Axboe:
 "Andres reported a regression with the fix that was merged earlier this
  week, where his setup of using signals to interrupt io_uring CQ waits
  no longer worked correctly. Fix this, and also limit our use of
  TWA_SIGNAL to the case where we need it, and continue using TWA_RESUME
  for task_work as before.

  Since the original is marked for 5.7 stable, let's flush this one out
  early"

* tag 'io_uring-5.8-2020-07-05' of git://git.kernel.dk/linux-block:
  io_uring: fix regression with always ignoring signals in io_cqring_wait()
commit 9fbe565cb7
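For context, the failure mode described above is an application that parks itself in an io_uring CQ wait and relies on an ordinary signal to knock it out of that wait. Below is a minimal userspace sketch of that pattern, assuming liburing is available; the SIGALRM handler and one-second alarm are illustrative stand-ins, not details taken from Andres' report. With the regression, the wait would sleep through the signal; with this fix, io_uring_wait_cqe() returns -EINTR once the signal is delivered.

/*
 * Hypothetical demo program (not part of this commit): block in a CQ wait
 * with nothing submitted, then interrupt the wait with SIGALRM.
 */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <liburing.h>

static void on_alarm(int sig)
{
	(void)sig;	/* the handler only exists to interrupt the syscall */
}

int main(void)
{
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	struct sigaction sa;
	int ret;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_alarm;	/* sa_flags == 0, so no SA_RESTART */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGALRM, &sa, NULL);
	alarm(1);			/* fires while we are blocked below */

	/* Nothing was submitted, so this sleeps in the kernel's io_cqring_wait(). */
	ret = io_uring_wait_cqe(&ring, &cqe);
	printf("io_uring_wait_cqe() = %d (%s)\n",
	       ret, ret < 0 ? strerror(-ret) : "got a completion");

	io_uring_queue_exit(&ring);
	return ret == -EINTR ? 0 : 1;
}

On a kernel with this fix applied, the program should print -EINTR (Interrupted system call); something like "cc demo.c -o demo -luring" builds it, assuming the liburing headers are installed.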
@@ -4072,14 +4072,22 @@ struct io_poll_table {
 	int error;
 };
 
-static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb,
-				int notify)
+static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb)
 {
 	struct task_struct *tsk = req->task;
-	int ret;
+	struct io_ring_ctx *ctx = req->ctx;
+	int ret, notify = TWA_RESUME;
 
-	if (req->ctx->flags & IORING_SETUP_SQPOLL)
+	/*
+	 * SQPOLL kernel thread doesn't need notification, just a wakeup.
+	 * If we're not using an eventfd, then TWA_RESUME is always fine,
+	 * as we won't have dependencies between request completions for
+	 * other kernel wait conditions.
+	 */
+	if (ctx->flags & IORING_SETUP_SQPOLL)
 		notify = 0;
+	else if (ctx->cq_ev_fd)
+		notify = TWA_SIGNAL;
 
 	ret = task_work_add(tsk, cb, notify);
 	if (!ret)
@@ -4110,7 +4118,7 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
 	 * of executing it. We can't safely execute it anyway, as we may not
 	 * have the needed state needed for it anyway.
 	 */
-	ret = io_req_task_work_add(req, &req->task_work, TWA_SIGNAL);
+	ret = io_req_task_work_add(req, &req->task_work);
 	if (unlikely(ret)) {
 		WRITE_ONCE(poll->canceled, true);
 		tsk = io_wq_get_task(req->ctx->io_wq);
@@ -6201,7 +6209,14 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 		if (current->task_works)
 			task_work_run();
 		if (signal_pending(current)) {
-			ret = -ERESTARTSYS;
+			if (current->jobctl & JOBCTL_TASK_WORK) {
+				spin_lock_irq(&current->sighand->siglock);
+				current->jobctl &= ~JOBCTL_TASK_WORK;
+				recalc_sigpending();
+				spin_unlock_irq(&current->sighand->siglock);
+				continue;
+			}
+			ret = -EINTR;
 			break;
 		}
 		if (io_should_wake(&iowq, false))
@@ -6210,7 +6225,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 	} while (1);
 	finish_wait(&ctx->wait, &iowq.wq);
 
-	restore_saved_sigmask_unless(ret == -ERESTARTSYS);
+	restore_saved_sigmask_unless(ret == -EINTR);
 
 	return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
 }