io_uring-5.5-2020-01-16
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAl4hQEoQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpjZ0D/9X0JXHTlP8qebVw0Rjnb838TJtygwBZ8bm
EsvEYP9lOJbR15V2WGWO2daNaaKMouglMQ8OWYMNGvREDtfNBxy3mE8ZVnpG385R
RUweqCIK0rHpAfSRr4Nh9GwIeMyomLzOeumjVzXATsUS1o2+bCfv34pe22uikpgx
njA2ab389hS2b9fMOFf78odazOMiCQSW7a2dwO1+5TNWtmYCei3SNPZuqZucvRPr
9iSnZswJZb8KqyGyuJo6dQQhvurXgAM8LRglc6KIJ1NpyJCgPzyULYEYOvLyLHLo
USvvivi5xFeUQy1x7w72Xu3dQ0Jg+i9nSDiAACM+ehCdVcKC0OcbFcvPJ06iH9V3
RRdBUBJHHXSzklVHpo44iwZcmPNQNAWwM/vtlsrT9ln9fkgLeHG3zsScKOcv9fFw
9YmtmZQkw9Zst5wghiOQsLhwsUndOPLLUbtiNGmUr1eKXeRYekFpO++HI/DwkWhN
rFVJiHbMxIP0k7uk54sNPoHrXthfNiiFjOf4eZDV20xwVJ0xenmYpfW8XW447r3W
C2dGRtRBbm598OCV0PzXFd1vIUKAr8b8fJwS3gZzZOH0uYbYr79AOn1cs2F//0M0
MUXZo9LHfpfeGkMzimiPZj7lrZEI4LPAjYc1mnt4fXhuzPhYAinkcU3tQRY0T+ia
4YqjdDtD3Q==
=MhHp
-----END PGP SIGNATURE-----

Merge tag 'io_uring-5.5-2020-01-16' of git://git.kernel.dk/linux-block

Pull io_uring fixes from Jens Axboe:

 - Ensure ->result is always set when IO is retried (Bijan)

 - In conjunction with the above, fix a regression in polled IO issue
   when retried (me/Bijan)

 - Don't setup async context for read/write fixed, otherwise we may
   wrongly map the iovec on retry (me)

 - Cancel io-wq work if we fail getting mm reference (me)

 - Ensure dependent work is always initialized correctly (me)

 - Only allow original task to submit IO, don't allow it from a passed
   ring fd (me)

* tag 'io_uring-5.5-2020-01-16' of git://git.kernel.dk/linux-block:
  io_uring: only allow submit from owning task
  io_uring: ensure workqueue offload grabs ring mutex for poll list
  io_uring: clear req->result always before issuing a read/write request
  io_uring: be consistent in assigning next work from handler
  io-wq: cancel work if we fail getting a mm reference
  io_uring: don't setup async context for read/write fixed
commit 25e73aadf2

fs/io-wq.c
@@ -445,10 +445,14 @@ static void io_worker_handle_work(struct io_worker *worker)
 			task_unlock(current);
 		}
 		if ((work->flags & IO_WQ_WORK_NEEDS_USER) && !worker->mm &&
-		    wq->mm && mmget_not_zero(wq->mm)) {
-			use_mm(wq->mm);
-			set_fs(USER_DS);
-			worker->mm = wq->mm;
+		    wq->mm) {
+			if (mmget_not_zero(wq->mm)) {
+				use_mm(wq->mm);
+				set_fs(USER_DS);
+				worker->mm = wq->mm;
+			} else {
+				work->flags |= IO_WQ_WORK_CANCEL;
+			}
 		}
 		if (!worker->creds)
 			worker->creds = override_creds(wq->creds);
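mmget_not_zero() succeeds only while the mm's user count is still non-zero; once the owning task has begun tearing down its address space, the worker can no longer attach to it, so the work is flagged IO_WQ_WORK_CANCEL rather than run without an mm. A minimal userspace sketch of that take-a-reference-or-cancel pattern, with all names hypothetical and C11 atomics standing in for the kernel's refcount helpers:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical work flag, mirroring IO_WQ_WORK_CANCEL. */
#define WORK_CANCEL	(1 << 0)

struct resource {
	atomic_int refs;	/* 0 means the owner is already gone */
};

/* Analogue of mmget_not_zero(): take a reference only if one is left. */
static bool get_not_zero(struct resource *res)
{
	int old = atomic_load(&res->refs);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&res->refs, &old, old + 1))
			return true;
	}
	return false;
}

static void handle_work(struct resource *res, unsigned *flags)
{
	if (get_not_zero(res)) {
		/* safe to use the resource; drop the reference when done */
		atomic_fetch_sub(&res->refs, 1);
	} else {
		/* owner went away: cancel instead of running the work */
		*flags |= WORK_CANCEL;
	}
}

int main(void)
{
	struct resource live = { .refs = 1 }, dead = { .refs = 0 };
	unsigned f1 = 0, f2 = 0;

	handle_work(&live, &f1);
	handle_work(&dead, &f2);
	printf("live cancelled: %d, dead cancelled: %d\n",
	       !!(f1 & WORK_CANCEL), !!(f2 & WORK_CANCEL));
	return 0;
}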
@@ -1786,6 +1786,9 @@ static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
 			     struct iovec *iovec, struct iovec *fast_iov,
 			     struct iov_iter *iter)
 {
+	if (req->opcode == IORING_OP_READ_FIXED ||
+	    req->opcode == IORING_OP_WRITE_FIXED)
+		return 0;
 	if (!req->io && io_alloc_async_ctx(req))
 		return -ENOMEM;
 
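The fixed read/write opcodes carry no user iovec that needs preserving across a retry: the request refers to buffers registered with the kernel up front, so allocating an async context and copying an iovec would map the wrong memory on reissue. A rough liburing sketch of the registered-buffer flow these opcodes assume (liburing availability assumed, error handling elided for brevity):

#include <liburing.h>
#include <fcntl.h>
#include <stdlib.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct iovec iov = { .iov_base = malloc(4096), .iov_len = 4096 };
	int fd = open("/etc/hostname", O_RDONLY);

	io_uring_queue_init(8, &ring, 0);
	/* register the buffer once; the kernel pins and maps it up front */
	io_uring_register_buffers(&ring, &iov, 1);

	sqe = io_uring_get_sqe(&ring);
	/* buf_index 0 names the registered buffer, not a user iovec */
	io_uring_prep_read_fixed(sqe, fd, iov.iov_base, iov.iov_len, 0, 0);
	io_uring_submit(&ring);

	io_uring_wait_cqe(&ring, &cqe);
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}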
@@ -1840,6 +1843,7 @@ static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
 	if (!force_nonblock)
 		req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;
 
+	req->result = 0;
 	io_size = ret;
 	if (req->flags & REQ_F_LINK)
 		req->result = io_size;
@@ -1927,6 +1931,7 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
 	if (!force_nonblock)
 		req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;
 
+	req->result = 0;
 	io_size = ret;
 	if (req->flags & REQ_F_LINK)
 		req->result = io_size;
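Both read and write now clear req->result before issue because the IOPOLL path (see the io_issue_sqe hunk further down) keys off a leftover -EAGAIN to decide whether to retry; a stale value from a previous attempt would otherwise be misread. The underlying rule, reset per-attempt state before every (re)issue, can be shown in miniature; the names below are hypothetical:

#include <errno.h>
#include <stdio.h>

/* Hypothetical request: result persists across retries unless cleared. */
struct request {
	int result;
};

static void issue(struct request *req, int attempt)
{
	if (attempt == 0) {
		req->result = -EAGAIN;	/* first pass: would-block */
		return;
	}
	/* second pass completes but writes no result here -- a stale
	 * -EAGAIN from the first pass must not look like "retry again" */
}

int main(void)
{
	struct request req;

	for (int attempt = 0; attempt < 2; attempt++) {
		req.result = 0;	/* the fix: clear before every (re)issue */
		issue(&req, attempt);
		if (req.result == -EAGAIN)
			continue;
		break;
	}
	printf("final result: %d\n", req.result);
	return 0;
}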
@@ -2034,6 +2039,28 @@ static bool io_req_cancelled(struct io_kiocb *req)
 	return false;
 }
 
+static void io_link_work_cb(struct io_wq_work **workptr)
+{
+	struct io_wq_work *work = *workptr;
+	struct io_kiocb *link = work->data;
+
+	io_queue_linked_timeout(link);
+	work->func = io_wq_submit_work;
+}
+
+static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt)
+{
+	struct io_kiocb *link;
+
+	io_prep_async_work(nxt, &link);
+	*workptr = &nxt->work;
+	if (link) {
+		nxt->work.flags |= IO_WQ_WORK_CB;
+		nxt->work.func = io_link_work_cb;
+		nxt->work.data = link;
+	}
+}
+
 static void io_fsync_finish(struct io_wq_work **workptr)
 {
 	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
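These two helpers centralize what every *_finish handler below now calls: instead of each handler open-coding how dependent work is handed back, io_wq_assign_next() both stores the next item through workptr and fixes up its callback and link state, so no caller can forget a step. The work-through-pointer convention itself, where a handler replaces *workptr to hand the worker its next job, can be sketched in plain C (names hypothetical):

#include <stdio.h>

struct work {
	void (*func)(struct work **workptr);
	const char *name;
};

static struct work second;

static void print_work(struct work **workptr)
{
	printf("running %s\n", (*workptr)->name);
}

/* A handler hands a dependent item back by replacing *workptr. */
static void first_handler(struct work **workptr)
{
	printf("running %s\n", (*workptr)->name);
	*workptr = &second;	/* loose analogue of io_wq_assign_next() */
}

int main(void)
{
	struct work first = { .func = first_handler, .name = "first" };
	struct work *cur = &first;

	second = (struct work){ .func = print_work, .name = "second" };

	/* worker loop: keep going while the handler swaps in new work */
	while (cur) {
		struct work *prev = cur;

		cur->func(&cur);
		if (cur == prev)	/* no next work was assigned */
			break;
	}
	return 0;
}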
@@ -2052,7 +2079,7 @@ static void io_fsync_finish(struct io_wq_work **workptr)
 	io_cqring_add_event(req, ret);
 	io_put_req_find_next(req, &nxt);
 	if (nxt)
-		*workptr = &nxt->work;
+		io_wq_assign_next(workptr, nxt);
 }
 
 static int io_fsync(struct io_kiocb *req, struct io_kiocb **nxt,
@@ -2108,7 +2135,7 @@ static void io_sync_file_range_finish(struct io_wq_work **workptr)
 	io_cqring_add_event(req, ret);
 	io_put_req_find_next(req, &nxt);
 	if (nxt)
-		*workptr = &nxt->work;
+		io_wq_assign_next(workptr, nxt);
 }
 
 static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt,
@@ -2374,7 +2401,7 @@ static void io_accept_finish(struct io_wq_work **workptr)
 		return;
 	__io_accept(req, &nxt, false);
 	if (nxt)
-		*workptr = &nxt->work;
+		io_wq_assign_next(workptr, nxt);
 }
 #endif
 
@@ -2605,7 +2632,7 @@ static void io_poll_complete_work(struct io_wq_work **workptr)
 		req_set_fail_links(req);
 	io_put_req_find_next(req, &nxt);
 	if (nxt)
-		*workptr = &nxt->work;
+		io_wq_assign_next(workptr, nxt);
 }
 
 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
@@ -3259,24 +3286,24 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		return ret;
 
 	if (ctx->flags & IORING_SETUP_IOPOLL) {
+		const bool in_async = io_wq_current_is_worker();
+
 		if (req->result == -EAGAIN)
 			return -EAGAIN;
 
+		/* workqueue context doesn't hold uring_lock, grab it now */
+		if (in_async)
+			mutex_lock(&ctx->uring_lock);
+
 		io_iopoll_req_issued(req);
+
+		if (in_async)
+			mutex_unlock(&ctx->uring_lock);
 	}
 
 	return 0;
 }
 
-static void io_link_work_cb(struct io_wq_work **workptr)
-{
-	struct io_wq_work *work = *workptr;
-	struct io_kiocb *link = work->data;
-
-	io_queue_linked_timeout(link);
-	work->func = io_wq_submit_work;
-}
-
 static void io_wq_submit_work(struct io_wq_work **workptr)
 {
 	struct io_wq_work *work = *workptr;
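The poll-list hunk above closes a locking hole: the syscall submission path already holds ctx->uring_lock when io_issue_sqe() runs, but an io-wq worker retrying the same request does not, so the worker must take the mutex itself before touching the poll list. A toy pthreads version of "lock only if this call site doesn't already hold it", with hypothetical names, looks like:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static int poll_list_len;	/* stand-in for the IOPOLL list */

/* caller_holds_lock mirrors !io_wq_current_is_worker(): the direct
 * submission path enters with the lock held, the async worker doesn't */
static void issue_polled(bool caller_holds_lock)
{
	if (!caller_holds_lock)
		pthread_mutex_lock(&list_lock);

	poll_list_len++;	/* touch state the lock protects */

	if (!caller_holds_lock)
		pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	/* direct path: caller takes the lock, as the syscall path does */
	pthread_mutex_lock(&list_lock);
	issue_polled(true);
	pthread_mutex_unlock(&list_lock);

	/* async worker path: let the callee grab the lock itself */
	issue_polled(false);

	printf("list length: %d\n", poll_list_len);
	return 0;
}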
@@ -3313,17 +3340,8 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
 	}
 
 	/* if a dependent link is ready, pass it back */
-	if (!ret && nxt) {
-		struct io_kiocb *link;
-
-		io_prep_async_work(nxt, &link);
-		*workptr = &nxt->work;
-		if (link) {
-			nxt->work.flags |= IO_WQ_WORK_CB;
-			nxt->work.func = io_link_work_cb;
-			nxt->work.data = link;
-		}
-	}
+	if (!ret && nxt)
+		io_wq_assign_next(workptr, nxt);
 }
 
 static bool io_req_op_valid(int op)
@@ -5141,6 +5159,12 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 	} else if (to_submit) {
 		struct mm_struct *cur_mm;
 
+		if (current->mm != ctx->sqo_mm ||
+		    current_cred() != ctx->creds) {
+			ret = -EPERM;
+			goto out;
+		}
+
 		to_submit = min(to_submit, ctx->sq_entries);
 		mutex_lock(&ctx->uring_lock);
 		/* already have mm, so io_submit_sqes() won't try to grab it */
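This io_uring_enter hunk is the user-visible change: the ring's mm and creds are recorded at setup, and any task whose current->mm or credentials differ is refused at submit time. Even a fork()ed child, which gets its own copy of the address space, should see -EPERM, while threads of the creating process share the mm and may still submit. A hedged userspace probe of that behavior follows; it assumes your headers expose the io_uring syscall numbers, and the EPERM outcome depends on running a kernel with this fix:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <linux/io_uring.h>
#include <unistd.h>

int main(void)
{
	struct io_uring_params p;
	int fd;

	memset(&p, 0, sizeof(p));
	fd = syscall(__NR_io_uring_setup, 8, &p);
	if (fd < 0) {
		perror("io_uring_setup");
		return 1;
	}

	if (fork() == 0) {
		/* child: same ring fd, but a different mm after fork() --
		 * on fixed kernels this submit fails with EPERM */
		int ret = syscall(__NR_io_uring_enter, fd, 1, 0, 0, NULL, 0);

		printf("child submit: %d (%s)\n", ret,
		       ret < 0 ? strerror(errno) : "ok");
		_exit(0);
	}
	wait(NULL);
	return 0;
}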