io_uring: fix using under-expanded iters
[ upstream commit cd65869512ab5668a5d16f789bc4da1319c435c4 ]

The issue was first described and addressed in 89c2b3b7491820 ("io_uring: reexpand under-reexpanded iters"), but shortly afterwards reimplemented as cd65869512ab56 ("io_uring: use iov_iter state save/restore helpers"). Here we follow the approach of the second patch, but without in-callback resubmissions, without fixups for short read retries (not yet supported in 5.10), and with plain iov_iter copies in place of iov_iter_state, so as not to pull in even more dependencies, and because it is simply much simpler.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 57d01bcae7
commit 8adb751d29
fs/io_uring.c
@@ -3389,6 +3389,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
 	struct kiocb *kiocb = &req->rw.kiocb;
 	struct iov_iter __iter, *iter = &__iter;
+	struct iov_iter iter_cp;
 	struct io_async_rw *rw = req->async_data;
 	ssize_t io_size, ret, ret2;
 	bool no_async;
@@ -3399,6 +3400,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 	ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
 	if (ret < 0)
 		return ret;
+	iter_cp = *iter;
 	io_size = iov_iter_count(iter);
 	req->result = io_size;
 	ret = 0;
@@ -3434,7 +3436,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 		if (req->file->f_flags & O_NONBLOCK)
 			goto done;
 		/* some cases will consume bytes even on error returns */
-		iov_iter_revert(iter, io_size - iov_iter_count(iter));
+		*iter = iter_cp;
 		ret = 0;
 		goto copy_iov;
 	} else if (ret < 0) {
@@ -3517,6 +3519,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
 	struct kiocb *kiocb = &req->rw.kiocb;
 	struct iov_iter __iter, *iter = &__iter;
+	struct iov_iter iter_cp;
 	struct io_async_rw *rw = req->async_data;
 	ssize_t ret, ret2, io_size;
 
@@ -3526,6 +3529,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
 	ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
 	if (ret < 0)
 		return ret;
+	iter_cp = *iter;
 	io_size = iov_iter_count(iter);
 	req->result = io_size;
 
@@ -3587,7 +3591,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
 	} else {
 copy_iov:
 		/* some cases will consume bytes even on error returns */
-		iov_iter_revert(iter, io_size - iov_iter_count(iter));
+		*iter = iter_cp;
 		ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
 		if (!ret)
 			return -EAGAIN;
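For illustration, here is a hypothetical userspace analogue of the pattern the backport adopts: keep a plain struct copy of the iterator taken right after io_import_iovec() (the iter_cp = *iter above) and restore that copy before retrying, rather than calling iov_iter_revert() with a delta derived from the remaining byte count, which miscounts whenever the lower layer has left the iter truncated (under-expanded). The struct cursor, cursor_consume() and cursor_truncate() names below are invented for this sketch and are not kernel APIs.

/*
 * Userspace sketch only, not kernel code: a tiny iovec cursor that can be
 * advanced and truncated, used to show why restoring a saved copy is safer
 * than reverting by "original count - remaining count".
 */
#include <stdio.h>
#include <stddef.h>
#include <sys/uio.h>

struct cursor {
	const struct iovec *iov;	/* segment array                        */
	size_t nr_segs;			/* segments left, including current     */
	size_t seg_off;			/* offset into the current segment      */
	size_t count;			/* bytes this cursor may still hand out */
};

/* Hand out up to 'bytes' bytes, advancing the cursor (like iov_iter_advance). */
static void cursor_consume(struct cursor *c, size_t bytes)
{
	if (bytes > c->count)
		bytes = c->count;
	c->count -= bytes;
	while (bytes) {
		size_t left = c->iov->iov_len - c->seg_off;
		size_t step = bytes < left ? bytes : left;

		c->seg_off += step;
		bytes -= step;
		if (c->seg_off == c->iov->iov_len) {
			c->iov++;
			c->nr_segs--;
			c->seg_off = 0;
		}
	}
}

/* A lower layer may shrink the remaining budget (like iov_iter_truncate). */
static void cursor_truncate(struct cursor *c, size_t max)
{
	if (c->count > max)
		c->count = max;
}

int main(void)
{
	char a[8], b[8];
	const struct iovec vecs[2] = {
		{ .iov_base = a, .iov_len = sizeof(a) },
		{ .iov_base = b, .iov_len = sizeof(b) },
	};
	struct cursor cur = { .iov = vecs, .nr_segs = 2, .count = 16 };
	struct cursor saved = cur;	/* the patch's iter_cp = *iter */

	/*
	 * Simulate a callee that truncates the budget, consumes a few bytes
	 * and then fails, leaving the cursor advanced *and* under-expanded.
	 */
	cursor_truncate(&cur, 10);
	cursor_consume(&cur, 6);

	/*
	 * Revert-by-delta goes wrong here: 16 - cur.count is 12, not the 6
	 * bytes actually consumed, because the truncation was never undone.
	 */
	printf("delta a revert would use: %zu (actually consumed: 6)\n",
	       16 - cur.count);

	/* Restoring the saved copy recovers the exact original state. */
	cur = saved;
	printf("after restore: count=%zu nr_segs=%zu seg_off=%zu\n",
	       cur.count, cur.nr_segs, cur.seg_off);
	return 0;
}

Restoring by copy works no matter what the callee did to the iterator, which mirrors the commit message's point that plain iter copies are simpler for 5.10 than pulling in the iov_iter_state save/restore helpers.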