forked from luck/tmp_suning_uos_patched
ceph: increment/decrement dio counter on async requests
Ceph can in some cases issue an async DIO request, in which case we can
end up calling ceph_end_io_direct before the I/O is actually complete.
That may allow buffered operations to proceed while DIO requests are
still in flight.
Fix this by incrementing the i_dio_count when issuing an async DIO
request, and decrementing it when tearing down the aio_req.
Fixes: 321fe13c93 ("ceph: add buffered/direct exclusionary locking for reads and writes")
Signed-off-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
This commit is contained in:
parent: a81bc3102b
commit: 6a81749ebe
@@ -753,6 +753,9 @@ static void ceph_aio_complete(struct inode *inode,
 	if (!atomic_dec_and_test(&aio_req->pending_reqs))
 		return;
 
+	if (aio_req->iocb->ki_flags & IOCB_DIRECT)
+		inode_dio_end(inode);
+
 	ret = aio_req->error;
 	if (!ret)
 		ret = aio_req->total_len;
@@ -1091,6 +1094,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
 					      CEPH_CAP_FILE_RD);
 
 	list_splice(&aio_req->osd_reqs, &osd_reqs);
+	inode_dio_begin(inode);
 	while (!list_empty(&osd_reqs)) {
 		req = list_first_entry(&osd_reqs,
 				       struct ceph_osd_request,
|
Loading…
Reference in New Issue
Block a user