iomap: Allow forcing of waiting for running DIO in iomap_dio_rw()
In some cases filesystems cannot perform direct IO asynchronously, for example for unaligned writes or when the file size needs to be extended (e.g. for ext4). Instead of forcing the filesystem to wait for the AIO itself in such cases, add an argument to iomap_dio_rw() which makes the function wait for IO completion. This also results in iomap_dio_complete() being executed inline in iomap_dio_rw(), so its return value is provided to the caller as for ordinary sync IO.

Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
commit 13ef954445 (parent 4f5cafb5cb)
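For the motivation above, here is a minimal sketch of how a filesystem write path could use the new argument to force synchronous completion for cases it cannot handle asynchronously. The fs_* names and the unaligned/extend conditions are illustrative only, not part of this patch:

	static ssize_t fs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
	{
		struct inode *inode = file_inode(iocb->ki_filp);
		loff_t offset = iocb->ki_pos;
		size_t count = iov_iter_count(from);
		/* Illustrative conditions under which async completion is unsafe. */
		bool unaligned = (offset | count) & (i_blocksize(inode) - 1);
		bool extend = offset + count > i_size_read(inode);

		/*
		 * With wait_for_completion set, iomap_dio_rw() waits for the DIO
		 * and runs iomap_dio_complete() inline, returning the byte count
		 * as for ordinary sync IO instead of -EIOCBQUEUED.
		 */
		return iomap_dio_rw(iocb, from, &fs_iomap_ops, &fs_dio_write_ops,
				    is_sync_kiocb(iocb) || unaligned || extend);
	}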
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -732,7 +732,8 @@ static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to)
 	if (ret)
 		goto out_uninit;
 
-	ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL);
+	ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL,
+			   is_sync_kiocb(iocb));
 
 	gfs2_glock_dq(&gh);
 out_uninit:
@@ -767,7 +768,8 @@ static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
 	if (offset + len > i_size_read(&ip->i_inode))
 		goto out;
 
-	ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL);
+	ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL,
+			   is_sync_kiocb(iocb));
 
 out:
 	gfs2_glock_dq(&gh);
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -392,7 +392,8 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
  */
 ssize_t
 iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
-		const struct iomap_ops *ops, const struct iomap_dio_ops *dops)
+		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
+		bool wait_for_completion)
 {
 	struct address_space *mapping = iocb->ki_filp->f_mapping;
 	struct inode *inode = file_inode(iocb->ki_filp);
@@ -400,7 +401,6 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 	loff_t pos = iocb->ki_pos, start = pos;
 	loff_t end = iocb->ki_pos + count - 1, ret = 0;
 	unsigned int flags = IOMAP_DIRECT;
-	bool wait_for_completion = is_sync_kiocb(iocb);
 	struct blk_plug plug;
 	struct iomap_dio *dio;
 
@@ -409,6 +409,9 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 	if (!count)
 		return 0;
 
+	if (WARN_ON(is_sync_kiocb(iocb) && !wait_for_completion))
+		return -EIO;
+
 	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
 	if (!dio)
 		return -ENOMEM;
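For orientation only: the flag is consumed near the end of iomap_dio_rw() once all bios have been submitted (the function also stashes it in the dio structure for the bio completion path). The existing logic is roughly the following, heavily abridged with IOCB_HIPRI polling and error handling omitted; this is what produces the forced wait and the inline iomap_dio_complete() call described in the commit message:

	/* Abridged sketch of the tail of iomap_dio_rw(), not part of the hunks above. */
	if (!atomic_dec_and_test(&dio->ref)) {
		if (!wait_for_completion)
			return -EIOCBQUEUED;	/* async: completed from bio end_io */

		/* sync (or forced-sync) path: sleep until the last bio completes */
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;
			io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	return iomap_dio_complete(dio);	/* runs inline, returns bytes transferred */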
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -188,7 +188,7 @@ xfs_file_dio_aio_read(
 	file_accessed(iocb->ki_filp);
 
 	xfs_ilock(ip, XFS_IOLOCK_SHARED);
-	ret = iomap_dio_rw(iocb, to, &xfs_iomap_ops, NULL);
+	ret = iomap_dio_rw(iocb, to, &xfs_iomap_ops, NULL, is_sync_kiocb(iocb));
 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 
 	return ret;
@@ -547,7 +547,8 @@ xfs_file_dio_aio_write(
 	}
 
 	trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
-	ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, &xfs_dio_write_ops);
+	ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, &xfs_dio_write_ops,
+			   is_sync_kiocb(iocb));
 
 	/*
 	 * If unaligned, this is the only IO in-flight. If it has not yet
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -195,7 +195,8 @@ struct iomap_dio_ops {
 };
 
 ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
-		const struct iomap_ops *ops, const struct iomap_dio_ops *dops);
+		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
+		bool wait_for_completion);
 int iomap_dio_iopoll(struct kiocb *kiocb, bool spin);
 
 #ifdef CONFIG_SWAP
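The WARN_ON added in fs/iomap/direct-io.c pins down the contract of the new parameter: a synchronous kiocb must always be waited for, while an asynchronous one may optionally be forced to complete synchronously. A minimal illustration of the allowed combinations (iter, ops and dops stand for any caller's iterator and iomap ops):

	/* Common pattern used by the converted callers above. */
	ret = iomap_dio_rw(iocb, iter, ops, dops, is_sync_kiocb(iocb));

	/* Also valid: force even an async kiocb to complete synchronously. */
	ret = iomap_dio_rw(iocb, iter, ops, dops, true);

	/* Invalid for a sync kiocb: trips the WARN_ON and returns -EIO. */
	ret = iomap_dio_rw(iocb, iter, ops, dops, false);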