virtiofs: implement dax read/write operations
This patch implements basic DAX support. mmap() is not implemented yet and will come in later patches; this patch implements read/write.

We make use of an interval tree to keep track of per-inode DAX mappings.

Do not use DAX for file-extending writes; instead just send a WRITE message to the daemon (as on the direct I/O path). This keeps the write and the i_size change atomic w.r.t. crash.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Liu Bo <bo.liu@linux.alibaba.com>
Signed-off-by: Peng Tao <tao.peng@linux.alibaba.com>
Cc: Dave Chinner <david@fromorbit.com>
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
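For orientation, the data path added by this patch looks roughly like this (a sketch of the call chain, not literal code):

	read(2)/write(2)
	  -> fuse_file_read_iter() / fuse_file_write_iter()
	       -> fuse_dax_read_iter() / fuse_dax_write_iter()   [if FUSE_IS_DAX(inode)]
	            -> dax_iomap_rw(iocb, iter, &fuse_iomap_ops)
	                 -> fuse_iomap_begin(): look up the dmap in the inode's
	                    interval tree, or grab a free range and send
	                    FUSE_SETUPMAPPING to the daemon
	                 -> data is then copied directly to/from the DAX window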
This commit is contained in:
parent ceec02d435
commit c2d0ad00d9
fs/fuse/Kconfig:

@@ -42,6 +42,7 @@ config VIRTIO_FS

config FUSE_DAX
	bool "Virtio Filesystem Direct Host Memory Access support"
	default y
	select INTERVAL_TREE
	depends on VIRTIO_FS
	depends on FS_DAX
	depends on DAX_DRIVER
fs/fuse/dax.c (565 lines changed):
@@ -7,7 +7,10 @@

#include "fuse_i.h"

#include <linux/dax.h>
#include <linux/uio.h>
#include <linux/pfn_t.h>
#include <linux/iomap.h>
#include <linux/interval_tree.h>

/*
 * Default memory range size. A power of 2 so it agrees with common FUSE_INIT
@@ -22,22 +25,556 @@ struct fuse_dax_mapping

	/* Will connect in fcd->free_ranges to keep track of free memory */
	struct list_head list;

	/* For interval tree in file/inode */
	struct interval_tree_node itn;

	/** Position in DAX window */
	u64 window_offset;

	/** Length of mapping, in bytes */
	loff_t length;

	/* Is this mapping read-only or read-write */
	bool writable;
};

/* Per-inode dax map */
struct fuse_inode_dax {
	/* Semaphore to protect modifications to the dmap tree */
	struct rw_semaphore sem;

	/* Sorted rb tree of struct fuse_dax_mapping elements */
	struct rb_root_cached tree;
	unsigned long nr;
};

struct fuse_conn_dax {
	/* DAX device */
	struct dax_device *dev;

	/* Lock protecting accesses to members of this structure */
	spinlock_t lock;

	/* DAX window free ranges */
	long nr_free_ranges;
	struct list_head free_ranges;
};
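Putting these structures together: each file is divided into fixed-size ranges indexed by pos >> FUSE_DAX_SHIFT, and the interval tree maps a range index to its slot in the DAX window. A minimal sketch of the lookup, assuming the caller holds fi->dax->sem (dmap_lookup_addr is a hypothetical helper added here for illustration, not part of the patch):

/*
 * Hypothetical helper (illustration only): translate a file position into
 * an offset in the DAX window. Caller must hold fi->dax->sem.
 */
static bool dmap_lookup_addr(struct fuse_inode *fi, loff_t pos, u64 *addr)
{
	unsigned long idx = pos >> FUSE_DAX_SHIFT;
	struct interval_tree_node *node;
	struct fuse_dax_mapping *dmap;

	node = interval_tree_iter_first(&fi->dax->tree, idx, idx);
	if (!node)
		return false;	/* no range established for this index yet */

	dmap = container_of(node, struct fuse_dax_mapping, itn);
	*addr = dmap->window_offset +
		(pos - ((loff_t)dmap->itn.start << FUSE_DAX_SHIFT));
	return true;
}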
static inline struct fuse_dax_mapping *
node_to_dmap(struct interval_tree_node *node)
{
	if (!node)
		return NULL;

	return container_of(node, struct fuse_dax_mapping, itn);
}

static struct fuse_dax_mapping *alloc_dax_mapping(struct fuse_conn_dax *fcd)
{
	struct fuse_dax_mapping *dmap;

	spin_lock(&fcd->lock);
	dmap = list_first_entry_or_null(&fcd->free_ranges,
					struct fuse_dax_mapping, list);
	if (dmap) {
		list_del_init(&dmap->list);
		WARN_ON(fcd->nr_free_ranges <= 0);
		fcd->nr_free_ranges--;
	}
	spin_unlock(&fcd->lock);
	return dmap;
}

/* This assumes fcd->lock is held */
static void __dmap_add_to_free_pool(struct fuse_conn_dax *fcd,
				    struct fuse_dax_mapping *dmap)
{
	list_add_tail(&dmap->list, &fcd->free_ranges);
	fcd->nr_free_ranges++;
}

static void dmap_add_to_free_pool(struct fuse_conn_dax *fcd,
				  struct fuse_dax_mapping *dmap)
{
	/* Return fuse_dax_mapping to free list */
	spin_lock(&fcd->lock);
	__dmap_add_to_free_pool(fcd, dmap);
	spin_unlock(&fcd->lock);
}
static int fuse_setup_one_mapping(struct inode *inode, unsigned long start_idx,
				  struct fuse_dax_mapping *dmap, bool writable,
				  bool upgrade)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_conn_dax *fcd = fc->dax;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_setupmapping_in inarg;
	loff_t offset = start_idx << FUSE_DAX_SHIFT;
	FUSE_ARGS(args);
	ssize_t err;

	WARN_ON(fcd->nr_free_ranges < 0);

	/* Ask fuse daemon to setup mapping */
	memset(&inarg, 0, sizeof(inarg));
	inarg.foffset = offset;
	inarg.fh = -1;
	inarg.moffset = dmap->window_offset;
	inarg.len = FUSE_DAX_SZ;
	inarg.flags |= FUSE_SETUPMAPPING_FLAG_READ;
	if (writable)
		inarg.flags |= FUSE_SETUPMAPPING_FLAG_WRITE;
	args.opcode = FUSE_SETUPMAPPING;
	args.nodeid = fi->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	err = fuse_simple_request(fc, &args);
	if (err < 0)
		return err;
	dmap->writable = writable;
	if (!upgrade) {
		dmap->itn.start = dmap->itn.last = start_idx;
		/* Protected by fi->dax->sem */
		interval_tree_insert(&dmap->itn, &fi->dax->tree);
		fi->dax->nr++;
	}
	return 0;
}
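On the other end, the daemon (e.g. virtiofsd) is expected to service FUSE_SETUPMAPPING by mmap()-ing the relevant part of the file into the DAX window. Conceptually (daemon behavior is outside this patch; shown only as an assumption about the protocol):

/*
 * Conceptual daemon-side handling of FUSE_SETUPMAPPING (not in this patch):
 *
 *   FUSE_SETUPMAPPING { fh, foffset, len, moffset, flags }
 *     -> mmap(window_base + moffset, len,
 *             prot per SETUPMAPPING READ/WRITE flags,
 *             MAP_SHARED | MAP_FIXED, file_fd, foffset);
 *
 * Afterwards, guest loads/stores at window_offset + delta reach the file
 * contents directly.
 */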
static int fuse_send_removemapping(struct inode *inode,
				   struct fuse_removemapping_in *inargp,
				   struct fuse_removemapping_one *remove_one)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = get_fuse_conn(inode);
	FUSE_ARGS(args);

	args.opcode = FUSE_REMOVEMAPPING;
	args.nodeid = fi->nodeid;
	args.in_numargs = 2;
	args.in_args[0].size = sizeof(*inargp);
	args.in_args[0].value = inargp;
	args.in_args[1].size = inargp->count * sizeof(*remove_one);
	args.in_args[1].value = remove_one;
	return fuse_simple_request(fc, &args);
}

static int dmap_removemapping_list(struct inode *inode, unsigned int num,
				   struct list_head *to_remove)
{
	struct fuse_removemapping_one *remove_one, *ptr;
	struct fuse_removemapping_in inarg;
	struct fuse_dax_mapping *dmap;
	int ret, i = 0, nr_alloc;

	nr_alloc = min_t(unsigned int, num, FUSE_REMOVEMAPPING_MAX_ENTRY);
	remove_one = kmalloc_array(nr_alloc, sizeof(*remove_one), GFP_NOFS);
	if (!remove_one)
		return -ENOMEM;

	ptr = remove_one;
	list_for_each_entry(dmap, to_remove, list) {
		ptr->moffset = dmap->window_offset;
		ptr->len = dmap->length;
		ptr++;
		i++;
		num--;
		if (i >= nr_alloc || num == 0) {
			memset(&inarg, 0, sizeof(inarg));
			inarg.count = i;
			ret = fuse_send_removemapping(inode, &inarg,
						      remove_one);
			if (ret)
				goto out;
			ptr = remove_one;
			i = 0;
		}
	}
out:
	kfree(remove_one);
	return ret;
}
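The list is flushed in batches of at most FUSE_REMOVEMAPPING_MAX_ENTRY entries per request; for example (hypothetical numbers):

/*
 * Example: num = 5, FUSE_REMOVEMAPPING_MAX_ENTRY = 2 (hypothetical value)
 *
 *   entries 1-2: i reaches nr_alloc  -> FUSE_REMOVEMAPPING, count = 2
 *   entries 3-4: i reaches nr_alloc  -> FUSE_REMOVEMAPPING, count = 2
 *   entry 5:     num reaches 0       -> FUSE_REMOVEMAPPING, count = 1
 */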
/*
 * Cleanup dmap entry and add back to free list. This should be called with
 * fcd->lock held.
 */
static void dmap_reinit_add_to_free_pool(struct fuse_conn_dax *fcd,
					 struct fuse_dax_mapping *dmap)
{
	pr_debug("fuse: freeing memory range start_idx=0x%lx end_idx=0x%lx window_offset=0x%llx length=0x%llx\n",
		 dmap->itn.start, dmap->itn.last, dmap->window_offset,
		 dmap->length);
	dmap->itn.start = dmap->itn.last = 0;
	__dmap_add_to_free_pool(fcd, dmap);
}

/*
 * Free inode dmap entries whose range falls inside [start, end].
 * Does not take any locks. At this point of time it should only be
 * called from evict_inode() path where we know all dmap entries can be
 * reclaimed.
 */
static void inode_reclaim_dmap_range(struct fuse_conn_dax *fcd,
				     struct inode *inode,
				     loff_t start, loff_t end)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_dax_mapping *dmap, *n;
	int err, num = 0;
	LIST_HEAD(to_remove);
	unsigned long start_idx = start >> FUSE_DAX_SHIFT;
	unsigned long end_idx = end >> FUSE_DAX_SHIFT;
	struct interval_tree_node *node;

	while (1) {
		node = interval_tree_iter_first(&fi->dax->tree, start_idx,
						end_idx);
		if (!node)
			break;
		dmap = node_to_dmap(node);
		interval_tree_remove(&dmap->itn, &fi->dax->tree);
		num++;
		list_add(&dmap->list, &to_remove);
	}

	/* Nothing to remove */
	if (list_empty(&to_remove))
		return;

	WARN_ON(fi->dax->nr < num);
	fi->dax->nr -= num;
	err = dmap_removemapping_list(inode, num, &to_remove);
	if (err && err != -ENOTCONN) {
		pr_warn("Failed to removemappings. start=0x%llx end=0x%llx\n",
			start, end);
	}
	spin_lock(&fcd->lock);
	list_for_each_entry_safe(dmap, n, &to_remove, list) {
		list_del_init(&dmap->list);
		dmap_reinit_add_to_free_pool(fcd, dmap);
	}
	spin_unlock(&fcd->lock);
}
/*
 * It is called from evict_inode() and by that time inode is going away. So
 * this function does not take any locks like fi->dax->sem for traversing
 * that fuse inode interval tree. If that lock is taken then lock validator
 * complains of deadlock situation w.r.t fs_reclaim lock.
 */
void fuse_dax_inode_cleanup(struct inode *inode)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	/*
	 * fuse_evict_inode() has already called truncate_inode_pages_final()
	 * before we arrive here. So we should not have to worry about any
	 * pages/exception entries still associated with inode.
	 */
	inode_reclaim_dmap_range(fc->dax, inode, 0, -1);
	WARN_ON(fi->dax->nr);
}
static void fuse_fill_iomap_hole(struct iomap *iomap, loff_t length)
{
	iomap->addr = IOMAP_NULL_ADDR;
	iomap->length = length;
	iomap->type = IOMAP_HOLE;
}

static void fuse_fill_iomap(struct inode *inode, loff_t pos, loff_t length,
			    struct iomap *iomap, struct fuse_dax_mapping *dmap,
			    unsigned int flags)
{
	loff_t offset, len;
	loff_t i_size = i_size_read(inode);

	offset = pos - (dmap->itn.start << FUSE_DAX_SHIFT);
	len = min(length, dmap->length - offset);

	/* If length is beyond end of file, truncate further */
	if (pos + len > i_size)
		len = i_size - pos;

	if (len > 0) {
		iomap->addr = dmap->window_offset + offset;
		iomap->length = len;
		if (flags & IOMAP_FAULT)
			iomap->length = ALIGN(len, PAGE_SIZE);
		iomap->type = IOMAP_MAPPED;
	} else {
		/* Mapping beyond end of file is hole */
		fuse_fill_iomap_hole(iomap, length);
	}
}
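To make the arithmetic concrete, a worked example assuming the default 2 MiB range size (i.e. FUSE_DAX_SHIFT == 21; the constant is defined earlier in this file):

/*
 * Worked example (hypothetical values), FUSE_DAX_SHIFT == 21:
 *
 *   pos = 0x500000 (5 MiB), length = 0x200000, i_size = 0x700000
 *   dmap->itn.start = 2 (covers file range [4 MiB, 6 MiB)),
 *   dmap->window_offset = 0x40000000, dmap->length = 0x200000
 *
 *   offset      = 0x500000 - (2 << 21)               = 0x100000
 *   len         = min(0x200000, 0x200000 - 0x100000) = 0x100000
 *   iomap->addr = 0x40000000 + 0x100000              = 0x40100000
 *   iomap->type = IOMAP_MAPPED
 */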
static int fuse_setup_new_dax_mapping(struct inode *inode, loff_t pos,
				      loff_t length, unsigned int flags,
				      struct iomap *iomap)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_conn_dax *fcd = fc->dax;
	struct fuse_dax_mapping *dmap, *alloc_dmap = NULL;
	int ret;
	bool writable = flags & IOMAP_WRITE;
	unsigned long start_idx = pos >> FUSE_DAX_SHIFT;
	struct interval_tree_node *node;

	alloc_dmap = alloc_dax_mapping(fcd);
	if (!alloc_dmap)
		return -EIO;

	/*
	 * Take write lock so that only one caller can try to setup mapping
	 * and others wait.
	 */
	down_write(&fi->dax->sem);
	/*
	 * We dropped the lock. Check again if somebody else set up the
	 * mapping already.
	 */
	node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
	if (node) {
		dmap = node_to_dmap(node);
		fuse_fill_iomap(inode, pos, length, iomap, dmap, flags);
		dmap_add_to_free_pool(fcd, alloc_dmap);
		up_write(&fi->dax->sem);
		return 0;
	}

	/* Setup one mapping */
	ret = fuse_setup_one_mapping(inode, pos >> FUSE_DAX_SHIFT, alloc_dmap,
				     writable, false);
	if (ret < 0) {
		dmap_add_to_free_pool(fcd, alloc_dmap);
		up_write(&fi->dax->sem);
		return ret;
	}
	fuse_fill_iomap(inode, pos, length, iomap, alloc_dmap, flags);
	up_write(&fi->dax->sem);
	return 0;
}
static int fuse_upgrade_dax_mapping(struct inode *inode, loff_t pos,
				    loff_t length, unsigned int flags,
				    struct iomap *iomap)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_dax_mapping *dmap;
	int ret;
	unsigned long idx = pos >> FUSE_DAX_SHIFT;
	struct interval_tree_node *node;

	/*
	 * Take exclusive lock so that only one caller can try to setup
	 * mapping and others wait.
	 */
	down_write(&fi->dax->sem);
	node = interval_tree_iter_first(&fi->dax->tree, idx, idx);

	/* We are holding either inode lock or i_mmap_sem, and that should
	 * ensure that dmap can't be reclaimed or truncated and it should
	 * still be there in the tree despite the fact we dropped and
	 * re-acquired the lock.
	 */
	ret = -EIO;
	if (WARN_ON(!node))
		goto out_err;

	dmap = node_to_dmap(node);

	/* Maybe another thread already upgraded mapping while we were not
	 * holding lock.
	 */
	if (dmap->writable) {
		ret = 0;
		goto out_fill_iomap;
	}

	ret = fuse_setup_one_mapping(inode, pos >> FUSE_DAX_SHIFT, dmap, true,
				     true);
	if (ret < 0)
		goto out_err;
out_fill_iomap:
	fuse_fill_iomap(inode, pos, length, iomap, dmap, flags);
out_err:
	up_write(&fi->dax->sem);
	return ret;
}
/* This is just for DAX and the mapping is ephemeral, do not use it for other
 * purposes since there is no block device with a permanent mapping.
 */
static int fuse_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
			    unsigned int flags, struct iomap *iomap,
			    struct iomap *srcmap)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_dax_mapping *dmap;
	bool writable = flags & IOMAP_WRITE;
	unsigned long start_idx = pos >> FUSE_DAX_SHIFT;
	struct interval_tree_node *node;

	/* We don't support FIEMAP */
	if (WARN_ON(flags & IOMAP_REPORT))
		return -EIO;

	iomap->offset = pos;
	iomap->flags = 0;
	iomap->bdev = NULL;
	iomap->dax_dev = fc->dax->dev;

	/*
	 * Both read/write and mmap path can race here. So we need something
	 * to make sure if we are setting up mapping, then other path waits.
	 *
	 * For now, use a semaphore for this. It probably needs to be
	 * optimized later.
	 */
	down_read(&fi->dax->sem);
	node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
	if (node) {
		dmap = node_to_dmap(node);
		if (writable && !dmap->writable) {
			/* Upgrade read-only mapping to read-write. This will
			 * require exclusive fi->dax->sem lock as we don't want
			 * two threads to be trying to do this simultaneously
			 * for the same dmap. So drop shared lock and acquire
			 * exclusive lock.
			 */
			up_read(&fi->dax->sem);
			pr_debug("%s: Upgrading mapping at offset 0x%llx length 0x%llx\n",
				 __func__, pos, length);
			return fuse_upgrade_dax_mapping(inode, pos, length,
							flags, iomap);
		} else {
			fuse_fill_iomap(inode, pos, length, iomap, dmap, flags);
			up_read(&fi->dax->sem);
			return 0;
		}
	} else {
		up_read(&fi->dax->sem);
		pr_debug("%s: no mapping at offset 0x%llx length 0x%llx\n",
			 __func__, pos, length);
		if (pos >= i_size_read(inode))
			goto iomap_hole;

		return fuse_setup_new_dax_mapping(inode, pos, length, flags,
						  iomap);
	}

	/*
	 * If a read beyond end of file happens, fs code seems to return
	 * it as a hole
	 */
iomap_hole:
	fuse_fill_iomap_hole(iomap, length);
	pr_debug("%s returning hole mapping. pos=0x%llx length_asked=0x%llx length_returned=0x%llx\n",
		 __func__, pos, length, iomap->length);
	return 0;
}
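Condensed, the cases fuse_iomap_begin() resolves to are:

/*
 * Summary of the cases handled above:
 *
 *   1. dmap found, access compatible  -> fill iomap, return
 *   2. dmap found, read-only but write requested
 *                                     -> retake sem exclusively and
 *                                        upgrade via FUSE_SETUPMAPPING
 *   3. no dmap, pos < i_size          -> allocate a free range and
 *                                        send FUSE_SETUPMAPPING
 *   4. no dmap, pos >= i_size         -> report a hole
 */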
static int fuse_iomap_end(struct inode *inode, loff_t pos, loff_t length,
			  ssize_t written, unsigned int flags,
			  struct iomap *iomap)
{
	/* DAX writes beyond end-of-file aren't handled using iomap, so the
	 * file size is unchanged and there is nothing to do here.
	 */
	return 0;
}

static const struct iomap_ops fuse_iomap_ops = {
	.iomap_begin = fuse_iomap_begin,
	.iomap_end = fuse_iomap_end,
};

ssize_t fuse_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock_shared(inode))
			return -EAGAIN;
	} else {
		inode_lock_shared(inode);
	}

	ret = dax_iomap_rw(iocb, to, &fuse_iomap_ops);
	inode_unlock_shared(inode);

	/* TODO file_accessed(iocb->f_filp) */
	return ret;
}
static bool file_extending_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	return (iov_iter_rw(from) == WRITE &&
		((iocb->ki_pos) >= i_size_read(inode) ||
		 (iocb->ki_pos + iov_iter_count(from) > i_size_read(inode))));
}
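For example (hypothetical file state, not from the patch):

/*
 * Example with i_size = 4096:
 *
 *   pwrite(fd, buf, 512, 4096) -> extending (pos == i_size)        -> direct path
 *   pwrite(fd, buf, 512, 4000) -> extending (pos + count > i_size) -> direct path
 *   pwrite(fd, buf, 512, 1024) -> not extending                    -> DAX path
 */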
static ssize_t fuse_dax_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
	ssize_t ret;

	ret = fuse_direct_io(&io, from, &iocb->ki_pos, FUSE_DIO_WRITE);
	if (ret < 0)
		return ret;

	fuse_invalidate_attr(inode);
	fuse_write_update_size(inode, iocb->ki_pos);
	return ret;
}

ssize_t fuse_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode))
			return -EAGAIN;
	} else {
		inode_lock(inode);
	}

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	ret = file_remove_privs(iocb->ki_filp);
	if (ret)
		goto out;
	/* TODO file_update_time() but we don't want metadata I/O */

	/* Do not use dax for file extending writes as write and on
	 * disk i_size increase are not atomic otherwise.
	 */
	if (file_extending_write(iocb, from))
		ret = fuse_dax_direct_write(iocb, from);
	else
		ret = dax_iomap_rw(iocb, from, &fuse_iomap_ops);

out:
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
static void fuse_free_dax_mem_ranges(struct list_head *mem_list)
{
	struct fuse_dax_mapping *range, *temp;
@@ -116,6 +653,7 @@ int fuse_dax_conn_alloc(struct fuse_conn *fc, struct dax_device *dax_dev)

	if (!fcd)
		return -ENOMEM;

	spin_lock_init(&fcd->lock);
	fcd->dev = dax_dev;
	err = fuse_dax_mem_range_init(fcd);
	if (err) {
@@ -127,6 +665,33 @@ int fuse_dax_conn_alloc(struct fuse_conn *fc, struct dax_device *dax_dev)

	return 0;
}

bool fuse_dax_inode_alloc(struct super_block *sb, struct fuse_inode *fi)
{
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	fi->dax = NULL;
	if (fc->dax) {
		fi->dax = kzalloc(sizeof(*fi->dax), GFP_KERNEL_ACCOUNT);
		if (!fi->dax)
			return false;

		init_rwsem(&fi->dax->sem);
		fi->dax->tree = RB_ROOT_CACHED;
	}

	return true;
}

void fuse_dax_inode_init(struct inode *inode)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (!fc->dax)
		return;

	inode->i_flags |= S_DAX;
}

bool fuse_dax_check_alignment(struct fuse_conn *fc, unsigned int map_alignment)
{
	if (fc->dax && (map_alignment > FUSE_DAX_SHIFT)) {
fs/fuse/file.c:

@@ -1539,10 +1539,14 @@ static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file_inode(file);

-	if (is_bad_inode(file_inode(file)))
+	if (is_bad_inode(inode))
		return -EIO;

	if (FUSE_IS_DAX(inode))
		return fuse_dax_read_iter(iocb, to);

	if (!(ff->open_flags & FOPEN_DIRECT_IO))
		return fuse_cache_read_iter(iocb, to);
	else
@@ -1553,10 +1557,14 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file_inode(file);

-	if (is_bad_inode(file_inode(file)))
+	if (is_bad_inode(inode))
		return -EIO;

	if (FUSE_IS_DAX(inode))
		return fuse_dax_write_iter(iocb, from);

	if (!(ff->open_flags & FOPEN_DIRECT_IO))
		return fuse_cache_write_iter(iocb, from);
	else
@@ -3440,4 +3448,7 @@ void fuse_init_file_inode(struct inode *inode)
	fi->writectr = 0;
	init_waitqueue_head(&fi->page_waitq);
	fi->writepages = RB_ROOT;

	if (IS_ENABLED(CONFIG_FUSE_DAX))
		fuse_dax_inode_init(inode);
}
fs/fuse/fuse_i.h:

@@ -148,6 +148,13 @@ struct fuse_inode

	/** Lock to protect write related fields */
	spinlock_t lock;

#ifdef CONFIG_FUSE_DAX
	/*
	 * Dax specific inode data
	 */
	struct fuse_inode_dax *dax;
#endif
};

/** FUSE inode state bits */
@@ -1104,8 +1111,16 @@ void fuse_free_conn(struct fuse_conn *fc);

/* dax.c */

#define FUSE_IS_DAX(inode) (IS_ENABLED(CONFIG_FUSE_DAX) && IS_DAX(inode))

ssize_t fuse_dax_read_iter(struct kiocb *iocb, struct iov_iter *to);
ssize_t fuse_dax_write_iter(struct kiocb *iocb, struct iov_iter *from);
int fuse_dax_mmap(struct file *file, struct vm_area_struct *vma);
int fuse_dax_conn_alloc(struct fuse_conn *fc, struct dax_device *dax_dev);
void fuse_dax_conn_free(struct fuse_conn *fc);
bool fuse_dax_inode_alloc(struct super_block *sb, struct fuse_inode *fi);
void fuse_dax_inode_init(struct inode *inode);
void fuse_dax_inode_cleanup(struct inode *inode);
bool fuse_dax_check_alignment(struct fuse_conn *fc, unsigned int map_alignment);

#endif /* _FS_FUSE_I_H */
fs/fuse/inode.c:

@@ -87,12 +87,19 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
	mutex_init(&fi->mutex);
	spin_lock_init(&fi->lock);
	fi->forget = fuse_alloc_forget();
-	if (!fi->forget) {
-		kmem_cache_free(fuse_inode_cachep, fi);
-		return NULL;
-	}
+	if (!fi->forget)
+		goto out_free;
+
+	if (IS_ENABLED(CONFIG_FUSE_DAX) && !fuse_dax_inode_alloc(sb, fi))
+		goto out_free_forget;

	return &fi->inode;

+out_free_forget:
+	kfree(fi->forget);
+out_free:
+	kmem_cache_free(fuse_inode_cachep, fi);
+	return NULL;
}

static void fuse_free_inode(struct inode *inode)
@@ -101,6 +108,9 @@ static void fuse_free_inode(struct inode *inode)

	mutex_destroy(&fi->mutex);
	kfree(fi->forget);
#ifdef CONFIG_FUSE_DAX
	kfree(fi->dax);
#endif
	kmem_cache_free(fuse_inode_cachep, fi);
}
@@ -112,6 +122,9 @@ static void fuse_evict_inode(struct inode *inode)
	clear_inode(inode);
	if (inode->i_sb->s_flags & SB_ACTIVE) {
		struct fuse_conn *fc = get_fuse_conn(inode);

		if (FUSE_IS_DAX(inode))
			fuse_dax_inode_cleanup(inode);
		fuse_queue_forget(fc, fi->forget, fi->nodeid, fi->nlookup);
		fi->forget = NULL;
	}
include/uapi/linux/fuse.h:

@@ -895,6 +895,7 @@ struct fuse_copy_file_range_in {
};

#define FUSE_SETUPMAPPING_FLAG_WRITE (1ull << 0)
#define FUSE_SETUPMAPPING_FLAG_READ (1ull << 1)
struct fuse_setupmapping_in {
	/* An already open handle */
	uint64_t fh;