wrappers for ->i_mutex access

parallel to mutex_{lock,unlock,trylock,is_locked,lock_nested},
inode_foo(inode) being mutex_foo(&inode->i_mutex).  Please, use those
for access to ->i_mutex; over the coming cycle ->i_mutex will become
rwsem, with ->lookup() done with it held only shared.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 57b8f112cf
commit 5955102c99
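For context, a minimal sketch of what the new wrappers amount to (assumed here to be simple static inlines added next to struct inode in include/linux/fs.h; reconstructed for illustration, not quoted verbatim from the commit):

    /* Take and release the inode's i_mutex through the new helpers. */
    static inline void inode_lock(struct inode *inode)
    {
        mutex_lock(&inode->i_mutex);
    }

    static inline void inode_unlock(struct inode *inode)
    {
        mutex_unlock(&inode->i_mutex);
    }

    /* Non-blocking attempt; non-zero on success, mirroring mutex_trylock(). */
    static inline int inode_trylock(struct inode *inode)
    {
        return mutex_trylock(&inode->i_mutex);
    }

    static inline int inode_is_locked(struct inode *inode)
    {
        return mutex_is_locked(&inode->i_mutex);
    }

    /* Lockdep subclass variant for parent/child ordering (I_MUTEX_PARENT etc.). */
    static inline void inode_lock_nested(struct inode *inode, unsigned subclass)
    {
        mutex_lock_nested(&inode->i_mutex, subclass);
    }

The hunks below are then mechanical: every mutex_foo(&inode->i_mutex) caller becomes inode_foo(inode), so when ->i_mutex is later switched to an rwsem only the helpers have to change.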
@@ -1799,9 +1799,9 @@ static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int data
     struct inode *inode = file_inode(file);
     int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
     if (!err) {
-        mutex_lock(&inode->i_mutex);
+        inode_lock(inode);
         err = spufs_mfc_flush(file, NULL);
-        mutex_unlock(&inode->i_mutex);
+        inode_unlock(inode);
     }
     return err;
 }

@@ -163,7 +163,7 @@ static void spufs_prune_dir(struct dentry *dir)
 {
     struct dentry *dentry, *tmp;
 
-    mutex_lock(&d_inode(dir)->i_mutex);
+    inode_lock(d_inode(dir));
     list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_child) {
         spin_lock(&dentry->d_lock);
         if (simple_positive(dentry)) {
@@ -180,7 +180,7 @@ static void spufs_prune_dir(struct dentry *dir)
         }
     }
     shrink_dcache_parent(dir);
-    mutex_unlock(&d_inode(dir)->i_mutex);
+    inode_unlock(d_inode(dir));
 }
 
 /* Caller must hold parent->i_mutex */
@@ -225,9 +225,9 @@ static int spufs_dir_close(struct inode *inode, struct file *file)
     parent = d_inode(dir->d_parent);
     ctx = SPUFS_I(d_inode(dir))->i_ctx;
 
-    mutex_lock_nested(&parent->i_mutex, I_MUTEX_PARENT);
+    inode_lock_nested(parent, I_MUTEX_PARENT);
     ret = spufs_rmdir(parent, dir);
-    mutex_unlock(&parent->i_mutex);
+    inode_unlock(parent);
     WARN_ON(ret);
 
     return dcache_dir_close(inode, file);
@@ -270,7 +270,7 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
     inode->i_op = &simple_dir_inode_operations;
     inode->i_fop = &simple_dir_operations;
 
-    mutex_lock(&inode->i_mutex);
+    inode_lock(inode);
 
     dget(dentry);
     inc_nlink(dir);
@@ -291,7 +291,7 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
     if (ret)
         spufs_rmdir(dir, dentry);
 
-    mutex_unlock(&inode->i_mutex);
+    inode_unlock(inode);
 
     return ret;
 }

@@ -67,7 +67,7 @@ static void hypfs_remove(struct dentry *dentry)
     struct dentry *parent;
 
     parent = dentry->d_parent;
-    mutex_lock(&d_inode(parent)->i_mutex);
+    inode_lock(d_inode(parent));
     if (simple_positive(dentry)) {
         if (d_is_dir(dentry))
             simple_rmdir(d_inode(parent), dentry);
@@ -76,7 +76,7 @@ static void hypfs_remove(struct dentry *dentry)
     }
     d_delete(dentry);
     dput(dentry);
-    mutex_unlock(&d_inode(parent)->i_mutex);
+    inode_unlock(d_inode(parent));
 }
 
 static void hypfs_delete_tree(struct dentry *root)
@@ -331,7 +331,7 @@ static struct dentry *hypfs_create_file(struct dentry *parent, const char *name,
     struct dentry *dentry;
     struct inode *inode;
 
-    mutex_lock(&d_inode(parent)->i_mutex);
+    inode_lock(d_inode(parent));
     dentry = lookup_one_len(name, parent, strlen(name));
     if (IS_ERR(dentry)) {
         dentry = ERR_PTR(-ENOMEM);
@@ -359,7 +359,7 @@ static struct dentry *hypfs_create_file(struct dentry *parent, const char *name,
     d_instantiate(dentry, inode);
     dget(dentry);
 fail:
-    mutex_unlock(&d_inode(parent)->i_mutex);
+    inode_unlock(d_inode(parent));
     return dentry;
 }
 
@@ -455,12 +455,12 @@ static int blkdev_daxset(struct block_device *bdev, unsigned long argp)
     if (arg && !blkdev_dax_capable(bdev))
         return -ENOTTY;
 
-    mutex_lock(&bdev->bd_inode->i_mutex);
+    inode_lock(bdev->bd_inode);
     if (bdev->bd_map_count == 0)
         inode_set_flags(bdev->bd_inode, arg, S_DAX);
     else
         rc = -EBUSY;
-    mutex_unlock(&bdev->bd_inode->i_mutex);
+    inode_unlock(bdev->bd_inode);
     return rc;
 }
 #else

@@ -215,9 +215,9 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
         newattrs.ia_uid = uid;
         newattrs.ia_gid = gid;
         newattrs.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID;
-        mutex_lock(&d_inode(dentry)->i_mutex);
+        inode_lock(d_inode(dentry));
         notify_change(dentry, &newattrs, NULL);
-        mutex_unlock(&d_inode(dentry)->i_mutex);
+        inode_unlock(d_inode(dentry));
 
         /* mark as kernel-created inode */
         d_inode(dentry)->i_private = &thread;
@@ -244,7 +244,7 @@ static int dev_rmdir(const char *name)
         err = -ENOENT;
     }
     dput(dentry);
-    mutex_unlock(&d_inode(parent.dentry)->i_mutex);
+    inode_unlock(d_inode(parent.dentry));
     path_put(&parent);
     return err;
 }
@@ -321,9 +321,9 @@ static int handle_remove(const char *nodename, struct device *dev)
             newattrs.ia_mode = stat.mode & ~0777;
             newattrs.ia_valid =
                 ATTR_UID|ATTR_GID|ATTR_MODE;
-            mutex_lock(&d_inode(dentry)->i_mutex);
+            inode_lock(d_inode(dentry));
             notify_change(dentry, &newattrs, NULL);
-            mutex_unlock(&d_inode(dentry)->i_mutex);
+            inode_unlock(d_inode(dentry));
             err = vfs_unlink(d_inode(parent.dentry), dentry, NULL);
             if (!err || err == -ENOENT)
                 deleted = 1;
@@ -332,7 +332,7 @@ static int handle_remove(const char *nodename, struct device *dev)
         err = -ENOENT;
     }
     dput(dentry);
-    mutex_unlock(&d_inode(parent.dentry)->i_mutex);
+    inode_unlock(d_inode(parent.dentry));
 
     path_put(&parent);
     if (deleted && strchr(nodename, '/'))

@@ -964,9 +964,9 @@ aoecmd_sleepwork(struct work_struct *work)
     ssize = get_capacity(d->gd);
     bd = bdget_disk(d->gd, 0);
     if (bd) {
-        mutex_lock(&bd->bd_inode->i_mutex);
+        inode_lock(bd->bd_inode);
         i_size_write(bd->bd_inode, (loff_t)ssize<<9);
-        mutex_unlock(&bd->bd_inode->i_mutex);
+        inode_unlock(bd->bd_inode);
         bdput(bd);
     }
     spin_lock_irq(&d->lock);

@@ -434,12 +434,12 @@ static int drbd_single_open(struct file *file, int (*show)(struct seq_file *, vo
     if (!parent || d_really_is_negative(parent))
         goto out;
     /* serialize with d_delete() */
-    mutex_lock(&d_inode(parent)->i_mutex);
+    inode_lock(d_inode(parent));
     /* Make sure the object is still alive */
     if (simple_positive(file->f_path.dentry)
     && kref_get_unless_zero(kref))
         ret = 0;
-    mutex_unlock(&d_inode(parent)->i_mutex);
+    inode_unlock(d_inode(parent));
     if (!ret) {
         ret = single_open(file, show, data);
         if (ret)

@@ -689,7 +689,7 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
 {
     loff_t ret;
 
-    mutex_lock(&file_inode(file)->i_mutex);
+    inode_lock(file_inode(file));
     switch (orig) {
     case SEEK_CUR:
         offset += file->f_pos;
@@ -706,7 +706,7 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
     default:
         ret = -EINVAL;
     }
-    mutex_unlock(&file_inode(file)->i_mutex);
+    inode_unlock(file_inode(file));
     return ret;
 }
 
@@ -290,9 +290,9 @@ static int ps3flash_fsync(struct file *file, loff_t start, loff_t end, int datas
 {
     struct inode *inode = file_inode(file);
     int err;
-    mutex_lock(&inode->i_mutex);
+    inode_lock(inode);
     err = ps3flash_writeback(ps3flash_dev);
-    mutex_unlock(&inode->i_mutex);
+    inode_unlock(inode);
     return err;
 }
 
@@ -89,14 +89,14 @@ static int create_file(const char *name, umode_t mode,
 {
     int error;
 
-    mutex_lock(&d_inode(parent)->i_mutex);
+    inode_lock(d_inode(parent));
     *dentry = lookup_one_len(name, parent, strlen(name));
     if (!IS_ERR(*dentry))
         error = qibfs_mknod(d_inode(parent), *dentry,
                             mode, fops, data);
     else
         error = PTR_ERR(*dentry);
-    mutex_unlock(&d_inode(parent)->i_mutex);
+    inode_unlock(d_inode(parent));
 
     return error;
 }
@@ -481,7 +481,7 @@ static int remove_device_files(struct super_block *sb,
     int ret, i;
 
     root = dget(sb->s_root);
-    mutex_lock(&d_inode(root)->i_mutex);
+    inode_lock(d_inode(root));
     snprintf(unit, sizeof(unit), "%u", dd->unit);
     dir = lookup_one_len(unit, root, strlen(unit));
 
@@ -491,7 +491,7 @@ static int remove_device_files(struct super_block *sb,
         goto bail;
     }
 
-    mutex_lock(&d_inode(dir)->i_mutex);
+    inode_lock(d_inode(dir));
     remove_file(dir, "counters");
     remove_file(dir, "counter_names");
     remove_file(dir, "portcounter_names");
@@ -506,13 +506,13 @@ static int remove_device_files(struct super_block *sb,
         }
     }
     remove_file(dir, "flash");
-    mutex_unlock(&d_inode(dir)->i_mutex);
+    inode_unlock(d_inode(dir));
     ret = simple_rmdir(d_inode(root), dir);
     d_delete(dir);
     dput(dir);
 
 bail:
-    mutex_unlock(&d_inode(root)->i_mutex);
+    inode_unlock(d_inode(root));
     dput(root);
     return ret;
 }

@@ -174,9 +174,9 @@ static int vol_cdev_fsync(struct file *file, loff_t start, loff_t end,
     struct ubi_device *ubi = desc->vol->ubi;
     struct inode *inode = file_inode(file);
     int err;
-    mutex_lock(&inode->i_mutex);
+    inode_lock(inode);
     err = ubi_sync(ubi->ubi_num);
-    mutex_unlock(&inode->i_mutex);
+    inode_unlock(inode);
     return err;
 }
 
@@ -138,22 +138,22 @@ static int __oprofilefs_create_file(struct dentry *root, char const *name,
     struct dentry *dentry;
     struct inode *inode;
 
-    mutex_lock(&d_inode(root)->i_mutex);
+    inode_lock(d_inode(root));
     dentry = d_alloc_name(root, name);
     if (!dentry) {
-        mutex_unlock(&d_inode(root)->i_mutex);
+        inode_unlock(d_inode(root));
         return -ENOMEM;
     }
     inode = oprofilefs_get_inode(root->d_sb, S_IFREG | perm);
     if (!inode) {
         dput(dentry);
-        mutex_unlock(&d_inode(root)->i_mutex);
+        inode_unlock(d_inode(root));
         return -ENOMEM;
     }
     inode->i_fop = fops;
     inode->i_private = priv;
     d_add(dentry, inode);
-    mutex_unlock(&d_inode(root)->i_mutex);
+    inode_unlock(d_inode(root));
     return 0;
 }
 
@@ -215,22 +215,22 @@ struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name)
     struct dentry *dentry;
     struct inode *inode;
 
-    mutex_lock(&d_inode(parent)->i_mutex);
+    inode_lock(d_inode(parent));
     dentry = d_alloc_name(parent, name);
     if (!dentry) {
-        mutex_unlock(&d_inode(parent)->i_mutex);
+        inode_unlock(d_inode(parent));
         return NULL;
     }
     inode = oprofilefs_get_inode(parent->d_sb, S_IFDIR | 0755);
     if (!inode) {
         dput(dentry);
-        mutex_unlock(&d_inode(parent)->i_mutex);
+        inode_unlock(d_inode(parent));
         return NULL;
     }
     inode->i_op = &simple_dir_inode_operations;
     inode->i_fop = &simple_dir_operations;
     d_add(dentry, inode);
-    mutex_unlock(&d_inode(parent)->i_mutex);
+    inode_unlock(d_inode(parent));
     return dentry;
 }
 
@ -1858,7 +1858,7 @@ static loff_t ll_dir_seek(struct file *file, loff_t offset, int origin)
|
|||
int api32 = ll_need_32bit_api(sbi);
|
||||
loff_t ret = -EINVAL;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
switch (origin) {
|
||||
case SEEK_SET:
|
||||
break;
|
||||
|
@ -1896,7 +1896,7 @@ static loff_t ll_dir_seek(struct file *file, loff_t offset, int origin)
|
|||
goto out;
|
||||
|
||||
out:
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@ -2082,17 +2082,17 @@ static int ll_swap_layouts(struct file *file1, struct file *file2,
|
|||
/* update time if requested */
|
||||
rc = 0;
|
||||
if (llss->ia2.ia_valid != 0) {
|
||||
mutex_lock(&llss->inode1->i_mutex);
|
||||
inode_lock(llss->inode1);
|
||||
rc = ll_setattr(file1->f_path.dentry, &llss->ia2);
|
||||
mutex_unlock(&llss->inode1->i_mutex);
|
||||
inode_unlock(llss->inode1);
|
||||
}
|
||||
|
||||
if (llss->ia1.ia_valid != 0) {
|
||||
int rc1;
|
||||
|
||||
mutex_lock(&llss->inode2->i_mutex);
|
||||
inode_lock(llss->inode2);
|
||||
rc1 = ll_setattr(file2->f_path.dentry, &llss->ia1);
|
||||
mutex_unlock(&llss->inode2->i_mutex);
|
||||
inode_unlock(llss->inode2);
|
||||
if (rc == 0)
|
||||
rc = rc1;
|
||||
}
|
||||
|
@ -2179,13 +2179,13 @@ static int ll_hsm_import(struct inode *inode, struct file *file,
|
|||
ATTR_MTIME | ATTR_MTIME_SET |
|
||||
ATTR_ATIME | ATTR_ATIME_SET;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
|
||||
rc = ll_setattr_raw(file->f_path.dentry, attr, true);
|
||||
if (rc == -ENODATA)
|
||||
rc = 0;
|
||||
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
|
||||
kfree(attr);
|
||||
free_hss:
|
||||
|
@ -2609,7 +2609,7 @@ int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync)
|
|||
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FSYNC, 1);
|
||||
|
||||
rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
|
||||
/* catch async errors that were recorded back when async writeback
|
||||
* failed for pages in this mapping. */
|
||||
|
@ -2641,7 +2641,7 @@ int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync)
|
|||
fd->fd_write_failed = false;
|
||||
}
|
||||
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
|
|
@ -1277,7 +1277,7 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
|
|||
return -ENOMEM;
|
||||
|
||||
if (!S_ISDIR(inode->i_mode))
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
|
||||
memcpy(&op_data->op_attr, attr, sizeof(*attr));
|
||||
|
||||
|
@ -1358,7 +1358,7 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
|
|||
ll_finish_md_op_data(op_data);
|
||||
|
||||
if (!S_ISDIR(inode->i_mode)) {
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
|
||||
inode_dio_wait(inode);
|
||||
}
|
||||
|
|
|
@ -245,9 +245,9 @@ static int ll_get_name(struct dentry *dentry, char *name,
|
|||
goto out;
|
||||
}
|
||||
|
||||
mutex_lock(&dir->i_mutex);
|
||||
inode_lock(dir);
|
||||
rc = ll_dir_read(dir, &lgd.ctx);
|
||||
mutex_unlock(&dir->i_mutex);
|
||||
inode_unlock(dir);
|
||||
if (!rc && !lgd.lgd_found)
|
||||
rc = -ENOENT;
|
||||
out:
|
||||
|
|
|
@ -257,9 +257,9 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
|
|||
* be asked to write less pages once, this purely depends on
|
||||
* implementation. Anyway, we should be careful to avoid deadlocking.
|
||||
*/
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
bytes = ll_direct_rw_pages(env, io, rw, inode, pvec);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
cl_io_fini(env, io);
|
||||
return (bytes == pvec->ldp_size) ? 0 : (int)bytes;
|
||||
}
|
||||
|
|
|
@ -115,8 +115,8 @@ static struct ll_cl_context *ll_cl_init(struct file *file,
|
|||
struct inode *inode = vmpage->mapping->host;
|
||||
loff_t pos;
|
||||
|
||||
if (mutex_trylock(&inode->i_mutex)) {
|
||||
mutex_unlock(&(inode)->i_mutex);
|
||||
if (inode_trylock(inode)) {
|
||||
inode_unlock((inode));
|
||||
|
||||
/* this is too bad. Someone is trying to write the
|
||||
* page w/o holding inode mutex. This means we can
|
||||
|
|
|
@ -403,7 +403,7 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
|
|||
* 1. Need inode mutex to operate transient pages.
|
||||
*/
|
||||
if (iov_iter_rw(iter) == READ)
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
|
||||
LASSERT(obj->cob_transient_pages == 0);
|
||||
while (iov_iter_count(iter)) {
|
||||
|
@ -454,7 +454,7 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
|
|||
out:
|
||||
LASSERT(obj->cob_transient_pages == 0);
|
||||
if (iov_iter_rw(iter) == READ)
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
|
||||
if (tot_bytes > 0) {
|
||||
if (iov_iter_rw(iter) == WRITE) {
|
||||
|
|
|
@ -439,7 +439,7 @@ static int vvp_io_setattr_start(const struct lu_env *env,
|
|||
struct inode *inode = ccc_object_inode(io->ci_obj);
|
||||
int result = 0;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
if (cl_io_is_trunc(io))
|
||||
result = vvp_io_setattr_trunc(env, ios, inode,
|
||||
io->u.ci_setattr.sa_attr.lvb_size);
|
||||
|
@ -459,7 +459,7 @@ static void vvp_io_setattr_end(const struct lu_env *env,
|
|||
* because osc has already notified to destroy osc_extents. */
|
||||
vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);
|
||||
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
}
|
||||
|
||||
static void vvp_io_setattr_fini(const struct lu_env *env,
|
||||
|
|
|
@ -428,7 +428,7 @@ static void vvp_transient_page_verify(const struct cl_page *page)
|
|||
{
|
||||
struct inode *inode = ccc_object_inode(page->cp_obj);
|
||||
|
||||
LASSERT(!mutex_trylock(&inode->i_mutex));
|
||||
LASSERT(!inode_trylock(inode));
|
||||
}
|
||||
|
||||
static int vvp_transient_page_own(const struct lu_env *env,
|
||||
|
@ -480,9 +480,9 @@ static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
|
|||
struct inode *inode = ccc_object_inode(slice->cpl_obj);
|
||||
int locked;
|
||||
|
||||
locked = !mutex_trylock(&inode->i_mutex);
|
||||
locked = !inode_trylock(inode);
|
||||
if (!locked)
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
return locked ? -EBUSY : -ENODATA;
|
||||
}
|
||||
|
||||
|
@ -502,7 +502,7 @@ static void vvp_transient_page_fini(const struct lu_env *env,
|
|||
struct ccc_object *clobj = cl2ccc(clp->cp_obj);
|
||||
|
||||
vvp_page_fini_common(cp);
|
||||
LASSERT(!mutex_trylock(&clobj->cob_inode->i_mutex));
|
||||
LASSERT(!inode_trylock(clobj->cob_inode));
|
||||
clobj->cob_transient_pages--;
|
||||
}
|
||||
|
||||
|
@ -548,7 +548,7 @@ int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
|
|||
} else {
|
||||
struct ccc_object *clobj = cl2ccc(obj);
|
||||
|
||||
LASSERT(!mutex_trylock(&clobj->cob_inode->i_mutex));
|
||||
LASSERT(!inode_trylock(clobj->cob_inode));
|
||||
cl_page_slice_add(page, &cpg->cpg_cl, obj,
|
||||
&vvp_transient_page_ops);
|
||||
clobj->cob_transient_pages++;
|
||||
|
|
|
@ -82,14 +82,14 @@ static int create_file(const char *name, umode_t mode,
|
|||
{
|
||||
int error;
|
||||
|
||||
mutex_lock(&d_inode(parent)->i_mutex);
|
||||
inode_lock(d_inode(parent));
|
||||
*dentry = lookup_one_len(name, parent, strlen(name));
|
||||
if (!IS_ERR(*dentry))
|
||||
error = ipathfs_mknod(d_inode(parent), *dentry,
|
||||
mode, fops, data);
|
||||
else
|
||||
error = PTR_ERR(*dentry);
|
||||
mutex_unlock(&d_inode(parent)->i_mutex);
|
||||
inode_unlock(d_inode(parent));
|
||||
|
||||
return error;
|
||||
}
|
||||
|
@ -295,7 +295,7 @@ static int remove_device_files(struct super_block *sb,
|
|||
int ret;
|
||||
|
||||
root = dget(sb->s_root);
|
||||
mutex_lock(&d_inode(root)->i_mutex);
|
||||
inode_lock(d_inode(root));
|
||||
snprintf(unit, sizeof unit, "%02d", dd->ipath_unit);
|
||||
dir = lookup_one_len(unit, root, strlen(unit));
|
||||
|
||||
|
@ -311,7 +311,7 @@ static int remove_device_files(struct super_block *sb,
|
|||
ret = simple_rmdir(d_inode(root), dir);
|
||||
|
||||
bail:
|
||||
mutex_unlock(&d_inode(root)->i_mutex);
|
||||
inode_unlock(d_inode(root));
|
||||
dput(root);
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -673,7 +673,7 @@ printer_fsync(struct file *fd, loff_t start, loff_t end, int datasync)
|
|||
unsigned long flags;
|
||||
int tx_list_empty;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
spin_lock_irqsave(&dev->lock, flags);
|
||||
tx_list_empty = (likely(list_empty(&dev->tx_reqs)));
|
||||
spin_unlock_irqrestore(&dev->lock, flags);
|
||||
|
@ -683,7 +683,7 @@ printer_fsync(struct file *fd, loff_t start, loff_t end, int datasync)
|
|||
wait_event_interruptible(dev->tx_flush_wait,
|
||||
(likely(list_empty(&dev->tx_reqs_active))));
|
||||
}
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -1521,10 +1521,10 @@ static void destroy_ep_files (struct dev_data *dev)
|
|||
spin_unlock_irq (&dev->lock);
|
||||
|
||||
/* break link to dcache */
|
||||
mutex_lock (&parent->i_mutex);
|
||||
inode_lock(parent);
|
||||
d_delete (dentry);
|
||||
dput (dentry);
|
||||
mutex_unlock (&parent->i_mutex);
|
||||
inode_unlock(parent);
|
||||
|
||||
spin_lock_irq (&dev->lock);
|
||||
}
|
||||
|
|
|
@ -91,7 +91,7 @@ static ssize_t queue_dbg_read(struct file *file, char __user *buf,
|
|||
if (!access_ok(VERIFY_WRITE, buf, nbytes))
|
||||
return -EFAULT;
|
||||
|
||||
mutex_lock(&file_inode(file)->i_mutex);
|
||||
inode_lock(file_inode(file));
|
||||
list_for_each_entry_safe(req, tmp_req, queue, queue) {
|
||||
len = snprintf(tmpbuf, sizeof(tmpbuf),
|
||||
"%8p %08x %c%c%c %5d %c%c%c\n",
|
||||
|
@ -118,7 +118,7 @@ static ssize_t queue_dbg_read(struct file *file, char __user *buf,
|
|||
nbytes -= len;
|
||||
buf += len;
|
||||
}
|
||||
mutex_unlock(&file_inode(file)->i_mutex);
|
||||
inode_unlock(file_inode(file));
|
||||
|
||||
return actual;
|
||||
}
|
||||
|
@ -143,7 +143,7 @@ static int regs_dbg_open(struct inode *inode, struct file *file)
|
|||
u32 *data;
|
||||
int ret = -ENOMEM;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
udc = inode->i_private;
|
||||
data = kmalloc(inode->i_size, GFP_KERNEL);
|
||||
if (!data)
|
||||
|
@ -158,7 +158,7 @@ static int regs_dbg_open(struct inode *inode, struct file *file)
|
|||
ret = 0;
|
||||
|
||||
out:
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -169,11 +169,11 @@ static ssize_t regs_dbg_read(struct file *file, char __user *buf,
|
|||
struct inode *inode = file_inode(file);
|
||||
int ret;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
ret = simple_read_from_buffer(buf, nbytes, ppos,
|
||||
file->private_data,
|
||||
file_inode(file)->i_size);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -78,13 +78,13 @@ int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasy
|
|||
if (!info->fbdefio)
|
||||
return 0;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
/* Kill off the delayed work */
|
||||
cancel_delayed_work_sync(&info->deferred_work);
|
||||
|
||||
/* Run it immediately */
|
||||
schedule_delayed_work(&info->deferred_work, 0);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -449,14 +449,14 @@ static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end,
|
|||
if (retval)
|
||||
return retval;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);
|
||||
|
||||
fid = filp->private_data;
|
||||
v9fs_blank_wstat(&wstat);
|
||||
|
||||
retval = p9_client_wstat(fid, &wstat);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
|
||||
return retval;
|
||||
}
|
||||
|
@ -472,13 +472,13 @@ int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end,
|
|||
if (retval)
|
||||
return retval;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);
|
||||
|
||||
fid = filp->private_data;
|
||||
|
||||
retval = p9_client_fsync(fid, datasync);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
|
||||
return retval;
|
||||
}
|
||||
|
|
|
@ -33,11 +33,11 @@ affs_file_release(struct inode *inode, struct file *filp)
|
|||
inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));
|
||||
|
||||
if (atomic_dec_and_test(&AFFS_I(inode)->i_opencnt)) {
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
if (inode->i_size != AFFS_I(inode)->mmu_private)
|
||||
affs_truncate(inode);
|
||||
affs_free_prealloc(inode);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -958,12 +958,12 @@ int affs_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
|
|||
if (err)
|
||||
return err;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
ret = write_inode_now(inode, 0);
|
||||
err = sync_blockdev(inode->i_sb->s_bdev);
|
||||
if (!ret)
|
||||
ret = err;
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
return ret;
|
||||
}
|
||||
const struct file_operations affs_file_operations = {
|
||||
|
|
|
@ -483,7 +483,7 @@ static int afs_do_getlk(struct file *file, struct file_lock *fl)
|
|||
|
||||
fl->fl_type = F_UNLCK;
|
||||
|
||||
mutex_lock(&vnode->vfs_inode.i_mutex);
|
||||
inode_lock(&vnode->vfs_inode);
|
||||
|
||||
/* check local lock records first */
|
||||
ret = 0;
|
||||
|
@ -505,7 +505,7 @@ static int afs_do_getlk(struct file *file, struct file_lock *fl)
|
|||
}
|
||||
|
||||
error:
|
||||
mutex_unlock(&vnode->vfs_inode.i_mutex);
|
||||
inode_unlock(&vnode->vfs_inode);
|
||||
_leave(" = %d [%hd]", ret, fl->fl_type);
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -693,7 +693,7 @@ int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
|
|||
ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
|
||||
if (ret)
|
||||
return ret;
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
|
||||
/* use a writeback record as a marker in the queue - when this reaches
|
||||
* the front of the queue, all the outstanding writes are either
|
||||
|
@ -735,7 +735,7 @@ int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
|
|||
afs_put_writeback(wb);
|
||||
_leave(" = %d", ret);
|
||||
out:
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@ -195,7 +195,7 @@ int notify_change(struct dentry * dentry, struct iattr * attr, struct inode **de
|
|||
struct timespec now;
|
||||
unsigned int ia_valid = attr->ia_valid;
|
||||
|
||||
WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex));
|
||||
WARN_ON_ONCE(!inode_is_locked(inode));
|
||||
|
||||
if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_TIMES_SET)) {
|
||||
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
|
||||
|
|
|
@ -638,11 +638,11 @@ static ssize_t bm_entry_write(struct file *file, const char __user *buffer,
|
|||
case 3:
|
||||
/* Delete this handler. */
|
||||
root = dget(file->f_path.dentry->d_sb->s_root);
|
||||
mutex_lock(&d_inode(root)->i_mutex);
|
||||
inode_lock(d_inode(root));
|
||||
|
||||
kill_node(e);
|
||||
|
||||
mutex_unlock(&d_inode(root)->i_mutex);
|
||||
inode_unlock(d_inode(root));
|
||||
dput(root);
|
||||
break;
|
||||
default:
|
||||
|
@ -675,7 +675,7 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
|
|||
return PTR_ERR(e);
|
||||
|
||||
root = dget(sb->s_root);
|
||||
mutex_lock(&d_inode(root)->i_mutex);
|
||||
inode_lock(d_inode(root));
|
||||
dentry = lookup_one_len(e->name, root, strlen(e->name));
|
||||
err = PTR_ERR(dentry);
|
||||
if (IS_ERR(dentry))
|
||||
|
@ -711,7 +711,7 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
|
|||
out2:
|
||||
dput(dentry);
|
||||
out:
|
||||
mutex_unlock(&d_inode(root)->i_mutex);
|
||||
inode_unlock(d_inode(root));
|
||||
dput(root);
|
||||
|
||||
if (err) {
|
||||
|
@ -754,12 +754,12 @@ static ssize_t bm_status_write(struct file *file, const char __user *buffer,
|
|||
case 3:
|
||||
/* Delete all handlers. */
|
||||
root = dget(file->f_path.dentry->d_sb->s_root);
|
||||
mutex_lock(&d_inode(root)->i_mutex);
|
||||
inode_lock(d_inode(root));
|
||||
|
||||
while (!list_empty(&entries))
|
||||
kill_node(list_entry(entries.next, Node, list));
|
||||
|
||||
mutex_unlock(&d_inode(root)->i_mutex);
|
||||
inode_unlock(d_inode(root));
|
||||
dput(root);
|
||||
break;
|
||||
default:
|
||||
|
|
|
@ -346,9 +346,9 @@ static loff_t block_llseek(struct file *file, loff_t offset, int whence)
|
|||
struct inode *bd_inode = bdev_file_inode(file);
|
||||
loff_t retval;
|
||||
|
||||
mutex_lock(&bd_inode->i_mutex);
|
||||
inode_lock(bd_inode);
|
||||
retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
|
||||
mutex_unlock(&bd_inode->i_mutex);
|
||||
inode_unlock(bd_inode);
|
||||
return retval;
|
||||
}
|
||||
|
||||
|
@ -1142,9 +1142,9 @@ void bd_set_size(struct block_device *bdev, loff_t size)
|
|||
{
|
||||
unsigned bsize = bdev_logical_block_size(bdev);
|
||||
|
||||
mutex_lock(&bdev->bd_inode->i_mutex);
|
||||
inode_lock(bdev->bd_inode);
|
||||
i_size_write(bdev->bd_inode, size);
|
||||
mutex_unlock(&bdev->bd_inode->i_mutex);
|
||||
inode_unlock(bdev->bd_inode);
|
||||
while (bsize < PAGE_CACHE_SIZE) {
|
||||
if (size & bsize)
|
||||
break;
|
||||
|
@ -1741,9 +1741,9 @@ static void blkdev_vm_open(struct vm_area_struct *vma)
|
|||
struct inode *bd_inode = bdev_file_inode(vma->vm_file);
|
||||
struct block_device *bdev = I_BDEV(bd_inode);
|
||||
|
||||
mutex_lock(&bd_inode->i_mutex);
|
||||
inode_lock(bd_inode);
|
||||
bdev->bd_map_count++;
|
||||
mutex_unlock(&bd_inode->i_mutex);
|
||||
inode_unlock(bd_inode);
|
||||
}
|
||||
|
||||
static void blkdev_vm_close(struct vm_area_struct *vma)
|
||||
|
@ -1751,9 +1751,9 @@ static void blkdev_vm_close(struct vm_area_struct *vma)
|
|||
struct inode *bd_inode = bdev_file_inode(vma->vm_file);
|
||||
struct block_device *bdev = I_BDEV(bd_inode);
|
||||
|
||||
mutex_lock(&bd_inode->i_mutex);
|
||||
inode_lock(bd_inode);
|
||||
bdev->bd_map_count--;
|
||||
mutex_unlock(&bd_inode->i_mutex);
|
||||
inode_unlock(bd_inode);
|
||||
}
|
||||
|
||||
static const struct vm_operations_struct blkdev_dax_vm_ops = {
|
||||
|
@ -1777,7 +1777,7 @@ static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
|
|||
struct block_device *bdev = I_BDEV(bd_inode);
|
||||
|
||||
file_accessed(file);
|
||||
mutex_lock(&bd_inode->i_mutex);
|
||||
inode_lock(bd_inode);
|
||||
bdev->bd_map_count++;
|
||||
if (IS_DAX(bd_inode)) {
|
||||
vma->vm_ops = &blkdev_dax_vm_ops;
|
||||
|
@ -1785,7 +1785,7 @@ static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
|
|||
} else {
|
||||
vma->vm_ops = &blkdev_default_vm_ops;
|
||||
}
|
||||
mutex_unlock(&bd_inode->i_mutex);
|
||||
inode_unlock(bd_inode);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -1762,17 +1762,17 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
|
|||
loff_t pos;
|
||||
size_t count;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
err = generic_write_checks(iocb, from);
|
||||
if (err <= 0) {
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
return err;
|
||||
}
|
||||
|
||||
current->backing_dev_info = inode_to_bdi(inode);
|
||||
err = file_remove_privs(file);
|
||||
if (err) {
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
@ -1783,7 +1783,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
|
|||
* to stop this write operation to ensure FS consistency.
|
||||
*/
|
||||
if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
err = -EROFS;
|
||||
goto out;
|
||||
}
|
||||
|
@ -1804,7 +1804,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
|
|||
end_pos = round_up(pos + count, root->sectorsize);
|
||||
err = btrfs_cont_expand(inode, i_size_read(inode), end_pos);
|
||||
if (err) {
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
@ -1820,7 +1820,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
|
|||
iocb->ki_pos = pos + num_written;
|
||||
}
|
||||
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
|
||||
/*
|
||||
* We also have to set last_sub_trans to the current log transid,
|
||||
|
@ -1909,7 +1909,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
atomic_inc(&root->log_batch);
|
||||
full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
|
||||
&BTRFS_I(inode)->runtime_flags);
|
||||
|
@ -1961,7 +1961,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
|
|||
ret = start_ordered_ops(inode, start, end);
|
||||
}
|
||||
if (ret) {
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
goto out;
|
||||
}
|
||||
atomic_inc(&root->log_batch);
|
||||
|
@ -2007,7 +2007,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
|
|||
*/
|
||||
clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
|
||||
&BTRFS_I(inode)->runtime_flags);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
@ -2031,7 +2031,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
|
|||
trans = btrfs_start_transaction(root, 0);
|
||||
if (IS_ERR(trans)) {
|
||||
ret = PTR_ERR(trans);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
goto out;
|
||||
}
|
||||
trans->sync = true;
|
||||
|
@ -2054,7 +2054,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
|
|||
* file again, but that will end up using the synchronization
|
||||
* inside btrfs_sync_log to keep things safe.
|
||||
*/
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
|
||||
/*
|
||||
* If any of the ordered extents had an error, just return it to user
|
||||
|
@ -2303,7 +2303,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
ino_size = round_up(inode->i_size, PAGE_CACHE_SIZE);
|
||||
ret = find_first_non_hole(inode, &offset, &len);
|
||||
if (ret < 0)
|
||||
|
@ -2343,7 +2343,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
|
|||
truncated_page = true;
|
||||
ret = btrfs_truncate_page(inode, offset, 0, 0);
|
||||
if (ret) {
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
@ -2419,7 +2419,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
|
|||
ret = btrfs_wait_ordered_range(inode, lockstart,
|
||||
lockend - lockstart + 1);
|
||||
if (ret) {
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
@ -2574,7 +2574,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
|
|||
ret = btrfs_end_transaction(trans, root);
|
||||
}
|
||||
}
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
if (ret && !err)
|
||||
err = ret;
|
||||
return err;
|
||||
|
@ -2658,7 +2658,7 @@ static long btrfs_fallocate(struct file *file, int mode,
|
|||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
ret = inode_newsize_ok(inode, alloc_end);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
@ -2816,7 +2816,7 @@ static long btrfs_fallocate(struct file *file, int mode,
|
|||
* So this is completely used as cleanup.
|
||||
*/
|
||||
btrfs_qgroup_free_data(inode, alloc_start, alloc_end - alloc_start);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
/* Let go of our reservation. */
|
||||
btrfs_free_reserved_data_space(inode, alloc_start,
|
||||
alloc_end - alloc_start);
|
||||
|
@ -2892,7 +2892,7 @@ static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
|
|||
struct inode *inode = file->f_mapping->host;
|
||||
int ret;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
switch (whence) {
|
||||
case SEEK_END:
|
||||
case SEEK_CUR:
|
||||
|
@ -2901,20 +2901,20 @@ static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
|
|||
case SEEK_DATA:
|
||||
case SEEK_HOLE:
|
||||
if (offset >= i_size_read(inode)) {
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
ret = find_desired_extent(inode, &offset, whence);
|
||||
if (ret) {
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
|
||||
out:
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
return offset;
|
||||
}
|
||||
|
||||
|
|
|
@ -8447,7 +8447,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
|
|||
* not unlock the i_mutex at this case.
|
||||
*/
|
||||
if (offset + count <= inode->i_size) {
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
relock = true;
|
||||
}
|
||||
ret = btrfs_delalloc_reserve_space(inode, offset, count);
|
||||
|
@ -8504,7 +8504,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
|
|||
if (wakeup)
|
||||
inode_dio_end(inode);
|
||||
if (relock)
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -240,7 +240,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
|
||||
ip_oldflags = ip->flags;
|
||||
i_oldflags = inode->i_flags;
|
||||
|
@ -358,7 +358,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
|
|||
}
|
||||
|
||||
out_unlock:
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
mnt_drop_write_file(file);
|
||||
return ret;
|
||||
}
|
||||
|
@ -881,7 +881,7 @@ static noinline int btrfs_mksubvol(struct path *parent,
|
|||
out_dput:
|
||||
dput(dentry);
|
||||
out_unlock:
|
||||
mutex_unlock(&dir->i_mutex);
|
||||
inode_unlock(dir);
|
||||
return error;
|
||||
}
|
||||
|
||||
|
@ -1393,18 +1393,18 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
|
|||
ra_index += cluster;
|
||||
}
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)
|
||||
BTRFS_I(inode)->force_compress = compress_type;
|
||||
ret = cluster_pages_for_defrag(inode, pages, i, cluster);
|
||||
if (ret < 0) {
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
goto out_ra;
|
||||
}
|
||||
|
||||
defrag_count += ret;
|
||||
balance_dirty_pages_ratelimited(inode->i_mapping);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
|
||||
if (newer_than) {
|
||||
if (newer_off == (u64)-1)
|
||||
|
@ -1465,9 +1465,9 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
|
|||
|
||||
out_ra:
|
||||
if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) {
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
BTRFS_I(inode)->force_compress = BTRFS_COMPRESS_NONE;
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
}
|
||||
if (!file)
|
||||
kfree(ra);
|
||||
|
@ -2430,7 +2430,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
|
|||
goto out_dput;
|
||||
}
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
|
||||
/*
|
||||
* Don't allow to delete a subvolume with send in progress. This is
|
||||
|
@ -2543,7 +2543,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
|
|||
spin_unlock(&dest->root_item_lock);
|
||||
}
|
||||
out_unlock_inode:
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
if (!err) {
|
||||
d_invalidate(dentry);
|
||||
btrfs_invalidate_inodes(dest);
|
||||
|
@ -2559,7 +2559,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
|
|||
out_dput:
|
||||
dput(dentry);
|
||||
out_unlock_dir:
|
||||
mutex_unlock(&dir->i_mutex);
|
||||
inode_unlock(dir);
|
||||
out_drop_write:
|
||||
mnt_drop_write_file(file);
|
||||
out:
|
||||
|
@ -2857,8 +2857,8 @@ static inline void lock_extent_range(struct inode *inode, u64 off, u64 len)
|
|||
|
||||
static void btrfs_double_inode_unlock(struct inode *inode1, struct inode *inode2)
|
||||
{
|
||||
mutex_unlock(&inode1->i_mutex);
|
||||
mutex_unlock(&inode2->i_mutex);
|
||||
inode_unlock(inode1);
|
||||
inode_unlock(inode2);
|
||||
}
|
||||
|
||||
static void btrfs_double_inode_lock(struct inode *inode1, struct inode *inode2)
|
||||
|
@ -2866,8 +2866,8 @@ static void btrfs_double_inode_lock(struct inode *inode1, struct inode *inode2)
|
|||
if (inode1 < inode2)
|
||||
swap(inode1, inode2);
|
||||
|
||||
mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT);
|
||||
mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);
|
||||
inode_lock_nested(inode1, I_MUTEX_PARENT);
|
||||
inode_lock_nested(inode2, I_MUTEX_CHILD);
|
||||
}
|
||||
|
||||
static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
|
||||
|
@ -3026,7 +3026,7 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
|
|||
return 0;
|
||||
|
||||
if (same_inode) {
|
||||
mutex_lock(&src->i_mutex);
|
||||
inode_lock(src);
|
||||
|
||||
ret = extent_same_check_offsets(src, loff, &len, olen);
|
||||
if (ret)
|
||||
|
@ -3101,7 +3101,7 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
|
|||
btrfs_cmp_data_free(&cmp);
|
||||
out_unlock:
|
||||
if (same_inode)
|
||||
mutex_unlock(&src->i_mutex);
|
||||
inode_unlock(src);
|
||||
else
|
||||
btrfs_double_inode_unlock(src, dst);
|
||||
|
||||
|
@ -3749,7 +3749,7 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
|
|||
if (!same_inode) {
|
||||
btrfs_double_inode_lock(src, inode);
|
||||
} else {
|
||||
mutex_lock(&src->i_mutex);
|
||||
inode_lock(src);
|
||||
}
|
||||
|
||||
/* determine range to clone */
|
||||
|
@ -3820,7 +3820,7 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
|
|||
if (!same_inode)
|
||||
btrfs_double_inode_unlock(src, inode);
|
||||
else
|
||||
mutex_unlock(&src->i_mutex);
|
||||
inode_unlock(src);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@ -3030,7 +3030,7 @@ int prealloc_file_extent_cluster(struct inode *inode,
|
|||
int ret = 0;
|
||||
|
||||
BUG_ON(cluster->start != cluster->boundary[0]);
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
|
||||
ret = btrfs_check_data_free_space(inode, cluster->start,
|
||||
cluster->end + 1 - cluster->start);
|
||||
|
@ -3057,7 +3057,7 @@ int prealloc_file_extent_cluster(struct inode *inode,
|
|||
btrfs_free_reserved_data_space(inode, cluster->start,
|
||||
cluster->end + 1 - cluster->start);
|
||||
out:
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@ -4279,7 +4279,7 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
|
|||
return PTR_ERR(inode);
|
||||
|
||||
/* Avoid truncate/dio/punch hole.. */
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
inode_dio_wait(inode);
|
||||
|
||||
physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
|
||||
|
@ -4358,7 +4358,7 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
|
|||
}
|
||||
ret = COPY_COMPLETE;
|
||||
out:
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
iput(inode);
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -126,7 +126,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
|
|||
* locks the inode's i_mutex before calling setxattr or removexattr.
|
||||
*/
|
||||
if (flags & XATTR_REPLACE) {
|
||||
ASSERT(mutex_is_locked(&inode->i_mutex));
|
||||
ASSERT(inode_is_locked(inode));
|
||||
di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode),
|
||||
name, name_len, 0);
|
||||
if (!di)
|
||||
|
|
|
@ -446,7 +446,7 @@ static int cachefiles_attr_changed(struct fscache_object *_object)
|
|||
return 0;
|
||||
|
||||
cachefiles_begin_secure(cache, &saved_cred);
|
||||
mutex_lock(&d_inode(object->backer)->i_mutex);
|
||||
inode_lock(d_inode(object->backer));
|
||||
|
||||
/* if there's an extension to a partial page at the end of the backing
|
||||
* file, we need to discard the partial page so that we pick up new
|
||||
|
@ -465,7 +465,7 @@ static int cachefiles_attr_changed(struct fscache_object *_object)
|
|||
ret = notify_change(object->backer, &newattrs, NULL);
|
||||
|
||||
truncate_failed:
|
||||
mutex_unlock(&d_inode(object->backer)->i_mutex);
|
||||
inode_unlock(d_inode(object->backer));
|
||||
cachefiles_end_secure(cache, saved_cred);
|
||||
|
||||
if (ret == -EIO) {
|
||||
|
|
|
@ -295,7 +295,7 @@ static int cachefiles_bury_object(struct cachefiles_cache *cache,
|
|||
cachefiles_mark_object_buried(cache, rep, why);
|
||||
}
|
||||
|
||||
mutex_unlock(&d_inode(dir)->i_mutex);
|
||||
inode_unlock(d_inode(dir));
|
||||
|
||||
if (ret == -EIO)
|
||||
cachefiles_io_error(cache, "Unlink failed");
|
||||
|
@ -306,7 +306,7 @@ static int cachefiles_bury_object(struct cachefiles_cache *cache,
|
|||
|
||||
/* directories have to be moved to the graveyard */
|
||||
_debug("move stale object to graveyard");
|
||||
mutex_unlock(&d_inode(dir)->i_mutex);
|
||||
inode_unlock(d_inode(dir));
|
||||
|
||||
try_again:
|
||||
/* first step is to make up a grave dentry in the graveyard */
|
||||
|
@ -423,13 +423,13 @@ int cachefiles_delete_object(struct cachefiles_cache *cache,
|
|||
|
||||
dir = dget_parent(object->dentry);
|
||||
|
||||
mutex_lock_nested(&d_inode(dir)->i_mutex, I_MUTEX_PARENT);
|
||||
inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
|
||||
|
||||
if (test_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->fscache.flags)) {
|
||||
/* object allocation for the same key preemptively deleted this
|
||||
* object's file so that it could create its own file */
|
||||
_debug("object preemptively buried");
|
||||
mutex_unlock(&d_inode(dir)->i_mutex);
|
||||
inode_unlock(d_inode(dir));
|
||||
ret = 0;
|
||||
} else {
|
||||
/* we need to check that our parent is _still_ our parent - it
|
||||
|
@ -442,7 +442,7 @@ int cachefiles_delete_object(struct cachefiles_cache *cache,
|
|||
/* it got moved, presumably by cachefilesd culling it,
|
||||
* so it's no longer in the key path and we can ignore
|
||||
* it */
|
||||
mutex_unlock(&d_inode(dir)->i_mutex);
|
||||
inode_unlock(d_inode(dir));
|
||||
ret = 0;
|
||||
}
|
||||
}
|
||||
|
@ -501,7 +501,7 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent,
|
|||
/* search the current directory for the element name */
|
||||
_debug("lookup '%s'", name);
|
||||
|
||||
mutex_lock_nested(&d_inode(dir)->i_mutex, I_MUTEX_PARENT);
|
||||
inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
|
||||
|
||||
start = jiffies;
|
||||
next = lookup_one_len(name, dir, nlen);
|
||||
|
@ -585,7 +585,7 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent,
|
|||
/* process the next component */
|
||||
if (key) {
|
||||
_debug("advance");
|
||||
mutex_unlock(&d_inode(dir)->i_mutex);
|
||||
inode_unlock(d_inode(dir));
|
||||
dput(dir);
|
||||
dir = next;
|
||||
next = NULL;
|
||||
|
@ -623,7 +623,7 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent,
|
|||
/* note that we're now using this object */
|
||||
ret = cachefiles_mark_object_active(cache, object);
|
||||
|
||||
mutex_unlock(&d_inode(dir)->i_mutex);
|
||||
inode_unlock(d_inode(dir));
|
||||
dput(dir);
|
||||
dir = NULL;
|
||||
|
||||
|
@ -705,7 +705,7 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent,
|
|||
cachefiles_io_error(cache, "Lookup failed");
|
||||
next = NULL;
|
||||
error:
|
||||
mutex_unlock(&d_inode(dir)->i_mutex);
|
||||
inode_unlock(d_inode(dir));
|
||||
dput(next);
|
||||
error_out2:
|
||||
dput(dir);
|
||||
|
@ -729,7 +729,7 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
|
|||
_enter(",,%s", dirname);
|
||||
|
||||
/* search the current directory for the element name */
|
||||
mutex_lock(&d_inode(dir)->i_mutex);
|
||||
inode_lock(d_inode(dir));
|
||||
|
||||
start = jiffies;
|
||||
subdir = lookup_one_len(dirname, dir, strlen(dirname));
|
||||
|
@ -768,7 +768,7 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
|
|||
d_backing_inode(subdir)->i_ino);
|
||||
}
|
||||
|
||||
mutex_unlock(&d_inode(dir)->i_mutex);
|
||||
inode_unlock(d_inode(dir));
|
||||
|
||||
/* we need to make sure the subdir is a directory */
|
||||
ASSERT(d_backing_inode(subdir));
|
||||
|
@ -800,19 +800,19 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
|
|||
return ERR_PTR(ret);
|
||||
|
||||
mkdir_error:
|
||||
mutex_unlock(&d_inode(dir)->i_mutex);
|
||||
inode_unlock(d_inode(dir));
|
||||
dput(subdir);
|
||||
pr_err("mkdir %s failed with error %d\n", dirname, ret);
|
||||
return ERR_PTR(ret);
|
||||
|
||||
lookup_error:
|
||||
mutex_unlock(&d_inode(dir)->i_mutex);
|
||||
inode_unlock(d_inode(dir));
|
||||
ret = PTR_ERR(subdir);
|
||||
pr_err("Lookup %s failed with error %d\n", dirname, ret);
|
||||
return ERR_PTR(ret);
|
||||
|
||||
nomem_d_alloc:
|
||||
mutex_unlock(&d_inode(dir)->i_mutex);
|
||||
inode_unlock(d_inode(dir));
|
||||
_leave(" = -ENOMEM");
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
@ -837,7 +837,7 @@ static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache,
|
|||
// dir, filename);
|
||||
|
||||
/* look up the victim */
|
||||
mutex_lock_nested(&d_inode(dir)->i_mutex, I_MUTEX_PARENT);
|
||||
inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
|
||||
|
||||
start = jiffies;
|
||||
victim = lookup_one_len(filename, dir, strlen(filename));
|
||||
|
@ -852,7 +852,7 @@ static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache,
|
|||
* at the netfs's request whilst the cull was in progress
|
||||
*/
|
||||
if (d_is_negative(victim)) {
|
||||
mutex_unlock(&d_inode(dir)->i_mutex);
|
||||
inode_unlock(d_inode(dir));
|
||||
dput(victim);
|
||||
_leave(" = -ENOENT [absent]");
|
||||
return ERR_PTR(-ENOENT);
|
||||
|
@ -881,13 +881,13 @@ static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache,
|
|||
|
||||
object_in_use:
|
||||
read_unlock(&cache->active_lock);
|
||||
mutex_unlock(&d_inode(dir)->i_mutex);
|
||||
inode_unlock(d_inode(dir));
|
||||
dput(victim);
|
||||
//_leave(" = -EBUSY [in use]");
|
||||
return ERR_PTR(-EBUSY);
|
||||
|
||||
lookup_error:
|
||||
mutex_unlock(&d_inode(dir)->i_mutex);
|
||||
inode_unlock(d_inode(dir));
|
||||
ret = PTR_ERR(victim);
|
||||
if (ret == -ENOENT) {
|
||||
/* file or dir now absent - probably retired by netfs */
|
||||
|
@ -947,7 +947,7 @@ int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
|
|||
return 0;
|
||||
|
||||
error_unlock:
|
||||
mutex_unlock(&d_inode(dir)->i_mutex);
|
||||
inode_unlock(d_inode(dir));
|
||||
error:
|
||||
dput(victim);
|
||||
if (ret == -ENOENT) {
|
||||
|
@ -982,7 +982,7 @@ int cachefiles_check_in_use(struct cachefiles_cache *cache, struct dentry *dir,
|
|||
if (IS_ERR(victim))
|
||||
return PTR_ERR(victim);
|
||||
|
||||
mutex_unlock(&d_inode(dir)->i_mutex);
|
||||
inode_unlock(d_inode(dir));
|
||||
dput(victim);
|
||||
//_leave(" = 0");
|
||||
return 0;
|
||||
|
|
|
@ -197,7 +197,7 @@ void ceph_fscache_register_inode_cookie(struct ceph_fs_client* fsc,
|
|||
return;
|
||||
|
||||
/* Avoid multiple racing open requests */
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
|
||||
if (ci->fscache)
|
||||
goto done;
|
||||
|
@ -207,7 +207,7 @@ void ceph_fscache_register_inode_cookie(struct ceph_fs_client* fsc,
|
|||
ci, true);
|
||||
fscache_check_consistency(ci->fscache);
|
||||
done:
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
|
||||
}
|
||||
|
||||
|
|
|
@ -2030,7 +2030,7 @@ int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
|
|||
if (datasync)
|
||||
goto out;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
|
||||
dirty = try_flush_caps(inode, &flush_tid);
|
||||
dout("fsync dirty caps are %s\n", ceph_cap_string(dirty));
|
||||
|
@ -2046,7 +2046,7 @@ int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
|
|||
ret = wait_event_interruptible(ci->i_cap_wq,
|
||||
caps_are_flushed(inode, flush_tid));
|
||||
}
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
out:
|
||||
dout("fsync %p%s result=%d\n", inode, datasync ? " datasync" : "", ret);
|
||||
return ret;
|
||||
|
|
|
@ -507,7 +507,7 @@ static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
|
|||
loff_t old_offset = ceph_make_fpos(fi->frag, fi->next_offset);
|
||||
loff_t retval;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
retval = -EINVAL;
|
||||
switch (whence) {
|
||||
case SEEK_CUR:
|
||||
|
@ -542,7 +542,7 @@ static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
|
|||
}
|
||||
}
|
||||
out:
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
return retval;
|
||||
}
|
||||
|
||||
|
|
|
@ -215,7 +215,7 @@ static int ceph_get_name(struct dentry *parent, char *name,
|
|||
if (IS_ERR(req))
|
||||
return PTR_ERR(req);
|
||||
|
||||
mutex_lock(&d_inode(parent)->i_mutex);
|
||||
inode_lock(d_inode(parent));
|
||||
|
||||
req->r_inode = d_inode(child);
|
||||
ihold(d_inode(child));
|
||||
|
@ -224,7 +224,7 @@ static int ceph_get_name(struct dentry *parent, char *name,
|
|||
req->r_num_caps = 2;
|
||||
err = ceph_mdsc_do_request(mdsc, NULL, req);
|
||||
|
||||
mutex_unlock(&d_inode(parent)->i_mutex);
|
||||
inode_unlock(d_inode(parent));
|
||||
|
||||
if (!err) {
|
||||
struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
|
||||
|
|
|
@ -1014,7 +1014,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
|
|||
if (!prealloc_cf)
|
||||
return -ENOMEM;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
|
||||
/* We can write back this queue in page reclaim */
|
||||
current->backing_dev_info = inode_to_bdi(inode);
|
||||
|
@ -1070,7 +1070,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
|
|||
(iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC)) {
|
||||
struct ceph_snap_context *snapc;
|
||||
struct iov_iter data;
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
|
||||
spin_lock(&ci->i_ceph_lock);
|
||||
if (__ceph_have_pending_cap_snap(ci)) {
|
||||
|
@ -1097,7 +1097,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
|
|||
"got EOLDSNAPC, retrying\n",
|
||||
inode, ceph_vinop(inode),
|
||||
pos, (unsigned)count);
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
goto retry_snap;
|
||||
}
|
||||
if (written > 0)
|
||||
|
@ -1117,7 +1117,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
|
|||
iocb->ki_pos = pos + written;
|
||||
if (inode->i_size > old_size)
|
||||
ceph_fscache_update_objectsize(inode);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
}
|
||||
|
||||
if (written >= 0) {
|
||||
|
@ -1147,7 +1147,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
|
|||
goto out_unlocked;
|
||||
|
||||
out:
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
out_unlocked:
|
||||
ceph_free_cap_flush(prealloc_cf);
|
||||
current->backing_dev_info = NULL;
|
||||
|
@@ -1162,7 +1162,7 @@ static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
struct inode *inode = file->f_mapping->host;
int ret;

mutex_lock(&inode->i_mutex);
inode_lock(inode);

if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
@@ -1207,7 +1207,7 @@ static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out:
mutex_unlock(&inode->i_mutex);
inode_unlock(inode);
return offset;
}
@@ -1363,7 +1363,7 @@ static long ceph_fallocate(struct file *file, int mode,
if (!prealloc_cf)
return -ENOMEM;

mutex_lock(&inode->i_mutex);
inode_lock(inode);

if (ceph_snap(inode) != CEPH_NOSNAP) {
ret = -EROFS;
@@ -1418,7 +1418,7 @@ static long ceph_fallocate(struct file *file, int mode,

ceph_put_cap_refs(ci, got);
unlock:
mutex_unlock(&inode->i_mutex);
inode_unlock(inode);
ceph_free_cap_flush(prealloc_cf);
return ret;
}
@@ -640,9 +640,9 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
while (*s && *s != sep)
s++;

mutex_lock(&dir->i_mutex);
inode_lock(dir);
child = lookup_one_len(p, dentry, s - p);
mutex_unlock(&dir->i_mutex);
inode_unlock(dir);
dput(dentry);
dentry = child;
} while (!IS_ERR(dentry));
@@ -2267,7 +2267,7 @@ int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
if (rc)
return rc;
mutex_lock(&inode->i_mutex);
inode_lock(inode);

xid = get_xid();
@@ -2292,7 +2292,7 @@ int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
}

free_xid(xid);
mutex_unlock(&inode->i_mutex);
inode_unlock(inode);
return rc;
}
@@ -2309,7 +2309,7 @@ int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
if (rc)
return rc;
mutex_lock(&inode->i_mutex);
inode_lock(inode);

xid = get_xid();
@@ -2326,7 +2326,7 @@ int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
}

free_xid(xid);
mutex_unlock(&inode->i_mutex);
inode_unlock(inode);
return rc;
}
@@ -2672,7 +2672,7 @@ cifs_writev(struct kiocb *iocb, struct iov_iter *from)
* with a brlock that prevents writing.
*/
down_read(&cinode->lock_sem);
mutex_lock(&inode->i_mutex);
inode_lock(inode);

rc = generic_write_checks(iocb, from);
if (rc <= 0)
@@ -2685,7 +2685,7 @@ cifs_writev(struct kiocb *iocb, struct iov_iter *from)
else
rc = -EACCES;
out:
mutex_unlock(&inode->i_mutex);
inode_unlock(inode);

if (rc > 0) {
ssize_t err = generic_write_sync(file, iocb->ki_pos - rc, rc);
@@ -427,13 +427,13 @@ static int coda_readdir(struct file *coda_file, struct dir_context *ctx)
if (host_file->f_op->iterate) {
struct inode *host_inode = file_inode(host_file);

mutex_lock(&host_inode->i_mutex);
inode_lock(host_inode);
ret = -ENOENT;
if (!IS_DEADDIR(host_inode)) {
ret = host_file->f_op->iterate(host_file, ctx);
file_accessed(host_file);
}
mutex_unlock(&host_inode->i_mutex);
inode_unlock(host_inode);
return ret;
}
/* Venus: we must read Venus dirents from a file */
@@ -71,12 +71,12 @@ coda_file_write_iter(struct kiocb *iocb, struct iov_iter *to)

host_file = cfi->cfi_container;
file_start_write(host_file);
mutex_lock(&coda_inode->i_mutex);
inode_lock(coda_inode);
ret = vfs_iter_write(cfi->cfi_container, to, &iocb->ki_pos);
coda_inode->i_size = file_inode(host_file)->i_size;
coda_inode->i_blocks = (coda_inode->i_size + 511) >> 9;
coda_inode->i_mtime = coda_inode->i_ctime = CURRENT_TIME_SEC;
mutex_unlock(&coda_inode->i_mutex);
inode_unlock(coda_inode);
file_end_write(host_file);
return ret;
}
@@ -203,7 +203,7 @@ int coda_fsync(struct file *coda_file, loff_t start, loff_t end, int datasync)
err = filemap_write_and_wait_range(coda_inode->i_mapping, start, end);
if (err)
return err;
mutex_lock(&coda_inode->i_mutex);
inode_lock(coda_inode);

cfi = CODA_FTOC(coda_file);
BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
@@ -212,7 +212,7 @@ int coda_fsync(struct file *coda_file, loff_t start, loff_t end, int datasync)
err = vfs_fsync(host_file, datasync);
if (!err && !datasync)
err = venus_fsync(coda_inode->i_sb, coda_i2f(coda_inode));
mutex_unlock(&coda_inode->i_mutex);
inode_unlock(coda_inode);

return err;
}
@@ -640,13 +640,13 @@ static void detach_groups(struct config_group *group)

child = sd->s_dentry;

mutex_lock(&d_inode(child)->i_mutex);
inode_lock(d_inode(child));

configfs_detach_group(sd->s_element);
d_inode(child)->i_flags |= S_DEAD;
dont_mount(child);

mutex_unlock(&d_inode(child)->i_mutex);
inode_unlock(d_inode(child));

d_delete(child);
dput(child);
@@ -834,11 +834,11 @@ static int configfs_attach_item(struct config_item *parent_item,
* the VFS may already have hit and used them. Thus,
* we must lock them as rmdir() would.
*/
mutex_lock(&d_inode(dentry)->i_mutex);
inode_lock(d_inode(dentry));
configfs_remove_dir(item);
d_inode(dentry)->i_flags |= S_DEAD;
dont_mount(dentry);
mutex_unlock(&d_inode(dentry)->i_mutex);
inode_unlock(d_inode(dentry));
d_delete(dentry);
}
}
@@ -874,7 +874,7 @@ static int configfs_attach_group(struct config_item *parent_item,
* We must also lock the inode to remove it safely in case of
* error, as rmdir() would.
*/
mutex_lock_nested(&d_inode(dentry)->i_mutex, I_MUTEX_CHILD);
inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD);
configfs_adjust_dir_dirent_depth_before_populate(sd);
ret = populate_groups(to_config_group(item));
if (ret) {
@@ -883,7 +883,7 @@ static int configfs_attach_group(struct config_item *parent_item,
dont_mount(dentry);
}
configfs_adjust_dir_dirent_depth_after_populate(sd);
mutex_unlock(&d_inode(dentry)->i_mutex);
inode_unlock(d_inode(dentry));
if (ret)
d_delete(dentry);
}
@@ -1135,7 +1135,7 @@ int configfs_depend_item(struct configfs_subsystem *subsys,
* subsystem is really registered, and so we need to lock out
* configfs_[un]register_subsystem().
*/
mutex_lock(&d_inode(root)->i_mutex);
inode_lock(d_inode(root));

subsys_sd = configfs_find_subsys_dentry(root->d_fsdata, s_item);
if (!subsys_sd) {
@@ -1147,7 +1147,7 @@ int configfs_depend_item(struct configfs_subsystem *subsys,
ret = configfs_do_depend_item(subsys_sd->s_dentry, target);

out_unlock_fs:
mutex_unlock(&d_inode(root)->i_mutex);
inode_unlock(d_inode(root));

/*
* If we succeeded, the fs is pinned via other methods. If not,
@@ -1230,7 +1230,7 @@ int configfs_depend_item_unlocked(struct configfs_subsystem *caller_subsys,
* additional locking to prevent other subsystem from being
* unregistered
*/
mutex_lock(&d_inode(root->cg_item.ci_dentry)->i_mutex);
inode_lock(d_inode(root->cg_item.ci_dentry));

/*
* As we are trying to depend item from other subsystem
@@ -1254,7 +1254,7 @@ int configfs_depend_item_unlocked(struct configfs_subsystem *caller_subsys,
* We were called from subsystem other than our target so we
* took some locks so now it's time to release them
*/
mutex_unlock(&d_inode(root->cg_item.ci_dentry)->i_mutex);
inode_unlock(d_inode(root->cg_item.ci_dentry));

return ret;
}
@@ -1561,7 +1561,7 @@ int configfs_rename_dir(struct config_item * item, const char *new_name)
down_write(&configfs_rename_sem);
parent = item->parent->dentry;

mutex_lock(&d_inode(parent)->i_mutex);
inode_lock(d_inode(parent));

new_dentry = lookup_one_len(new_name, parent, strlen(new_name));
if (!IS_ERR(new_dentry)) {
@@ -1577,7 +1577,7 @@ int configfs_rename_dir(struct config_item * item, const char *new_name)
error = -EEXIST;
dput(new_dentry);
}
mutex_unlock(&d_inode(parent)->i_mutex);
inode_unlock(d_inode(parent));
up_write(&configfs_rename_sem);

return error;
@@ -1590,7 +1590,7 @@ static int configfs_dir_open(struct inode *inode, struct file *file)
struct configfs_dirent * parent_sd = dentry->d_fsdata;
int err;

mutex_lock(&d_inode(dentry)->i_mutex);
inode_lock(d_inode(dentry));
/*
* Fake invisibility if dir belongs to a group/default groups hierarchy
* being attached
@@ -1603,7 +1603,7 @@ static int configfs_dir_open(struct inode *inode, struct file *file)
else
err = 0;
}
mutex_unlock(&d_inode(dentry)->i_mutex);
inode_unlock(d_inode(dentry));

return err;
}
@@ -1613,11 +1613,11 @@ static int configfs_dir_close(struct inode *inode, struct file *file)
struct dentry * dentry = file->f_path.dentry;
struct configfs_dirent * cursor = file->private_data;

mutex_lock(&d_inode(dentry)->i_mutex);
inode_lock(d_inode(dentry));
spin_lock(&configfs_dirent_lock);
list_del_init(&cursor->s_sibling);
spin_unlock(&configfs_dirent_lock);
mutex_unlock(&d_inode(dentry)->i_mutex);
inode_unlock(d_inode(dentry));

release_configfs_dirent(cursor);
@@ -1698,7 +1698,7 @@ static loff_t configfs_dir_lseek(struct file *file, loff_t offset, int whence)
{
struct dentry * dentry = file->f_path.dentry;

mutex_lock(&d_inode(dentry)->i_mutex);
inode_lock(d_inode(dentry));
switch (whence) {
case 1:
offset += file->f_pos;
@@ -1706,7 +1706,7 @@ static loff_t configfs_dir_lseek(struct file *file, loff_t offset, int whence)
if (offset >= 0)
break;
default:
mutex_unlock(&d_inode(dentry)->i_mutex);
inode_unlock(d_inode(dentry));
return -EINVAL;
}
if (offset != file->f_pos) {
@@ -1732,7 +1732,7 @@ static loff_t configfs_dir_lseek(struct file *file, loff_t offset, int whence)
spin_unlock(&configfs_dirent_lock);
}
}
mutex_unlock(&d_inode(dentry)->i_mutex);
inode_unlock(d_inode(dentry));
return offset;
}
@@ -1767,14 +1767,14 @@ int configfs_register_group(struct config_group *parent_group,

parent = parent_group->cg_item.ci_dentry;

mutex_lock_nested(&d_inode(parent)->i_mutex, I_MUTEX_PARENT);
inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
ret = create_default_group(parent_group, group);
if (!ret) {
spin_lock(&configfs_dirent_lock);
configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata);
spin_unlock(&configfs_dirent_lock);
}
mutex_unlock(&d_inode(parent)->i_mutex);
inode_unlock(d_inode(parent));
return ret;
}
EXPORT_SYMBOL(configfs_register_group);
@@ -1791,7 +1791,7 @@ void configfs_unregister_group(struct config_group *group)
struct dentry *dentry = group->cg_item.ci_dentry;
struct dentry *parent = group->cg_item.ci_parent->ci_dentry;

mutex_lock_nested(&d_inode(parent)->i_mutex, I_MUTEX_PARENT);
inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
spin_lock(&configfs_dirent_lock);
configfs_detach_prep(dentry, NULL);
spin_unlock(&configfs_dirent_lock);
@@ -1800,7 +1800,7 @@ void configfs_unregister_group(struct config_group *group)
d_inode(dentry)->i_flags |= S_DEAD;
dont_mount(dentry);
d_delete(dentry);
mutex_unlock(&d_inode(parent)->i_mutex);
inode_unlock(d_inode(parent));

dput(dentry);
@@ -1872,7 +1872,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
sd = root->d_fsdata;
link_group(to_config_group(sd->s_element), group);

mutex_lock_nested(&d_inode(root)->i_mutex, I_MUTEX_PARENT);
inode_lock_nested(d_inode(root), I_MUTEX_PARENT);

err = -ENOMEM;
dentry = d_alloc_name(root, group->cg_item.ci_name);
@@ -1892,7 +1892,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
}
}

mutex_unlock(&d_inode(root)->i_mutex);
inode_unlock(d_inode(root));

if (err) {
unlink_group(group);
@@ -1913,9 +1913,9 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
return;
}

mutex_lock_nested(&d_inode(root)->i_mutex,
inode_lock_nested(d_inode(root),
I_MUTEX_PARENT);
mutex_lock_nested(&d_inode(dentry)->i_mutex, I_MUTEX_CHILD);
inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD);
mutex_lock(&configfs_symlink_mutex);
spin_lock(&configfs_dirent_lock);
if (configfs_detach_prep(dentry, NULL)) {
@@ -1926,11 +1926,11 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
configfs_detach_group(&group->cg_item);
d_inode(dentry)->i_flags |= S_DEAD;
dont_mount(dentry);
mutex_unlock(&d_inode(dentry)->i_mutex);
inode_unlock(d_inode(dentry));

d_delete(dentry);

mutex_unlock(&d_inode(root)->i_mutex);
inode_unlock(d_inode(root));

dput(dentry);
@@ -540,10 +540,10 @@ int configfs_create_file(struct config_item * item, const struct configfs_attrib
umode_t mode = (attr->ca_mode & S_IALLUGO) | S_IFREG;
int error = 0;

mutex_lock_nested(&d_inode(dir)->i_mutex, I_MUTEX_NORMAL);
inode_lock_nested(d_inode(dir), I_MUTEX_NORMAL);
error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode,
CONFIGFS_ITEM_ATTR);
mutex_unlock(&d_inode(dir)->i_mutex);
inode_unlock(d_inode(dir));

return error;
}
@@ -562,10 +562,10 @@ int configfs_create_bin_file(struct config_item *item,
umode_t mode = (bin_attr->cb_attr.ca_mode & S_IALLUGO) | S_IFREG;
int error = 0;

mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_NORMAL);
inode_lock_nested(dir->d_inode, I_MUTEX_NORMAL);
error = configfs_make_dirent(parent_sd, NULL, (void *) bin_attr, mode,
CONFIGFS_ITEM_BIN_ATTR);
mutex_unlock(&dir->d_inode->i_mutex);
inode_unlock(dir->d_inode);

return error;
}
@@ -255,7 +255,7 @@ void configfs_hash_and_remove(struct dentry * dir, const char * name)
/* no inode means this hasn't been made visible yet */
return;

mutex_lock(&d_inode(dir)->i_mutex);
inode_lock(d_inode(dir));
list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
if (!sd->s_element)
continue;
@@ -268,5 +268,5 @@ void configfs_hash_and_remove(struct dentry * dir, const char * name)
break;
}
}
mutex_unlock(&d_inode(dir)->i_mutex);
inode_unlock(d_inode(dir));
}
fs/dax.c
@@ -248,10 +248,10 @@ ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,

if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
struct address_space *mapping = inode->i_mapping;
mutex_lock(&inode->i_mutex);
inode_lock(inode);
retval = filemap_write_and_wait_range(mapping, pos, end - 1);
if (retval) {
mutex_unlock(&inode->i_mutex);
inode_unlock(inode);
goto out;
}
}
@@ -263,7 +263,7 @@ ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
retval = dax_io(inode, iter, pos, end, get_block, &bh);

if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
mutex_unlock(&inode->i_mutex);
inode_unlock(inode);

if ((retval > 0) && end_io)
end_io(iocb, pos, retval, bh.b_private);
@@ -2462,7 +2462,7 @@ EXPORT_SYMBOL(d_rehash);
*/
void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
{
BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex));
BUG_ON(!inode_is_locked(dentry->d_parent->d_inode));
BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */

spin_lock(&dentry->d_lock);
@@ -2738,7 +2738,7 @@ static int __d_unalias(struct inode *inode,
if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
goto out_err;
m1 = &dentry->d_sb->s_vfs_rename_mutex;
if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
if (!inode_trylock(alias->d_parent->d_inode))
goto out_err;
m2 = &alias->d_parent->d_inode->i_mutex;
out_unalias:
@@ -265,7 +265,7 @@ static struct dentry *start_creating(const char *name, struct dentry *parent)
if (!parent)
parent = debugfs_mount->mnt_root;

mutex_lock(&d_inode(parent)->i_mutex);
inode_lock(d_inode(parent));
dentry = lookup_one_len(name, parent, strlen(name));
if (!IS_ERR(dentry) && d_really_is_positive(dentry)) {
dput(dentry);
@@ -273,7 +273,7 @@ static struct dentry *start_creating(const char *name, struct dentry *parent)
}

if (IS_ERR(dentry)) {
mutex_unlock(&d_inode(parent)->i_mutex);
inode_unlock(d_inode(parent));
simple_release_fs(&debugfs_mount, &debugfs_mount_count);
}
@@ -282,7 +282,7 @@ static struct dentry *start_creating(const char *name, struct dentry *parent)

static struct dentry *failed_creating(struct dentry *dentry)
{
mutex_unlock(&d_inode(dentry->d_parent)->i_mutex);
inode_unlock(d_inode(dentry->d_parent));
dput(dentry);
simple_release_fs(&debugfs_mount, &debugfs_mount_count);
return NULL;
@@ -290,7 +290,7 @@ static struct dentry *failed_creating(struct dentry *dentry)

static struct dentry *end_creating(struct dentry *dentry)
{
mutex_unlock(&d_inode(dentry->d_parent)->i_mutex);
inode_unlock(d_inode(dentry->d_parent));
return dentry;
}
@@ -560,9 +560,9 @@ void debugfs_remove(struct dentry *dentry)
if (!parent || d_really_is_negative(parent))
return;

mutex_lock(&d_inode(parent)->i_mutex);
inode_lock(d_inode(parent));
ret = __debugfs_remove(dentry, parent);
mutex_unlock(&d_inode(parent)->i_mutex);
inode_unlock(d_inode(parent));
if (!ret)
simple_release_fs(&debugfs_mount, &debugfs_mount_count);
}
@@ -594,7 +594,7 @@ void debugfs_remove_recursive(struct dentry *dentry)

parent = dentry;
down:
mutex_lock(&d_inode(parent)->i_mutex);
inode_lock(d_inode(parent));
loop:
/*
* The parent->d_subdirs is protected by the d_lock. Outside that
@@ -609,7 +609,7 @@ void debugfs_remove_recursive(struct dentry *dentry)
/* perhaps simple_empty(child) makes more sense */
if (!list_empty(&child->d_subdirs)) {
spin_unlock(&parent->d_lock);
mutex_unlock(&d_inode(parent)->i_mutex);
inode_unlock(d_inode(parent));
parent = child;
goto down;
}
@@ -630,10 +630,10 @@ void debugfs_remove_recursive(struct dentry *dentry)
}
spin_unlock(&parent->d_lock);

mutex_unlock(&d_inode(parent)->i_mutex);
inode_unlock(d_inode(parent));
child = parent;
parent = parent->d_parent;
mutex_lock(&d_inode(parent)->i_mutex);
inode_lock(d_inode(parent));

if (child != dentry)
/* go up */
@@ -641,7 +641,7 @@ void debugfs_remove_recursive(struct dentry *dentry)

if (!__debugfs_remove(child, parent))
simple_release_fs(&debugfs_mount, &debugfs_mount_count);
mutex_unlock(&d_inode(parent)->i_mutex);
inode_unlock(d_inode(parent));
}
EXPORT_SYMBOL_GPL(debugfs_remove_recursive);
@ -255,7 +255,7 @@ static int mknod_ptmx(struct super_block *sb)
|
|||
if (!uid_valid(root_uid) || !gid_valid(root_gid))
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&d_inode(root)->i_mutex);
|
||||
inode_lock(d_inode(root));
|
||||
|
||||
/* If we have already created ptmx node, return */
|
||||
if (fsi->ptmx_dentry) {
|
||||
|
@ -292,7 +292,7 @@ static int mknod_ptmx(struct super_block *sb)
|
|||
fsi->ptmx_dentry = dentry;
|
||||
rc = 0;
|
||||
out:
|
||||
mutex_unlock(&d_inode(root)->i_mutex);
|
||||
inode_unlock(d_inode(root));
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
@ -615,7 +615,7 @@ struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
|
|||
|
||||
sprintf(s, "%d", index);
|
||||
|
||||
mutex_lock(&d_inode(root)->i_mutex);
|
||||
inode_lock(d_inode(root));
|
||||
|
||||
dentry = d_alloc_name(root, s);
|
||||
if (dentry) {
|
||||
|
@ -626,7 +626,7 @@ struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
|
|||
inode = ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
mutex_unlock(&d_inode(root)->i_mutex);
|
||||
inode_unlock(d_inode(root));
|
||||
|
||||
return inode;
|
||||
}
|
||||
|
@ -671,7 +671,7 @@ void devpts_pty_kill(struct inode *inode)
|
|||
|
||||
BUG_ON(inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR));
|
||||
|
||||
mutex_lock(&d_inode(root)->i_mutex);
|
||||
inode_lock(d_inode(root));
|
||||
|
||||
dentry = d_find_alias(inode);
|
||||
|
||||
|
@ -680,7 +680,7 @@ void devpts_pty_kill(struct inode *inode)
|
|||
dput(dentry); /* d_alloc_name() in devpts_pty_new() */
|
||||
dput(dentry); /* d_find_alias above */
|
||||
|
||||
mutex_unlock(&d_inode(root)->i_mutex);
|
||||
inode_unlock(d_inode(root));
|
||||
}
|
||||
|
||||
static int __init init_devpts_fs(void)
|
||||
|
|
|
@ -1157,12 +1157,12 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
|
|||
iocb->ki_filp->f_mapping;
|
||||
|
||||
/* will be released by direct_io_worker */
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
|
||||
retval = filemap_write_and_wait_range(mapping, offset,
|
||||
end - 1);
|
||||
if (retval) {
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
kmem_cache_free(dio_cache, dio);
|
||||
goto out;
|
||||
}
|
||||
|
@ -1173,7 +1173,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
|
|||
dio->i_size = i_size_read(inode);
|
||||
if (iov_iter_rw(iter) == READ && offset >= dio->i_size) {
|
||||
if (dio->flags & DIO_LOCKING)
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
kmem_cache_free(dio_cache, dio);
|
||||
retval = 0;
|
||||
goto out;
|
||||
|
@ -1295,7 +1295,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
|
|||
* of protecting us from looking up uninitialized blocks.
|
||||
*/
|
||||
if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING))
|
||||
mutex_unlock(&dio->inode->i_mutex);
|
||||
inode_unlock(dio->inode);
|
||||
|
||||
/*
|
||||
* The only time we want to leave bios in flight is when a successful
|
||||
|
|
|
@ -41,13 +41,13 @@ static struct dentry *lock_parent(struct dentry *dentry)
|
|||
struct dentry *dir;
|
||||
|
||||
dir = dget_parent(dentry);
|
||||
mutex_lock_nested(&(d_inode(dir)->i_mutex), I_MUTEX_PARENT);
|
||||
inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
|
||||
return dir;
|
||||
}
|
||||
|
||||
static void unlock_dir(struct dentry *dir)
|
||||
{
|
||||
mutex_unlock(&d_inode(dir)->i_mutex);
|
||||
inode_unlock(d_inode(dir));
|
||||
dput(dir);
|
||||
}
|
||||
|
||||
|
@ -397,11 +397,11 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
|
|||
int rc = 0;
|
||||
|
||||
lower_dir_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry->d_parent);
|
||||
mutex_lock(&d_inode(lower_dir_dentry)->i_mutex);
|
||||
inode_lock(d_inode(lower_dir_dentry));
|
||||
lower_dentry = lookup_one_len(ecryptfs_dentry->d_name.name,
|
||||
lower_dir_dentry,
|
||||
ecryptfs_dentry->d_name.len);
|
||||
mutex_unlock(&d_inode(lower_dir_dentry)->i_mutex);
|
||||
inode_unlock(d_inode(lower_dir_dentry));
|
||||
if (IS_ERR(lower_dentry)) {
|
||||
rc = PTR_ERR(lower_dentry);
|
||||
ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
|
||||
|
@ -426,11 +426,11 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
|
|||
"filename; rc = [%d]\n", __func__, rc);
|
||||
goto out;
|
||||
}
|
||||
mutex_lock(&d_inode(lower_dir_dentry)->i_mutex);
|
||||
inode_lock(d_inode(lower_dir_dentry));
|
||||
lower_dentry = lookup_one_len(encrypted_and_encoded_name,
|
||||
lower_dir_dentry,
|
||||
encrypted_and_encoded_name_size);
|
||||
mutex_unlock(&d_inode(lower_dir_dentry)->i_mutex);
|
||||
inode_unlock(d_inode(lower_dir_dentry));
|
||||
if (IS_ERR(lower_dentry)) {
|
||||
rc = PTR_ERR(lower_dentry);
|
||||
ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
|
||||
|
@ -869,9 +869,9 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
|
|||
if (!rc && lower_ia.ia_valid & ATTR_SIZE) {
|
||||
struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
|
||||
|
||||
mutex_lock(&d_inode(lower_dentry)->i_mutex);
|
||||
inode_lock(d_inode(lower_dentry));
|
||||
rc = notify_change(lower_dentry, &lower_ia, NULL);
|
||||
mutex_unlock(&d_inode(lower_dentry)->i_mutex);
|
||||
inode_unlock(d_inode(lower_dentry));
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
|
@ -970,9 +970,9 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
|
|||
if (lower_ia.ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID))
|
||||
lower_ia.ia_valid &= ~ATTR_MODE;
|
||||
|
||||
mutex_lock(&d_inode(lower_dentry)->i_mutex);
|
||||
inode_lock(d_inode(lower_dentry));
|
||||
rc = notify_change(lower_dentry, &lower_ia, NULL);
|
||||
mutex_unlock(&d_inode(lower_dentry)->i_mutex);
|
||||
inode_unlock(d_inode(lower_dentry));
|
||||
out:
|
||||
fsstack_copy_attr_all(inode, lower_inode);
|
||||
return rc;
|
||||
|
@ -1048,10 +1048,10 @@ ecryptfs_getxattr_lower(struct dentry *lower_dentry, const char *name,
|
|||
rc = -EOPNOTSUPP;
|
||||
goto out;
|
||||
}
|
||||
mutex_lock(&d_inode(lower_dentry)->i_mutex);
|
||||
inode_lock(d_inode(lower_dentry));
|
||||
rc = d_inode(lower_dentry)->i_op->getxattr(lower_dentry, name, value,
|
||||
size);
|
||||
mutex_unlock(&d_inode(lower_dentry)->i_mutex);
|
||||
inode_unlock(d_inode(lower_dentry));
|
||||
out:
|
||||
return rc;
|
||||
}
|
||||
|
@ -1075,9 +1075,9 @@ ecryptfs_listxattr(struct dentry *dentry, char *list, size_t size)
|
|||
rc = -EOPNOTSUPP;
|
||||
goto out;
|
||||
}
|
||||
mutex_lock(&d_inode(lower_dentry)->i_mutex);
|
||||
inode_lock(d_inode(lower_dentry));
|
||||
rc = d_inode(lower_dentry)->i_op->listxattr(lower_dentry, list, size);
|
||||
mutex_unlock(&d_inode(lower_dentry)->i_mutex);
|
||||
inode_unlock(d_inode(lower_dentry));
|
||||
out:
|
||||
return rc;
|
||||
}
|
||||
|
@ -1092,9 +1092,9 @@ static int ecryptfs_removexattr(struct dentry *dentry, const char *name)
|
|||
rc = -EOPNOTSUPP;
|
||||
goto out;
|
||||
}
|
||||
mutex_lock(&d_inode(lower_dentry)->i_mutex);
|
||||
inode_lock(d_inode(lower_dentry));
|
||||
rc = d_inode(lower_dentry)->i_op->removexattr(lower_dentry, name);
|
||||
mutex_unlock(&d_inode(lower_dentry)->i_mutex);
|
||||
inode_unlock(d_inode(lower_dentry));
|
||||
out:
|
||||
return rc;
|
||||
}
|
||||
|
|
|
@ -436,7 +436,7 @@ static int ecryptfs_write_inode_size_to_xattr(struct inode *ecryptfs_inode)
|
|||
rc = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
mutex_lock(&lower_inode->i_mutex);
|
||||
inode_lock(lower_inode);
|
||||
size = lower_inode->i_op->getxattr(lower_dentry, ECRYPTFS_XATTR_NAME,
|
||||
xattr_virt, PAGE_CACHE_SIZE);
|
||||
if (size < 0)
|
||||
|
@ -444,7 +444,7 @@ static int ecryptfs_write_inode_size_to_xattr(struct inode *ecryptfs_inode)
|
|||
put_unaligned_be64(i_size_read(ecryptfs_inode), xattr_virt);
|
||||
rc = lower_inode->i_op->setxattr(lower_dentry, ECRYPTFS_XATTR_NAME,
|
||||
xattr_virt, size, 0);
|
||||
mutex_unlock(&lower_inode->i_mutex);
|
||||
inode_unlock(lower_inode);
|
||||
if (rc)
|
||||
printk(KERN_ERR "Error whilst attempting to write inode size "
|
||||
"to lower file xattr; rc = [%d]\n", rc);
|
||||
|
|
|
@ -50,9 +50,9 @@ static ssize_t efivarfs_file_write(struct file *file,
|
|||
d_delete(file->f_path.dentry);
|
||||
dput(file->f_path.dentry);
|
||||
} else {
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
i_size_write(inode, datasize + sizeof(attributes));
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
}
|
||||
|
||||
bytes = count;
|
||||
|
|
|
@ -160,10 +160,10 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
|
|||
efivar_entry_size(entry, &size);
|
||||
efivar_entry_add(entry, &efivarfs_list);
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
inode->i_private = entry;
|
||||
i_size_write(inode, size + sizeof(entry->var.Attributes));
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
d_add(dentry, inode);
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -1307,13 +1307,13 @@ static void bprm_fill_uid(struct linux_binprm *bprm)
|
|||
return;
|
||||
|
||||
/* Be careful if suid/sgid is set */
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
|
||||
/* reload atomically mode/uid/gid now that lock held */
|
||||
mode = inode->i_mode;
|
||||
uid = inode->i_uid;
|
||||
gid = inode->i_gid;
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
|
||||
/* We ignore suid/sgid if there are no mappings for them in the ns */
|
||||
if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
|
||||
|
|
|
@ -52,9 +52,9 @@ static int exofs_file_fsync(struct file *filp, loff_t start, loff_t end,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
ret = sync_inode_metadata(filp->f_mapping->host, 1);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@ -124,10 +124,10 @@ static struct dentry *reconnect_one(struct vfsmount *mnt,
|
|||
int err;
|
||||
|
||||
parent = ERR_PTR(-EACCES);
|
||||
mutex_lock(&dentry->d_inode->i_mutex);
|
||||
inode_lock(dentry->d_inode);
|
||||
if (mnt->mnt_sb->s_export_op->get_parent)
|
||||
parent = mnt->mnt_sb->s_export_op->get_parent(dentry);
|
||||
mutex_unlock(&dentry->d_inode->i_mutex);
|
||||
inode_unlock(dentry->d_inode);
|
||||
|
||||
if (IS_ERR(parent)) {
|
||||
dprintk("%s: get_parent of %ld failed, err %d\n",
|
||||
|
@ -143,9 +143,9 @@ static struct dentry *reconnect_one(struct vfsmount *mnt,
|
|||
if (err)
|
||||
goto out_err;
|
||||
dprintk("%s: found name: %s\n", __func__, nbuf);
|
||||
mutex_lock(&parent->d_inode->i_mutex);
|
||||
inode_lock(parent->d_inode);
|
||||
tmp = lookup_one_len(nbuf, parent, strlen(nbuf));
|
||||
mutex_unlock(&parent->d_inode->i_mutex);
|
||||
inode_unlock(parent->d_inode);
|
||||
if (IS_ERR(tmp)) {
|
||||
dprintk("%s: lookup failed: %d\n", __func__, PTR_ERR(tmp));
|
||||
goto out_err;
|
||||
|
@ -503,10 +503,10 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
|
|||
*/
|
||||
err = exportfs_get_name(mnt, target_dir, nbuf, result);
|
||||
if (!err) {
|
||||
mutex_lock(&target_dir->d_inode->i_mutex);
|
||||
inode_lock(target_dir->d_inode);
|
||||
nresult = lookup_one_len(nbuf, target_dir,
|
||||
strlen(nbuf));
|
||||
mutex_unlock(&target_dir->d_inode->i_mutex);
|
||||
inode_unlock(target_dir->d_inode);
|
||||
if (!IS_ERR(nresult)) {
|
||||
if (nresult->d_inode) {
|
||||
dput(result);
|
||||
|
|
|
@ -51,10 +51,10 @@ long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
|||
|
||||
flags = ext2_mask_flags(inode->i_mode, flags);
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
/* Is it quota file? Do not allow user to mess with it */
|
||||
if (IS_NOQUOTA(inode)) {
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
ret = -EPERM;
|
||||
goto setflags_out;
|
||||
}
|
||||
|
@ -68,7 +68,7 @@ long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
|||
*/
|
||||
if ((flags ^ oldflags) & (EXT2_APPEND_FL | EXT2_IMMUTABLE_FL)) {
|
||||
if (!capable(CAP_LINUX_IMMUTABLE)) {
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
ret = -EPERM;
|
||||
goto setflags_out;
|
||||
}
|
||||
|
@ -80,7 +80,7 @@ long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
|||
|
||||
ext2_set_inode_flags(inode);
|
||||
inode->i_ctime = CURRENT_TIME_SEC;
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
|
||||
mark_inode_dirty(inode);
|
||||
setflags_out:
|
||||
|
@ -102,10 +102,10 @@ long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
|||
goto setversion_out;
|
||||
}
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
inode->i_ctime = CURRENT_TIME_SEC;
|
||||
inode->i_generation = generation;
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
|
||||
mark_inode_dirty(inode);
|
||||
setversion_out:
|
||||
|
|
|
@ -2896,7 +2896,7 @@ do { \
|
|||
static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize)
|
||||
{
|
||||
WARN_ON_ONCE(S_ISREG(inode->i_mode) &&
|
||||
!mutex_is_locked(&inode->i_mutex));
|
||||
!inode_is_locked(inode));
|
||||
down_write(&EXT4_I(inode)->i_data_sem);
|
||||
if (newsize > EXT4_I(inode)->i_disksize)
|
||||
EXT4_I(inode)->i_disksize = newsize;
|
||||
|
|
|
@ -4799,7 +4799,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
|
|||
else
|
||||
max_blocks -= lblk;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
|
||||
/*
|
||||
* Indirect files do not support unwritten extnets
|
||||
|
@ -4902,7 +4902,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
|
|||
out_dio:
|
||||
ext4_inode_resume_unlocked_dio(inode);
|
||||
out_mutex:
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -4973,7 +4973,7 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
|
|||
if (mode & FALLOC_FL_KEEP_SIZE)
|
||||
flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
|
||||
/*
|
||||
* We only support preallocation for extent-based files only
|
||||
|
@ -5006,7 +5006,7 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
|
|||
EXT4_I(inode)->i_sync_tid);
|
||||
}
|
||||
out:
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
|
||||
return ret;
|
||||
}
|
||||
|
@ -5492,7 +5492,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
|
|||
return ret;
|
||||
}
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
/*
|
||||
* There is no need to overlap collapse range with EOF, in which case
|
||||
* it is effectively a truncate operation
|
||||
|
@ -5587,7 +5587,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
|
|||
up_write(&EXT4_I(inode)->i_mmap_sem);
|
||||
ext4_inode_resume_unlocked_dio(inode);
|
||||
out_mutex:
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -5638,7 +5638,7 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
|
|||
return ret;
|
||||
}
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
/* Currently just for extent based files */
|
||||
if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
|
||||
ret = -EOPNOTSUPP;
|
||||
|
@ -5757,7 +5757,7 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
|
|||
up_write(&EXT4_I(inode)->i_mmap_sem);
|
||||
ext4_inode_resume_unlocked_dio(inode);
|
||||
out_mutex:
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -5792,8 +5792,8 @@ ext4_swap_extents(handle_t *handle, struct inode *inode1,
|
|||
|
||||
BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem));
|
||||
BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem));
|
||||
BUG_ON(!mutex_is_locked(&inode1->i_mutex));
|
||||
BUG_ON(!mutex_is_locked(&inode2->i_mutex));
|
||||
BUG_ON(!inode_is_locked(inode1));
|
||||
BUG_ON(!inode_is_locked(inode2));
|
||||
|
||||
*erp = ext4_es_remove_extent(inode1, lblk1, count);
|
||||
if (unlikely(*erp))
|
||||
|
|
|
@ -113,7 +113,7 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
|
|||
ext4_unwritten_wait(inode);
|
||||
}
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
ret = generic_write_checks(iocb, from);
|
||||
if (ret <= 0)
|
||||
goto out;
|
||||
|
@ -169,7 +169,7 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
|
|||
}
|
||||
|
||||
ret = __generic_file_write_iter(iocb, from);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
|
||||
if (ret > 0) {
|
||||
ssize_t err;
|
||||
|
@ -186,7 +186,7 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
|
|||
return ret;
|
||||
|
||||
out:
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
if (aio_mutex)
|
||||
mutex_unlock(aio_mutex);
|
||||
return ret;
|
||||
|
@ -561,11 +561,11 @@ static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
|
|||
int blkbits;
|
||||
int ret = 0;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
|
||||
isize = i_size_read(inode);
|
||||
if (offset >= isize) {
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
|
@ -613,7 +613,7 @@ static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
|
|||
dataoff = (loff_t)last << blkbits;
|
||||
} while (last <= end);
|
||||
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
|
||||
if (dataoff > isize)
|
||||
return -ENXIO;
|
||||
|
@ -634,11 +634,11 @@ static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
|
|||
int blkbits;
|
||||
int ret = 0;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
|
||||
isize = i_size_read(inode);
|
||||
if (offset >= isize) {
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
|
@ -689,7 +689,7 @@ static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
|
|||
break;
|
||||
} while (last <= end);
|
||||
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
|
||||
if (holeoff > isize)
|
||||
holeoff = isize;
|
||||
|
|
|
@ -3231,7 +3231,7 @@ static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
|
|||
overwrite = *((int *)iocb->private);
|
||||
|
||||
if (overwrite)
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
|
||||
/*
|
||||
* We could direct write to holes and fallocate.
|
||||
|
@ -3331,7 +3331,7 @@ static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
|
|||
inode_dio_end(inode);
|
||||
/* take i_mutex locking again if we do a ovewrite dio */
|
||||
if (overwrite)
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -3653,7 +3653,7 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
|
|||
handle_t *handle;
|
||||
loff_t size = i_size_read(inode);
|
||||
|
||||
WARN_ON(!mutex_is_locked(&inode->i_mutex));
|
||||
WARN_ON(!inode_is_locked(inode));
|
||||
if (offset > size || offset + len < size)
|
||||
return 0;
|
||||
|
||||
|
@ -3707,7 +3707,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
|
|||
return ret;
|
||||
}
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
|
||||
/* No need to punch hole beyond i_size */
|
||||
if (offset >= inode->i_size)
|
||||
|
@ -3809,7 +3809,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
|
|||
up_write(&EXT4_I(inode)->i_mmap_sem);
|
||||
ext4_inode_resume_unlocked_dio(inode);
|
||||
out_mutex:
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -3879,7 +3879,7 @@ void ext4_truncate(struct inode *inode)
|
|||
* have i_mutex locked because it's not necessary.
|
||||
*/
|
||||
if (!(inode->i_state & (I_NEW|I_FREEING)))
|
||||
WARN_ON(!mutex_is_locked(&inode->i_mutex));
|
||||
WARN_ON(!inode_is_locked(inode));
|
||||
trace_ext4_truncate_enter(inode);
|
||||
|
||||
if (!ext4_can_truncate(inode))
|
||||
|
|
|
@ -330,7 +330,7 @@ static int ext4_ioctl_setproject(struct file *filp, __u32 projid)
|
|||
return err;
|
||||
|
||||
err = -EPERM;
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
/* Is it quota file? Do not allow user to mess with it */
|
||||
if (IS_NOQUOTA(inode))
|
||||
goto out_unlock;
|
||||
|
@ -381,7 +381,7 @@ static int ext4_ioctl_setproject(struct file *filp, __u32 projid)
|
|||
out_stop:
|
||||
ext4_journal_stop(handle);
|
||||
out_unlock:
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
mnt_drop_write_file(filp);
|
||||
return err;
|
||||
}
|
||||
|
@ -464,9 +464,9 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
|||
|
||||
flags = ext4_mask_flags(inode->i_mode, flags);
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
err = ext4_ioctl_setflags(inode, flags);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
mnt_drop_write_file(filp);
|
||||
return err;
|
||||
}
|
||||
|
@ -497,7 +497,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
|||
goto setversion_out;
|
||||
}
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
|
||||
if (IS_ERR(handle)) {
|
||||
err = PTR_ERR(handle);
|
||||
|
@ -512,7 +512,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
|||
ext4_journal_stop(handle);
|
||||
|
||||
unlock_out:
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
setversion_out:
|
||||
mnt_drop_write_file(filp);
|
||||
return err;
|
||||
|
@ -658,9 +658,9 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
|||
* ext4_ext_swap_inode_data before we switch the
|
||||
* inode format to prevent read.
|
||||
*/
|
||||
mutex_lock(&(inode->i_mutex));
|
||||
inode_lock((inode));
|
||||
err = ext4_ext_migrate(inode);
|
||||
mutex_unlock(&(inode->i_mutex));
|
||||
inode_unlock((inode));
|
||||
mnt_drop_write_file(filp);
|
||||
return err;
|
||||
}
|
||||
|
@ -876,11 +876,11 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
|||
flags = ext4_xflags_to_iflags(fa.fsx_xflags);
|
||||
flags = ext4_mask_flags(inode->i_mode, flags);
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
flags = (ei->i_flags & ~EXT4_FL_XFLAG_VISIBLE) |
|
||||
(flags & EXT4_FL_XFLAG_VISIBLE);
|
||||
err = ext4_ioctl_setflags(inode, flags);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
mnt_drop_write_file(filp);
|
||||
if (err)
|
||||
return err;
|
||||
|
|
|
@ -2753,7 +2753,7 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
|
|||
return 0;
|
||||
|
||||
WARN_ON_ONCE(!(inode->i_state & (I_NEW | I_FREEING)) &&
|
||||
!mutex_is_locked(&inode->i_mutex));
|
||||
!inode_is_locked(inode));
|
||||
/*
|
||||
* Exit early if inode already is on orphan list. This is a big speedup
|
||||
* since we don't have to contend on the global s_orphan_lock.
|
||||
|
@ -2835,7 +2835,7 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
|
|||
return 0;
|
||||
|
||||
WARN_ON_ONCE(!(inode->i_state & (I_NEW | I_FREEING)) &&
|
||||
!mutex_is_locked(&inode->i_mutex));
|
||||
!inode_is_locked(inode));
|
||||
/* Do this quick check before taking global s_orphan_lock. */
|
||||
if (list_empty(&ei->i_orphan))
|
||||
return 0;
|
||||
|
|
|
@ -2286,10 +2286,10 @@ static void ext4_orphan_cleanup(struct super_block *sb,
|
|||
__func__, inode->i_ino, inode->i_size);
|
||||
jbd_debug(2, "truncating inode %lu to %lld bytes\n",
|
||||
inode->i_ino, inode->i_size);
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
truncate_inode_pages(inode->i_mapping, inode->i_size);
|
||||
ext4_truncate(inode);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
nr_truncates++;
|
||||
} else {
|
||||
if (test_opt(sb, DEBUG))
|
||||
|
|
|
@ -794,7 +794,7 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
|
|||
return ret;
|
||||
}
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
|
||||
isize = i_size_read(inode);
|
||||
if (start >= isize)
|
||||
|
@ -860,7 +860,7 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
|
|||
if (ret == 1)
|
||||
ret = 0;
|
||||
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@ -333,7 +333,7 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
|
|||
loff_t isize;
|
||||
int err = 0;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
|
||||
isize = i_size_read(inode);
|
||||
if (offset >= isize)
|
||||
|
@ -388,10 +388,10 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
|
|||
found:
|
||||
if (whence == SEEK_HOLE && data_ofs > isize)
|
||||
data_ofs = isize;
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
return vfs_setpos(file, data_ofs, maxbytes);
|
||||
fail:
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
|
@ -1219,7 +1219,7 @@ static long f2fs_fallocate(struct file *file, int mode,
|
|||
FALLOC_FL_INSERT_RANGE))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
|
||||
if (mode & FALLOC_FL_PUNCH_HOLE) {
|
||||
if (offset >= inode->i_size)
|
||||
|
@ -1243,7 +1243,7 @@ static long f2fs_fallocate(struct file *file, int mode,
|
|||
}
|
||||
|
||||
out:
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
|
||||
trace_f2fs_fallocate(inode, mode, offset, len, ret);
|
||||
return ret;
|
||||
|
@ -1307,13 +1307,13 @@ static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
|
|||
|
||||
flags = f2fs_mask_flags(inode->i_mode, flags);
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
|
||||
oldflags = fi->i_flags;
|
||||
|
||||
if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
|
||||
if (!capable(CAP_LINUX_IMMUTABLE)) {
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
ret = -EPERM;
|
||||
goto out;
|
||||
}
|
||||
|
@ -1322,7 +1322,7 @@ static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
|
|||
flags = flags & FS_FL_USER_MODIFIABLE;
|
||||
flags |= oldflags & ~FS_FL_USER_MODIFIABLE;
|
||||
fi->i_flags = flags;
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
|
||||
f2fs_set_inode_flags(inode);
|
||||
inode->i_ctime = CURRENT_TIME;
|
||||
|
@ -1667,7 +1667,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
|
|||
|
||||
f2fs_balance_fs(sbi, true);
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
|
||||
/* writeback all dirty pages in the range */
|
||||
err = filemap_write_and_wait_range(inode->i_mapping, range->start,
|
||||
|
@ -1778,7 +1778,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
|
|||
clear_out:
|
||||
clear_inode_flag(F2FS_I(inode), FI_DO_DEFRAG);
|
||||
out:
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
if (!err)
|
||||
range->len = (u64)total << PAGE_CACHE_SHIFT;
|
||||
return err;
|
||||
|
|
|
@ -769,7 +769,7 @@ static int fat_ioctl_readdir(struct inode *inode, struct file *file,
|
|||
|
||||
buf.dirent = dirent;
|
||||
buf.result = 0;
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
buf.ctx.pos = file->f_pos;
|
||||
ret = -ENOENT;
|
||||
if (!IS_DEADDIR(inode)) {
|
||||
|
@ -777,7 +777,7 @@ static int fat_ioctl_readdir(struct inode *inode, struct file *file,
|
|||
short_only, both ? &buf : NULL);
|
||||
file->f_pos = buf.ctx.pos;
|
||||
}
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
if (ret >= 0)
|
||||
ret = buf.result;
|
||||
return ret;
|
||||
|
|
|
@ -24,9 +24,9 @@ static int fat_ioctl_get_attributes(struct inode *inode, u32 __user *user_attr)
|
|||
{
|
||||
u32 attr;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
attr = fat_make_attrs(inode);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
|
||||
return put_user(attr, user_attr);
|
||||
}
|
||||
|
@ -47,7 +47,7 @@ static int fat_ioctl_set_attributes(struct file *file, u32 __user *user_attr)
|
|||
err = mnt_want_write_file(file);
|
||||
if (err)
|
||||
goto out;
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
|
||||
/*
|
||||
* ATTR_VOLUME and ATTR_DIR cannot be changed; this also
|
||||
|
@ -109,7 +109,7 @@ static int fat_ioctl_set_attributes(struct file *file, u32 __user *user_attr)
|
|||
fat_save_attrs(inode, attr);
|
||||
mark_inode_dirty(inode);
|
||||
out_unlock_inode:
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
mnt_drop_write_file(file);
|
||||
out:
|
||||
return err;
|
||||
|
@ -246,7 +246,7 @@ static long fat_fallocate(struct file *file, int mode,
|
|||
if (!S_ISREG(inode->i_mode))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
if (mode & FALLOC_FL_KEEP_SIZE) {
|
||||
ondisksize = inode->i_blocks << 9;
|
||||
if ((offset + len) <= ondisksize)
|
||||
|
@ -272,7 +272,7 @@ static long fat_fallocate(struct file *file, int mode,
|
|||
}
|
||||
|
||||
error:
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
|
|
@ -944,7 +944,7 @@ int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
|
|||
if (!parent)
|
||||
return -ENOENT;
|
||||
|
||||
mutex_lock(&parent->i_mutex);
|
||||
inode_lock(parent);
|
||||
if (!S_ISDIR(parent->i_mode))
|
||||
goto unlock;
|
||||
|
||||
|
@ -962,7 +962,7 @@ int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
|
|||
fuse_invalidate_entry(entry);
|
||||
|
||||
if (child_nodeid != 0 && d_really_is_positive(entry)) {
|
||||
mutex_lock(&d_inode(entry)->i_mutex);
|
||||
inode_lock(d_inode(entry));
|
||||
if (get_node_id(d_inode(entry)) != child_nodeid) {
|
||||
err = -ENOENT;
|
||||
goto badentry;
|
||||
|
@ -983,7 +983,7 @@ int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
|
|||
clear_nlink(d_inode(entry));
|
||||
err = 0;
|
||||
badentry:
|
||||
mutex_unlock(&d_inode(entry)->i_mutex);
|
||||
inode_unlock(d_inode(entry));
|
||||
if (!err)
|
||||
d_delete(entry);
|
||||
} else {
|
||||
|
@ -992,7 +992,7 @@ int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
|
|||
dput(entry);
|
||||
|
||||
unlock:
|
||||
mutex_unlock(&parent->i_mutex);
|
||||
inode_unlock(parent);
|
||||
iput(parent);
|
||||
return err;
|
||||
}
|
||||
|
@ -1504,7 +1504,7 @@ void fuse_set_nowrite(struct inode *inode)
|
|||
struct fuse_conn *fc = get_fuse_conn(inode);
|
||||
struct fuse_inode *fi = get_fuse_inode(inode);
|
||||
|
||||
BUG_ON(!mutex_is_locked(&inode->i_mutex));
|
||||
BUG_ON(!inode_is_locked(inode));
|
||||
|
||||
spin_lock(&fc->lock);
|
||||
BUG_ON(fi->writectr < 0);
|
||||
|
|
|
@ -207,7 +207,7 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
|
|||
return err;
|
||||
|
||||
if (lock_inode)
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
|
||||
err = fuse_do_open(fc, get_node_id(inode), file, isdir);
|
||||
|
||||
|
@ -215,7 +215,7 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
|
|||
fuse_finish_open(inode, file);
|
||||
|
||||
if (lock_inode)
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
@ -413,9 +413,9 @@ static int fuse_flush(struct file *file, fl_owner_t id)
|
|||
if (err)
|
||||
return err;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
fuse_sync_writes(inode);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
|
||||
req = fuse_get_req_nofail_nopages(fc, file);
|
||||
memset(&inarg, 0, sizeof(inarg));
|
||||
|
@ -450,7 +450,7 @@ int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
|
|||
if (is_bad_inode(inode))
|
||||
return -EIO;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
|
||||
/*
|
||||
* Start writeback against all dirty pages of the inode, then
|
||||
|
@ -486,7 +486,7 @@ int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
|
|||
err = 0;
|
||||
}
|
||||
out:
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -1160,7 +1160,7 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
|
|||
return generic_file_write_iter(iocb, from);
|
||||
}
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
|
||||
/* We can write back this queue in page reclaim */
|
||||
current->backing_dev_info = inode_to_bdi(inode);
|
||||
|
@ -1210,7 +1210,7 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
|
|||
}
|
||||
out:
|
||||
current->backing_dev_info = NULL;
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
|
||||
return written ? written : err;
|
||||
}
|
||||
|
@ -1322,10 +1322,10 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
|
|||
|
||||
if (!cuse && fuse_range_is_writeback(inode, idx_from, idx_to)) {
|
||||
if (!write)
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
fuse_sync_writes(inode);
|
||||
if (!write)
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
}
|
||||
|
||||
while (count) {
|
||||
|
@ -1413,14 +1413,14 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
|
|||
return -EIO;
|
||||
|
||||
/* Don't allow parallel writes to the same file */
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
res = generic_write_checks(iocb, from);
|
||||
if (res > 0)
|
||||
res = fuse_direct_io(&io, from, &iocb->ki_pos, FUSE_DIO_WRITE);
|
||||
fuse_invalidate_attr(inode);
|
||||
if (res > 0)
|
||||
fuse_write_update_size(inode, iocb->ki_pos);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
|
||||
return res;
|
||||
}
|
||||
|
@ -2287,17 +2287,17 @@ static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence)
|
|||
retval = generic_file_llseek(file, offset, whence);
|
||||
break;
|
||||
case SEEK_END:
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
retval = fuse_update_attributes(inode, NULL, file, NULL);
|
||||
if (!retval)
|
||||
retval = generic_file_llseek(file, offset, whence);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
break;
|
||||
case SEEK_HOLE:
|
||||
case SEEK_DATA:
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
retval = fuse_lseek(file, offset, whence);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
break;
|
||||
default:
|
||||
retval = -EINVAL;
|
||||
|
@ -2944,7 +2944,7 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
|
|||
return -EOPNOTSUPP;
|
||||
|
||||
if (lock_inode) {
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
if (mode & FALLOC_FL_PUNCH_HOLE) {
|
||||
loff_t endbyte = offset + length - 1;
|
||||
err = filemap_write_and_wait_range(inode->i_mapping,
|
||||
|
@ -2990,7 +2990,7 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
|
|||
clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
|
||||
|
||||
if (lock_inode)
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
|
|
@@ -914,7 +914,7 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t le
if ((mode & ~FALLOC_FL_KEEP_SIZE) || gfs2_is_jdata(ip))
return -EOPNOTSUPP;

mutex_lock(&inode->i_mutex);
inode_lock(inode);

gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
ret = gfs2_glock_nq(&gh);

@@ -946,7 +946,7 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t le
gfs2_glock_dq(&gh);
out_uninit:
gfs2_holder_uninit(&gh);
mutex_unlock(&inode->i_mutex);
inode_unlock(inode);
return ret;
}

@@ -2067,7 +2067,7 @@ static int gfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
if (ret)
return ret;

mutex_lock(&inode->i_mutex);
inode_lock(inode);

ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
if (ret)

@@ -2094,7 +2094,7 @@ static int gfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,

gfs2_glock_dq_uninit(&gh);
out:
mutex_unlock(&inode->i_mutex);
inode_unlock(inode);
return ret;
}

@@ -888,7 +888,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
return -ENOMEM;

sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
mutex_lock(&ip->i_inode.i_mutex);
inode_lock(&ip->i_inode);
for (qx = 0; qx < num_qd; qx++) {
error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
GL_NOCACHE, &ghs[qx]);

@@ -953,7 +953,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
out:
while (qx--)
gfs2_glock_dq_uninit(&ghs[qx]);
mutex_unlock(&ip->i_inode.i_mutex);
inode_unlock(&ip->i_inode);
kfree(ghs);
gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl, NORMAL_FLUSH);
return error;

@@ -1674,7 +1674,7 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
if (error)
goto out_put;

mutex_lock(&ip->i_inode.i_mutex);
inode_lock(&ip->i_inode);
error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
if (error)
goto out_unlockput;

@@ -1739,7 +1739,7 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
out_q:
gfs2_glock_dq_uninit(&q_gh);
out_unlockput:
mutex_unlock(&ip->i_inode.i_mutex);
inode_unlock(&ip->i_inode);
out_put:
qd_put(qd);
return error;
@@ -173,9 +173,9 @@ static int hfs_dir_release(struct inode *inode, struct file *file)
{
struct hfs_readdir_data *rd = file->private_data;
if (rd) {
mutex_lock(&inode->i_mutex);
inode_lock(inode);
list_del(&rd->list);
mutex_unlock(&inode->i_mutex);
inode_unlock(inode);
kfree(rd);
}
return 0;

@@ -570,13 +570,13 @@ static int hfs_file_release(struct inode *inode, struct file *file)
if (HFS_IS_RSRC(inode))
inode = HFS_I(inode)->rsrc_inode;
if (atomic_dec_and_test(&HFS_I(inode)->opencnt)) {
mutex_lock(&inode->i_mutex);
inode_lock(inode);
hfs_file_truncate(inode);
//if (inode->i_flags & S_DEAD) {
// hfs_delete_cat(inode->i_ino, HFSPLUS_SB(sb).hidden_dir, NULL);
// hfs_delete_inode(inode);
//}
mutex_unlock(&inode->i_mutex);
inode_unlock(inode);
}
return 0;
}

@@ -656,7 +656,7 @@ static int hfs_file_fsync(struct file *filp, loff_t start, loff_t end,
ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
if (ret)
return ret;
mutex_lock(&inode->i_mutex);
inode_lock(inode);

/* sync the inode to buffers */
ret = write_inode_now(inode, 0);

@@ -668,7 +668,7 @@ static int hfs_file_fsync(struct file *filp, loff_t start, loff_t end,
err = sync_blockdev(sb->s_bdev);
if (!ret)
ret = err;
mutex_unlock(&inode->i_mutex);
inode_unlock(inode);
return ret;
}
@@ -284,9 +284,9 @@ static int hfsplus_dir_release(struct inode *inode, struct file *file)
{
struct hfsplus_readdir_data *rd = file->private_data;
if (rd) {
mutex_lock(&inode->i_mutex);
inode_lock(inode);
list_del(&rd->list);
mutex_unlock(&inode->i_mutex);
inode_unlock(inode);
kfree(rd);
}
return 0;

@@ -229,14 +229,14 @@ static int hfsplus_file_release(struct inode *inode, struct file *file)
if (HFSPLUS_IS_RSRC(inode))
inode = HFSPLUS_I(inode)->rsrc_inode;
if (atomic_dec_and_test(&HFSPLUS_I(inode)->opencnt)) {
mutex_lock(&inode->i_mutex);
inode_lock(inode);
hfsplus_file_truncate(inode);
if (inode->i_flags & S_DEAD) {
hfsplus_delete_cat(inode->i_ino,
HFSPLUS_SB(sb)->hidden_dir, NULL);
hfsplus_delete_inode(inode);
}
mutex_unlock(&inode->i_mutex);
inode_unlock(inode);
}
return 0;
}

@@ -286,7 +286,7 @@ int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
error = filemap_write_and_wait_range(inode->i_mapping, start, end);
if (error)
return error;
mutex_lock(&inode->i_mutex);
inode_lock(inode);

/*
* Sync inode metadata into the catalog and extent trees.

@@ -327,7 +327,7 @@ int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);

mutex_unlock(&inode->i_mutex);
inode_unlock(inode);

return error;
}

@@ -93,7 +93,7 @@ static int hfsplus_ioctl_setflags(struct file *file, int __user *user_flags)
goto out_drop_write;
}

mutex_lock(&inode->i_mutex);
inode_lock(inode);

if ((flags & (FS_IMMUTABLE_FL|FS_APPEND_FL)) ||
inode->i_flags & (S_IMMUTABLE|S_APPEND)) {

@@ -126,7 +126,7 @@ static int hfsplus_ioctl_setflags(struct file *file, int __user *user_flags)
mark_inode_dirty(inode);

out_unlock_inode:
mutex_unlock(&inode->i_mutex);
inode_unlock(inode);
out_drop_write:
mnt_drop_write_file(file);
out:
@@ -378,9 +378,9 @@ static int hostfs_fsync(struct file *file, loff_t start, loff_t end,
if (ret)
return ret;

mutex_lock(&inode->i_mutex);
inode_lock(inode);
ret = fsync_file(HOSTFS_I(inode)->fd, datasync);
mutex_unlock(&inode->i_mutex);
inode_unlock(inode);

return ret;
}

@@ -33,7 +33,7 @@ static loff_t hpfs_dir_lseek(struct file *filp, loff_t off, int whence)
if (whence == SEEK_DATA || whence == SEEK_HOLE)
return -EINVAL;

mutex_lock(&i->i_mutex);
inode_lock(i);
hpfs_lock(s);

/*pr_info("dir lseek\n");*/

@@ -48,12 +48,12 @@ static loff_t hpfs_dir_lseek(struct file *filp, loff_t off, int whence)
ok:
filp->f_pos = new_off;
hpfs_unlock(s);
mutex_unlock(&i->i_mutex);
inode_unlock(i);
return new_off;
fail:
/*pr_warn("illegal lseek: %016llx\n", new_off);*/
hpfs_unlock(s);
mutex_unlock(&i->i_mutex);
inode_unlock(i);
return -ESPIPE;
}
@@ -141,7 +141,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)

vma_len = (loff_t)(vma->vm_end - vma->vm_start);

mutex_lock(&inode->i_mutex);
inode_lock(inode);
file_accessed(file);

ret = -ENOMEM;

@@ -157,7 +157,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
if (vma->vm_flags & VM_WRITE && inode->i_size < len)
inode->i_size = len;
out:
mutex_unlock(&inode->i_mutex);
inode_unlock(inode);

return ret;
}

@@ -530,7 +530,7 @@ static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
if (hole_end > hole_start) {
struct address_space *mapping = inode->i_mapping;

mutex_lock(&inode->i_mutex);
inode_lock(inode);
i_mmap_lock_write(mapping);
if (!RB_EMPTY_ROOT(&mapping->i_mmap))
hugetlb_vmdelete_list(&mapping->i_mmap,

@@ -538,7 +538,7 @@ static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
hole_end >> PAGE_SHIFT);
i_mmap_unlock_write(mapping);
remove_inode_hugepages(inode, hole_start, hole_end);
mutex_unlock(&inode->i_mutex);
inode_unlock(inode);
}

return 0;

@@ -572,7 +572,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
start = offset >> hpage_shift;
end = (offset + len + hpage_size - 1) >> hpage_shift;

mutex_lock(&inode->i_mutex);
inode_lock(inode);

/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
error = inode_newsize_ok(inode, offset + len);

@@ -659,7 +659,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
i_size_write(inode, offset + len);
inode->i_ctime = CURRENT_TIME;
out:
mutex_unlock(&inode->i_mutex);
inode_unlock(inode);
return error;
}
@@ -966,9 +966,9 @@ void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
swap(inode1, inode2);

if (inode1 && !S_ISDIR(inode1->i_mode))
mutex_lock(&inode1->i_mutex);
inode_lock(inode1);
if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
mutex_lock_nested(&inode2->i_mutex, I_MUTEX_NONDIR2);
inode_lock_nested(inode2, I_MUTEX_NONDIR2);
}
EXPORT_SYMBOL(lock_two_nondirectories);

@@ -980,9 +980,9 @@ EXPORT_SYMBOL(lock_two_nondirectories);
void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
if (inode1 && !S_ISDIR(inode1->i_mode))
mutex_unlock(&inode1->i_mutex);
inode_unlock(inode1);
if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
mutex_unlock(&inode2->i_mutex);
inode_unlock(inode2);
}
EXPORT_SYMBOL(unlock_two_nondirectories);
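The helper definitions themselves are not part of the hunks shown here. Judging from the one-for-one substitutions above (inode_lock, inode_unlock, inode_trylock, inode_lock_nested), they are presumably thin static inline wrappers over the inode's i_mutex, along the lines of the sketch below; treat it as an assumption about the header change, not a verbatim copy of it.

/* Sketch of the wrapper API implied by the conversions in this commit. */
static inline void inode_lock(struct inode *inode)
{
	mutex_lock(&inode->i_mutex);
}

static inline void inode_unlock(struct inode *inode)
{
	mutex_unlock(&inode->i_mutex);
}

static inline int inode_trylock(struct inode *inode)
{
	return mutex_trylock(&inode->i_mutex);
}

static inline void inode_lock_nested(struct inode *inode, unsigned subclass)
{
	mutex_lock_nested(&inode->i_mutex, subclass);
}

Funnelling every caller through one small set of helpers like this is what allows the lock's implementation to be changed later in a single place, without another tree-wide sweep over every filesystem.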
@@ -434,9 +434,9 @@ int generic_block_fiemap(struct inode *inode,
u64 len, get_block_t *get_block)
{
int ret;
mutex_lock(&inode->i_mutex);
inode_lock(inode);
ret = __generic_block_fiemap(inode, fieinfo, start, len, get_block);
mutex_unlock(&inode->i_mutex);
inode_unlock(inode);
return ret;
}
EXPORT_SYMBOL(generic_block_fiemap);
@@ -39,10 +39,10 @@ int jffs2_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
if (ret)
return ret;

mutex_lock(&inode->i_mutex);
inode_lock(inode);
/* Trigger GC to flush any pending writes for this inode */
jffs2_flush_wbuf_gc(c, inode->i_ino);
mutex_unlock(&inode->i_mutex);
inode_unlock(inode);

return 0;
}

@@ -38,17 +38,17 @@ int jfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
if (rc)
return rc;

mutex_lock(&inode->i_mutex);
inode_lock(inode);
if (!(inode->i_state & I_DIRTY_ALL) ||
(datasync && !(inode->i_state & I_DIRTY_DATASYNC))) {
/* Make sure committed changes hit the disk */
jfs_flush_journal(JFS_SBI(inode->i_sb)->log, 1);
mutex_unlock(&inode->i_mutex);
inode_unlock(inode);
return rc;
}

rc |= jfs_commit_inode(inode, 1);
mutex_unlock(&inode->i_mutex);
inode_unlock(inode);

return rc ? -EIO : 0;
}

@@ -96,7 +96,7 @@ long jfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}

/* Lock against other parallel changes of flags */
mutex_lock(&inode->i_mutex);
inode_lock(inode);

jfs_get_inode_flags(jfs_inode);
oldflags = jfs_inode->mode2;

@@ -109,7 +109,7 @@ long jfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
((flags ^ oldflags) &
(JFS_APPEND_FL | JFS_IMMUTABLE_FL))) {
if (!capable(CAP_LINUX_IMMUTABLE)) {
mutex_unlock(&inode->i_mutex);
inode_unlock(inode);
err = -EPERM;
goto setflags_out;
}

@@ -120,7 +120,7 @@ long jfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
jfs_inode->mode2 = flags;

jfs_set_inode_flags(inode);
mutex_unlock(&inode->i_mutex);
inode_unlock(inode);
inode->i_ctime = CURRENT_TIME_SEC;
mark_inode_dirty(inode);
setflags_out:

@@ -792,7 +792,7 @@ static ssize_t jfs_quota_write(struct super_block *sb, int type,
struct buffer_head tmp_bh;
struct buffer_head *bh;

mutex_lock(&inode->i_mutex);
inode_lock(inode);
while (towrite > 0) {
tocopy = sb->s_blocksize - offset < towrite ?
sb->s_blocksize - offset : towrite;

@@ -824,7 +824,7 @@ static ssize_t jfs_quota_write(struct super_block *sb, int type,
}
out:
if (len == towrite) {
mutex_unlock(&inode->i_mutex);
inode_unlock(inode);
return err;
}
if (inode->i_size < off+len-towrite)

@@ -832,7 +832,7 @@ static ssize_t jfs_quota_write(struct super_block *sb, int type,
inode->i_version++;
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
mark_inode_dirty(inode);
mutex_unlock(&inode->i_mutex);
inode_unlock(inode);
return len - towrite;
}
@@ -1511,9 +1511,9 @@ static loff_t kernfs_dir_fop_llseek(struct file *file, loff_t offset,
struct inode *inode = file_inode(file);
loff_t ret;

mutex_lock(&inode->i_mutex);
inode_lock(inode);
ret = generic_file_llseek(file, offset, whence);
mutex_unlock(&inode->i_mutex);
inode_unlock(inode);

return ret;
}
fs/libfs.c

@@ -89,7 +89,7 @@ EXPORT_SYMBOL(dcache_dir_close);
loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
{
struct dentry *dentry = file->f_path.dentry;
mutex_lock(&d_inode(dentry)->i_mutex);
inode_lock(d_inode(dentry));
switch (whence) {
case 1:
offset += file->f_pos;

@@ -97,7 +97,7 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
if (offset >= 0)
break;
default:
mutex_unlock(&d_inode(dentry)->i_mutex);
inode_unlock(d_inode(dentry));
return -EINVAL;
}
if (offset != file->f_pos) {

@@ -124,7 +124,7 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
spin_unlock(&dentry->d_lock);
}
}
mutex_unlock(&d_inode(dentry)->i_mutex);
inode_unlock(d_inode(dentry));
return offset;
}
EXPORT_SYMBOL(dcache_dir_lseek);

@@ -941,7 +941,7 @@ int __generic_file_fsync(struct file *file, loff_t start, loff_t end,
if (err)
return err;

mutex_lock(&inode->i_mutex);
inode_lock(inode);
ret = sync_mapping_buffers(inode->i_mapping);
if (!(inode->i_state & I_DIRTY_ALL))
goto out;

@@ -953,7 +953,7 @@ int __generic_file_fsync(struct file *file, loff_t start, loff_t end,
ret = err;

out:
mutex_unlock(&inode->i_mutex);
inode_unlock(inode);
return ret;
}
EXPORT_SYMBOL(__generic_file_fsync);
@@ -1650,12 +1650,12 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
* bother, maybe that's a sign this just isn't a good file to
* hand out a delegation on.
*/
if (is_deleg && !mutex_trylock(&inode->i_mutex))
if (is_deleg && !inode_trylock(inode))
return -EAGAIN;

if (is_deleg && arg == F_WRLCK) {
/* Write delegations are not currently supported: */
mutex_unlock(&inode->i_mutex);
inode_unlock(inode);
WARN_ON_ONCE(1);
return -EINVAL;
}

@@ -1732,7 +1732,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
spin_unlock(&ctx->flc_lock);
locks_dispose_list(&dispose);
if (is_deleg)
mutex_unlock(&inode->i_mutex);
inode_unlock(inode);
if (!error && !my_fl)
*flp = NULL;
return error;
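One conversion in the lease code above is not a plain lock/unlock pair: the non-blocking attempt in generic_add_lease(). For the negated test to keep its meaning, inode_trylock() has to follow mutex_trylock() semantics and return non-zero only when the lock was actually acquired. A minimal sketch of that pattern, with a made-up example_try_delegation() caller:

/* Non-blocking variant: back off with -EAGAIN instead of sleeping when the
 * inode lock is contended, as generic_add_lease() does above.
 */
static int example_try_delegation(struct inode *inode)
{
	if (!inode_trylock(inode))
		return -EAGAIN;

	/* ... set up the delegation while the inode is held ... */

	inode_unlock(inode);
	return 0;
}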
@@ -204,12 +204,12 @@ long logfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (err)
return err;

mutex_lock(&inode->i_mutex);
inode_lock(inode);
oldflags = li->li_flags;
flags &= LOGFS_FL_USER_MODIFIABLE;
flags |= oldflags & ~LOGFS_FL_USER_MODIFIABLE;
li->li_flags = flags;
mutex_unlock(&inode->i_mutex);
inode_unlock(inode);

inode->i_ctime = CURRENT_TIME;
mark_inode_dirty_sync(inode);

@@ -230,11 +230,11 @@ int logfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
if (ret)
return ret;

mutex_lock(&inode->i_mutex);
inode_lock(inode);
logfs_get_wblocks(sb, NULL, WF_LOCK);
logfs_write_anchor(sb);
logfs_put_wblocks(sb, NULL, WF_LOCK);
mutex_unlock(&inode->i_mutex);
inode_unlock(inode);

return 0;
}
Some files were not shown because too many files have changed in this diff.