2012-11-29 12:28:09 +08:00
|
|
|
/*
|
2012-11-14 15:59:04 +08:00
|
|
|
* fs/f2fs/dir.c
|
|
|
|
*
|
|
|
|
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
|
|
|
|
* http://www.samsung.com/
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
|
|
* published by the Free Software Foundation.
|
|
|
|
*/
|
|
|
|
#include <linux/fs.h>
|
|
|
|
#include <linux/f2fs_fs.h>
|
|
|
|
#include "f2fs.h"
|
f2fs: fix handling errors got by f2fs_write_inode
Ruslan reported that f2fs hangs with an infinite loop in f2fs_sync_file():
while (sync_node_pages(sbi, inode->i_ino, &wbc) == 0)
f2fs_write_inode(inode, NULL);
The reason was revealed that the cold flag is not set even though this inode is
a normal file. Therefore, sync_node_pages() skips to write node blocks since it
only writes cold node blocks.
The cold flag is stored to the node_footer in node block, and whenever a new
node page is allocated, it is set according to its file type, file or directory.
But, after sudden-power-off, when recovering the inode page, f2fs doesn't recover
its cold flag.
So, let's assign the cold flag in more right places.
One more thing:
If f2fs_write_inode() returns an error due to whatever situations, there would
be no dirty node pages so that sync_node_pages() returns zero.
(i.e., zero means nothing was written.)
Reported-by: Ruslan N. Marchenko <me@ruff.mobi>
Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
2012-12-19 14:28:39 +08:00
|
|
|
#include "node.h"
|
2012-11-14 15:59:04 +08:00
|
|
|
#include "acl.h"
|
2013-06-03 18:46:19 +08:00
|
|
|
#include "xattr.h"
|
2012-11-14 15:59:04 +08:00
|
|
|
|
|
|
|
static unsigned long dir_blocks(struct inode *inode)
|
|
|
|
{
|
|
|
|
return ((unsigned long long) (i_size_read(inode) + PAGE_CACHE_SIZE - 1))
|
|
|
|
>> PAGE_CACHE_SHIFT;
|
|
|
|
}
|
|
|
|
|
|
|
|
static unsigned int dir_buckets(unsigned int level)
|
|
|
|
{
|
|
|
|
if (level < MAX_DIR_HASH_DEPTH / 2)
|
|
|
|
return 1 << level;
|
|
|
|
else
|
|
|
|
return 1 << ((MAX_DIR_HASH_DEPTH / 2) - 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
static unsigned int bucket_blocks(unsigned int level)
|
|
|
|
{
|
|
|
|
if (level < MAX_DIR_HASH_DEPTH / 2)
|
|
|
|
return 2;
|
|
|
|
else
|
|
|
|
return 4;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Map on-disk f2fs file-type codes (F2FS_FT_*) to VFS d_type codes (DT_*). */
static unsigned char f2fs_filetype_table[F2FS_FT_MAX] = {
	[F2FS_FT_UNKNOWN]	= DT_UNKNOWN,
	[F2FS_FT_REG_FILE]	= DT_REG,
	[F2FS_FT_DIR]		= DT_DIR,
	[F2FS_FT_CHRDEV]	= DT_CHR,
	[F2FS_FT_BLKDEV]	= DT_BLK,
	[F2FS_FT_FIFO]		= DT_FIFO,
	[F2FS_FT_SOCK]		= DT_SOCK,
	[F2FS_FT_SYMLINK]	= DT_LNK,
};
|
|
|
|
|
|
|
|
/* Shift that brings the S_IFMT bits of i_mode down to a small table index. */
#define S_SHIFT 12
/* Map (i_mode & S_IFMT) >> S_SHIFT to on-disk F2FS_FT_* codes; see set_de_type(). */
static unsigned char f2fs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= F2FS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= F2FS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= F2FS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= F2FS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= F2FS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= F2FS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= F2FS_FT_SYMLINK,
};
|
|
|
|
|
|
|
|
static void set_de_type(struct f2fs_dir_entry *de, struct inode *inode)
|
|
|
|
{
|
2013-03-30 00:23:28 +08:00
|
|
|
umode_t mode = inode->i_mode;
|
2012-11-14 15:59:04 +08:00
|
|
|
de->file_type = f2fs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Block offset of bucket @idx within hash level @level: step over every
 * block of all shallower levels, then over the preceding buckets of
 * this level.
 */
static unsigned long dir_block_index(unsigned int level, unsigned int idx)
{
	unsigned long block = idx * bucket_blocks(level);
	unsigned long lvl;

	for (lvl = 0; lvl < level; lvl++)
		block += dir_buckets(lvl) * bucket_blocks(lvl);
	return block;
}
|
|
|
|
|
2012-12-28 01:55:46 +08:00
|
|
|
static bool early_match_name(const char *name, size_t namelen,
|
2012-11-14 15:59:04 +08:00
|
|
|
f2fs_hash_t namehash, struct f2fs_dir_entry *de)
|
|
|
|
{
|
|
|
|
if (le16_to_cpu(de->name_len) != namelen)
|
|
|
|
return false;
|
|
|
|
|
2012-11-28 15:12:41 +08:00
|
|
|
if (de->hash_code != namehash)
|
2012-11-14 15:59:04 +08:00
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Scan one dentry block for @name.  On a hit, *res_page is set to
 * @dentry_page — which is left kmapped for the caller to kunmap — and the
 * matching entry is returned.  On a miss, the page is kunmapped here and
 * NULL is returned; *max_slots is raised to the largest run of free slots
 * observed after an in-use entry, so the caller can tell whether a new
 * entry of a given length would fit in this block.
 */
static struct f2fs_dir_entry *find_in_block(struct page *dentry_page,
		const char *name, size_t namelen, int *max_slots,
		f2fs_hash_t namehash, struct page **res_page)
{
	struct f2fs_dir_entry *de;
	unsigned long bit_pos, end_pos, next_pos;
	struct f2fs_dentry_block *dentry_blk = kmap(dentry_page);
	int slots;

	/* first in-use slot in the block's allocation bitmap */
	bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
					NR_DENTRY_IN_BLOCK, 0);
	while (bit_pos < NR_DENTRY_IN_BLOCK) {
		de = &dentry_blk->dentry[bit_pos];
		/* a long name occupies several consecutive slots */
		slots = GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));

		/* length/hash check first; only then compare the bytes */
		if (early_match_name(name, namelen, namehash, de)) {
			if (!memcmp(dentry_blk->filename[bit_pos],
							name, namelen)) {
				*res_page = dentry_page;
				goto found;
			}
		}
		next_pos = bit_pos + slots;
		bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
				NR_DENTRY_IN_BLOCK, next_pos);
		if (bit_pos >= NR_DENTRY_IN_BLOCK)
			end_pos = NR_DENTRY_IN_BLOCK;
		else
			end_pos = bit_pos;
		/* free gap between this entry's end and the next in-use slot */
		if (*max_slots < end_pos - next_pos)
			*max_slots = end_pos - next_pos;
	}

	de = NULL;
	/* miss: drop the mapping before returning */
	kunmap(dentry_page);
found:
	return de;
}
|
|
|
|
|
|
|
|
/*
 * Search one level of @dir's dentry hash tree for @name.  The name hash
 * selects a single bucket at this level, so only that bucket's blocks are
 * read.  On a hit the entry is returned with *res_page set (mapped) by
 * find_in_block().  On a miss, if some block had room for the name (or a
 * block was not yet allocated), the (hash, level) pair is cached in the
 * in-memory inode so a subsequent add of the same name can reuse the spot.
 */
static struct f2fs_dir_entry *find_in_level(struct inode *dir,
		unsigned int level, const char *name, size_t namelen,
		f2fs_hash_t namehash, struct page **res_page)
{
	int s = GET_DENTRY_SLOTS(namelen);	/* slots @name would need */
	unsigned int nbucket, nblock;
	unsigned int bidx, end_block;
	struct page *dentry_page;
	struct f2fs_dir_entry *de = NULL;
	bool room = false;
	int max_slots = 0;

	f2fs_bug_on(level > MAX_DIR_HASH_DEPTH);

	nbucket = dir_buckets(level);
	nblock = bucket_blocks(level);

	/* first block of the bucket this hash maps to at this level */
	bidx = dir_block_index(level, le32_to_cpu(namehash) % nbucket);
	end_block = bidx + nblock;

	for (; bidx < end_block; bidx++) {
		/* no need to allocate new dentry pages to all the indices */
		dentry_page = find_data_page(dir, bidx, true);
		if (IS_ERR(dentry_page)) {
			/* hole: the whole block is free for a future add */
			room = true;
			continue;
		}

		de = find_in_block(dentry_page, name, namelen,
					&max_slots, namehash, res_page);
		if (de)
			break;	/* page ref + kmap handed to the caller */

		if (max_slots >= s)
			room = true;
		f2fs_put_page(dentry_page, 0);
	}

	/* remember where @name would fit, for a following f2fs_add_link() */
	if (!de && room && F2FS_I(dir)->chash != namehash) {
		F2FS_I(dir)->chash = namehash;
		F2FS_I(dir)->clevel = level;
	}

	return de;
}
|
|
|
|
|
|
|
|
/*
 * Find an entry in the specified directory with the wanted name.
 * It returns the page where the entry was found (as a parameter - res_page),
 * and the entry itself. Page is returned mapped and unlocked.
 * Entry is guaranteed to be valid.
 */
struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
			struct qstr *child, struct page **res_page)
{
	const char *name = child->name;
	size_t namelen = child->len;
	unsigned long npages = dir_blocks(dir);
	struct f2fs_dir_entry *de = NULL;
	f2fs_hash_t name_hash;
	unsigned int max_depth;
	unsigned int level;

	/* empty directory: nothing to search */
	if (npages == 0)
		return NULL;

	*res_page = NULL;

	name_hash = f2fs_dentry_hash(name, namelen);
	max_depth = F2FS_I(dir)->i_current_depth;

	/* walk the hash tree from the shallowest level downward */
	for (level = 0; level < max_depth; level++) {
		de = find_in_level(dir, level, name,
					namelen, name_hash, res_page);
		if (de)
			break;
	}
	/*
	 * Not found: cache the hash and the deepest level searched so a
	 * following add of the same name can skip straight there.
	 */
	if (!de && F2FS_I(dir)->chash != name_hash) {
		F2FS_I(dir)->chash = name_hash;
		F2FS_I(dir)->clevel = level - 1;
	}
	return de;
}
|
|
|
|
|
|
|
|
struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p)
|
|
|
|
{
|
2013-05-23 21:58:07 +08:00
|
|
|
struct page *page;
|
|
|
|
struct f2fs_dir_entry *de;
|
|
|
|
struct f2fs_dentry_block *dentry_blk;
|
2012-11-14 15:59:04 +08:00
|
|
|
|
|
|
|
page = get_lock_data_page(dir, 0);
|
|
|
|
if (IS_ERR(page))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
dentry_blk = kmap(page);
|
|
|
|
de = &dentry_blk->dentry[1];
|
|
|
|
*p = page;
|
|
|
|
unlock_page(page);
|
|
|
|
return de;
|
|
|
|
}
|
|
|
|
|
|
|
|
ino_t f2fs_inode_by_name(struct inode *dir, struct qstr *qstr)
|
|
|
|
{
|
|
|
|
ino_t res = 0;
|
|
|
|
struct f2fs_dir_entry *de;
|
|
|
|
struct page *page;
|
|
|
|
|
|
|
|
de = f2fs_find_entry(dir, qstr, &page);
|
|
|
|
if (de) {
|
|
|
|
res = le32_to_cpu(de->ino);
|
|
|
|
kunmap(page);
|
|
|
|
f2fs_put_page(page, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Repoint an existing directory entry @de (residing on @page, as returned
 * mapped and unlocked by f2fs_find_entry) at @inode; used by rename.
 * Consumes the caller's kmap and reference on @page, and dirties @dir.
 */
void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
		struct page *page, struct inode *inode)
{
	lock_page(page);
	/* don't modify the block while it is being written back */
	wait_on_page_writeback(page);
	de->ino = cpu_to_le32(inode->i_ino);
	set_de_type(de, inode);
	kunmap(page);
	set_page_dirty(page);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	mark_inode_dirty(dir);

	f2fs_put_page(page, 1);
}
|
|
|
|
|
2013-05-20 09:10:29 +08:00
|
|
|
static void init_dent_inode(const struct qstr *name, struct page *ipage)
|
2012-11-14 15:59:04 +08:00
|
|
|
{
|
2013-12-26 15:30:41 +08:00
|
|
|
struct f2fs_inode *ri;
|
2012-11-14 15:59:04 +08:00
|
|
|
|
2013-01-26 05:01:21 +08:00
|
|
|
/* copy name info. to this inode page */
|
2013-12-26 15:30:41 +08:00
|
|
|
ri = F2FS_INODE(ipage);
|
|
|
|
ri->i_namelen = cpu_to_le32(name->len);
|
|
|
|
memcpy(ri->i_name, name->name, name->len);
|
2012-11-14 15:59:04 +08:00
|
|
|
set_page_dirty(ipage);
|
|
|
|
}
|
|
|
|
|
2013-07-18 17:02:31 +08:00
|
|
|
int update_dent_inode(struct inode *inode, const struct qstr *name)
|
|
|
|
{
|
|
|
|
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
|
|
|
|
struct page *page;
|
|
|
|
|
|
|
|
page = get_node_page(sbi, inode->i_ino);
|
|
|
|
if (IS_ERR(page))
|
|
|
|
return PTR_ERR(page);
|
|
|
|
|
|
|
|
init_dent_inode(name, page);
|
|
|
|
f2fs_put_page(page, 1);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-05-20 09:10:29 +08:00
|
|
|
/*
 * Populate block 0 of the new directory @inode with its "." and ".."
 * entries.  @parent is the directory @inode is being created in; @page is
 * @inode's node page, passed through to get_new_data_page().
 * Returns 0 on success or the error from allocating the dentry block.
 */
static int make_empty_dir(struct inode *inode,
		struct inode *parent, struct page *page)
{
	struct page *dentry_page;
	struct f2fs_dentry_block *dentry_blk;
	struct f2fs_dir_entry *de;
	void *kaddr;

	dentry_page = get_new_data_page(inode, page, 0, true);
	if (IS_ERR(dentry_page))
		return PTR_ERR(dentry_page);

	kaddr = kmap_atomic(dentry_page);
	dentry_blk = (struct f2fs_dentry_block *)kaddr;

	/* slot 0: "." pointing back at this directory */
	de = &dentry_blk->dentry[0];
	de->name_len = cpu_to_le16(1);
	de->hash_code = 0;
	de->ino = cpu_to_le32(inode->i_ino);
	memcpy(dentry_blk->filename[0], ".", 1);
	set_de_type(de, inode);

	/* slot 1: ".." pointing at the parent */
	de = &dentry_blk->dentry[1];
	de->hash_code = 0;
	de->name_len = cpu_to_le16(2);
	de->ino = cpu_to_le32(parent->i_ino);
	memcpy(dentry_blk->filename[1], "..", 2);
	/* typed from @inode, not @parent — both are directories, same type */
	set_de_type(de, inode);

	/* mark both slots in-use in the block's allocation bitmap */
	test_and_set_bit_le(0, &dentry_blk->dentry_bitmap);
	test_and_set_bit_le(1, &dentry_blk->dentry_bitmap);
	kunmap_atomic(kaddr);

	set_page_dirty(dentry_page);
	f2fs_put_page(dentry_page, 1);
	return 0;
}
|
|
|
|
|
2013-05-20 09:10:29 +08:00
|
|
|
/*
 * Prepare @inode's node page for being linked under @dir as @name.
 * For a brand-new inode (FI_NEW_INODE) this allocates the node page,
 * builds the "."/".." block for directories, and initializes ACLs and
 * the security xattr.  For an existing inode (extra hard link) the node
 * page is re-read and marked cold.  In both paths the dentry name is
 * copied into the inode page.  Returns the locked node page on success
 * or ERR_PTR; on failure the partially built inode page is removed.
 */
static struct page *init_inode_metadata(struct inode *inode,
		struct inode *dir, const struct qstr *name)
{
	struct page *page;
	int err;

	if (is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) {
		page = new_inode_page(inode, name);
		if (IS_ERR(page))
			return page;

		if (S_ISDIR(inode->i_mode)) {
			err = make_empty_dir(inode, dir, page);
			if (err)
				goto error;	/* page freed by make_empty_dir path? no — see below */
		}

		err = f2fs_init_acl(inode, dir, page);
		if (err)
			goto put_error;

		err = f2fs_init_security(inode, dir, name, page);
		if (err)
			goto put_error;

		wait_on_page_writeback(page);
	} else {
		page = get_node_page(F2FS_SB(dir->i_sb), inode->i_ino);
		if (IS_ERR(page))
			return page;

		wait_on_page_writeback(page);
		/* re-assert the cold flag lost across power-off/recovery */
		set_cold_node(inode, page);
	}

	init_dent_inode(name, page);

	/*
	 * This file should be checkpointed during fsync.
	 * We lost i_pino from now on.
	 */
	if (is_inode_flag_set(F2FS_I(inode), FI_INC_LINK)) {
		file_lost_pino(inode);
		inc_nlink(inode);
	}
	return page;

put_error:
	f2fs_put_page(page, 1);
error:
	/* drop the node page allocated for this half-built inode */
	remove_inode_page(inode);
	return ERR_PTR(err);
}
|
|
|
|
|
|
|
|
static void update_parent_metadata(struct inode *dir, struct inode *inode,
|
|
|
|
unsigned int current_depth)
|
|
|
|
{
|
|
|
|
if (is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) {
|
|
|
|
if (S_ISDIR(inode->i_mode)) {
|
|
|
|
inc_nlink(dir);
|
2013-06-07 21:08:23 +08:00
|
|
|
set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
|
2012-11-14 15:59:04 +08:00
|
|
|
}
|
|
|
|
clear_inode_flag(F2FS_I(inode), FI_NEW_INODE);
|
|
|
|
}
|
|
|
|
dir->i_mtime = dir->i_ctime = CURRENT_TIME;
|
|
|
|
if (F2FS_I(dir)->i_current_depth != current_depth) {
|
|
|
|
F2FS_I(dir)->i_current_depth = current_depth;
|
2013-06-07 21:08:23 +08:00
|
|
|
set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
|
2012-11-14 15:59:04 +08:00
|
|
|
}
|
|
|
|
|
2013-06-07 21:08:23 +08:00
|
|
|
if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR))
|
f2fs: introduce a new global lock scheme
In the previous version, f2fs uses global locks according to the usage types,
such as directory operations, block allocation, block write, and so on.
Reference the following lock types in f2fs.h.
enum lock_type {
RENAME, /* for renaming operations */
DENTRY_OPS, /* for directory operations */
DATA_WRITE, /* for data write */
DATA_NEW, /* for data allocation */
DATA_TRUNC, /* for data truncate */
NODE_NEW, /* for node allocation */
NODE_TRUNC, /* for node truncate */
NODE_WRITE, /* for node write */
NR_LOCK_TYPE,
};
In that case, we lose the performance under the multi-threading environment,
since every types of operations must be conducted one at a time.
In order to address the problem, let's share the locks globally with a mutex
array regardless of any types.
So, let users grab a mutex and perform their jobs in parallel as much as
possbile.
For this, I propose a new global lock scheme as follows.
0. Data structure
- f2fs_sb_info -> mutex_lock[NR_GLOBAL_LOCKS]
- f2fs_sb_info -> node_write
1. mutex_lock_op(sbi)
- try to get an avaiable lock from the array.
- returns the index of the gottern lock variable.
2. mutex_unlock_op(sbi, index of the lock)
- unlock the given index of the lock.
3. mutex_lock_all(sbi)
- grab all the locks in the array before the checkpoint.
4. mutex_unlock_all(sbi)
- release all the locks in the array after checkpoint.
5. block_operations()
- call mutex_lock_all()
- sync_dirty_dir_inodes()
- grab node_write
- sync_node_pages()
Note that,
the pairs of mutex_lock_op()/mutex_unlock_op() and
mutex_lock_all()/mutex_unlock_all() should be used together.
Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
2012-11-22 15:21:29 +08:00
|
|
|
update_inode_page(dir);
|
2012-11-14 15:59:04 +08:00
|
|
|
else
|
|
|
|
mark_inode_dirty(dir);
|
|
|
|
|
|
|
|
if (is_inode_flag_set(F2FS_I(inode), FI_INC_LINK))
|
|
|
|
clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int room_for_filename(struct f2fs_dentry_block *dentry_blk, int slots)
|
|
|
|
{
|
|
|
|
int bit_start = 0;
|
|
|
|
int zero_start, zero_end;
|
|
|
|
next:
|
|
|
|
zero_start = find_next_zero_bit_le(&dentry_blk->dentry_bitmap,
|
|
|
|
NR_DENTRY_IN_BLOCK,
|
|
|
|
bit_start);
|
|
|
|
if (zero_start >= NR_DENTRY_IN_BLOCK)
|
|
|
|
return NR_DENTRY_IN_BLOCK;
|
|
|
|
|
|
|
|
zero_end = find_next_bit_le(&dentry_blk->dentry_bitmap,
|
|
|
|
NR_DENTRY_IN_BLOCK,
|
|
|
|
zero_start);
|
|
|
|
if (zero_end - zero_start >= slots)
|
|
|
|
return zero_start;
|
|
|
|
|
|
|
|
bit_start = zero_end + 1;
|
|
|
|
|
|
|
|
if (zero_end + 1 >= NR_DENTRY_IN_BLOCK)
|
|
|
|
return NR_DENTRY_IN_BLOCK;
|
|
|
|
goto next;
|
|
|
|
}
|
|
|
|
|
f2fs: introduce a new global lock scheme
In the previous version, f2fs uses global locks according to the usage types,
such as directory operations, block allocation, block write, and so on.
Reference the following lock types in f2fs.h.
enum lock_type {
RENAME, /* for renaming operations */
DENTRY_OPS, /* for directory operations */
DATA_WRITE, /* for data write */
DATA_NEW, /* for data allocation */
DATA_TRUNC, /* for data truncate */
NODE_NEW, /* for node allocation */
NODE_TRUNC, /* for node truncate */
NODE_WRITE, /* for node write */
NR_LOCK_TYPE,
};
In that case, we lose the performance under the multi-threading environment,
since every types of operations must be conducted one at a time.
In order to address the problem, let's share the locks globally with a mutex
array regardless of any types.
So, let users grab a mutex and perform their jobs in parallel as much as
possbile.
For this, I propose a new global lock scheme as follows.
0. Data structure
- f2fs_sb_info -> mutex_lock[NR_GLOBAL_LOCKS]
- f2fs_sb_info -> node_write
1. mutex_lock_op(sbi)
- try to get an avaiable lock from the array.
- returns the index of the gottern lock variable.
2. mutex_unlock_op(sbi, index of the lock)
- unlock the given index of the lock.
3. mutex_lock_all(sbi)
- grab all the locks in the array before the checkpoint.
4. mutex_unlock_all(sbi)
- release all the locks in the array after checkpoint.
5. block_operations()
- call mutex_lock_all()
- sync_dirty_dir_inodes()
- grab node_write
- sync_node_pages()
Note that,
the pairs of mutex_lock_op()/mutex_unlock_op() and
mutex_lock_all()/mutex_unlock_all() should be used together.
Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
2012-11-22 15:21:29 +08:00
|
|
|
/*
|
2013-12-21 18:02:14 +08:00
|
|
|
* Caller should grab and release a rwsem by calling f2fs_lock_op() and
|
|
|
|
* f2fs_unlock_op().
|
f2fs: introduce a new global lock scheme
In the previous version, f2fs uses global locks according to the usage types,
such as directory operations, block allocation, block write, and so on.
Reference the following lock types in f2fs.h.
enum lock_type {
RENAME, /* for renaming operations */
DENTRY_OPS, /* for directory operations */
DATA_WRITE, /* for data write */
DATA_NEW, /* for data allocation */
DATA_TRUNC, /* for data truncate */
NODE_NEW, /* for node allocation */
NODE_TRUNC, /* for node truncate */
NODE_WRITE, /* for node write */
NR_LOCK_TYPE,
};
In that case, we lose the performance under the multi-threading environment,
since every types of operations must be conducted one at a time.
In order to address the problem, let's share the locks globally with a mutex
array regardless of any types.
So, let users grab a mutex and perform their jobs in parallel as much as
possible.
For this, I propose a new global lock scheme as follows.
0. Data structure
- f2fs_sb_info -> mutex_lock[NR_GLOBAL_LOCKS]
- f2fs_sb_info -> node_write
1. mutex_lock_op(sbi)
- try to get an available lock from the array.
- returns the index of the obtained lock variable.
2. mutex_unlock_op(sbi, index of the lock)
- unlock the given index of the lock.
3. mutex_lock_all(sbi)
- grab all the locks in the array before the checkpoint.
4. mutex_unlock_all(sbi)
- release all the locks in the array after checkpoint.
5. block_operations()
- call mutex_lock_all()
- sync_dirty_dir_inodes()
- grab node_write
- sync_node_pages()
Note that,
the pairs of mutex_lock_op()/mutex_unlock_op() and
mutex_lock_all()/mutex_unlock_all() should be used together.
Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
2012-11-22 15:21:29 +08:00
|
|
|
*/
|
2014-01-18 04:44:39 +08:00
|
|
|
/*
 * Add the dentry @name for @inode into the hash-indexed directory @dir.
 *
 * The directory is organized as a multi-level hash table: each level has
 * dir_buckets(level) buckets of bucket_blocks(level) dentry blocks.  We scan
 * the bucket selected by the name hash at each level, and descend (growing
 * i_current_depth if needed) until a block with enough free slots is found.
 *
 * Returns 0 on success, -ENOSPC when every level up to MAX_DIR_HASH_DEPTH is
 * full, or a negative errno from page allocation / inode initialization.
 */
int __f2fs_add_link(struct inode *dir, const struct qstr *name,
						struct inode *inode)
{
	unsigned int bit_pos;
	unsigned int level;
	unsigned int current_depth;
	unsigned long bidx, block;
	f2fs_hash_t dentry_hash;
	struct f2fs_dir_entry *de;
	unsigned int nbucket, nblock;
	size_t namelen = name->len;
	struct page *dentry_page = NULL;
	struct f2fs_dentry_block *dentry_blk = NULL;
	int slots = GET_DENTRY_SLOTS(namelen);	/* one slot per F2FS_SLOT_LEN chunk of the name */
	struct page *page;
	int err = 0;
	int i;

	dentry_hash = f2fs_dentry_hash(name->name, name->len);
	level = 0;
	current_depth = F2FS_I(dir)->i_current_depth;
	/*
	 * A previous failed lookup may have cached the level where this hash
	 * was last searched (chash/clevel); start there instead of level 0.
	 */
	if (F2FS_I(dir)->chash == dentry_hash) {
		level = F2FS_I(dir)->clevel;
		F2FS_I(dir)->chash = 0;
	}

start:
	if (unlikely(current_depth == MAX_DIR_HASH_DEPTH))
		return -ENOSPC;

	/* Increase the depth, if required */
	if (level == current_depth)
		++current_depth;

	nbucket = dir_buckets(level);
	nblock = bucket_blocks(level);

	/* block index of the first block in this level's target bucket */
	bidx = dir_block_index(level, (le32_to_cpu(dentry_hash) % nbucket));

	/* scan every block of the bucket for a run of 'slots' free entries */
	for (block = bidx; block <= (bidx + nblock - 1); block++) {
		dentry_page = get_new_data_page(dir, NULL, block, true);
		if (IS_ERR(dentry_page))
			return PTR_ERR(dentry_page);

		dentry_blk = kmap(dentry_page);
		bit_pos = room_for_filename(dentry_blk, slots);
		if (bit_pos < NR_DENTRY_IN_BLOCK)
			goto add_dentry;	/* page stays mapped + locked */

		kunmap(dentry_page);
		f2fs_put_page(dentry_page, 1);
	}

	/* Move to next level to find the empty slot for new dentry */
	++level;
	goto start;
add_dentry:
	wait_on_page_writeback(dentry_page);

	/* allocates/updates the inode page of @inode; returns it locked */
	page = init_inode_metadata(inode, dir, name);
	if (IS_ERR(page)) {
		err = PTR_ERR(page);
		goto fail;
	}
	de = &dentry_blk->dentry[bit_pos];
	de->hash_code = dentry_hash;
	de->name_len = cpu_to_le16(namelen);
	memcpy(dentry_blk->filename[bit_pos], name->name, name->len);
	de->ino = cpu_to_le32(inode->i_ino);
	set_de_type(de, inode);
	/* mark every slot the (possibly long) name occupies as in use */
	for (i = 0; i < slots; i++)
		test_and_set_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);
	set_page_dirty(dentry_page);

	/* we don't need to mark_inode_dirty now */
	F2FS_I(inode)->i_pino = dir->i_ino;
	update_inode(inode, page);
	f2fs_put_page(page, 1);

	/* bump dir's link count / depth / timestamps as needed */
	update_parent_metadata(dir, inode, current_depth);
fail:
	clear_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
	kunmap(dentry_page);
	f2fs_put_page(dentry_page, 1);
	return err;
}
|
|
|
|
|
2012-11-29 12:28:09 +08:00
|
|
|
/*
 * It only removes the dentry from the dentry page; the corresponding name
 * entry in the name page does not need to be touched during deletion.
 *
 * @dentry: entry to remove (points into the kmapped @page)
 * @page:   dentry block page, kmapped by f2fs_find_entry(); unmapped and
 *          released here
 * @inode:  inode the entry referred to, or NULL (e.g. f2fs_set_link path);
 *          when set, its link count is dropped and orphan state updated
 */
void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
						struct inode *inode)
{
	struct f2fs_dentry_block *dentry_blk;
	unsigned int bit_pos;
	struct address_space *mapping = page->mapping;
	struct inode *dir = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
	int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
	void *kaddr = page_address(page);
	int i;

	lock_page(page);
	wait_on_page_writeback(page);

	dentry_blk = (struct f2fs_dentry_block *)kaddr;
	/* slot index of @dentry within the block's dentry array */
	bit_pos = dentry - (struct f2fs_dir_entry *)dentry_blk->dentry;
	/* free every bitmap slot the name occupied */
	for (i = 0; i < slots; i++)
		test_and_clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);

	/* Let's check and deallocate this dentry page */
	bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
			NR_DENTRY_IN_BLOCK,
			0);
	kunmap(page); /* kunmap - pair of f2fs_find_entry */
	set_page_dirty(page);

	dir->i_ctime = dir->i_mtime = CURRENT_TIME;

	if (inode && S_ISDIR(inode->i_mode)) {
		/* removing a subdirectory drops dir's ".." backlink */
		drop_nlink(dir);
		update_inode_page(dir);
	} else {
		mark_inode_dirty(dir);
	}

	if (inode) {
		inode->i_ctime = CURRENT_TIME;
		drop_nlink(inode);
		if (S_ISDIR(inode->i_mode)) {
			/* second drop: the directory's own "." entry */
			drop_nlink(inode);
			i_size_write(inode, 0);
		}
		update_inode_page(inode);

		/* track (or stop tracking) this inode as an orphan */
		if (inode->i_nlink == 0)
			add_orphan_inode(sbi, inode->i_ino);
		else
			release_orphan_inode(sbi);
	}

	/* block became completely empty: punch it out of the directory */
	if (bit_pos == NR_DENTRY_IN_BLOCK) {
		truncate_hole(dir, page->index, page->index + 1);
		clear_page_dirty_for_io(page);
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(dir);
	}
	f2fs_put_page(page, 1);
}
|
|
|
|
|
|
|
|
bool f2fs_empty_dir(struct inode *dir)
|
|
|
|
{
|
|
|
|
unsigned long bidx;
|
|
|
|
struct page *dentry_page;
|
|
|
|
unsigned int bit_pos;
|
|
|
|
struct f2fs_dentry_block *dentry_blk;
|
|
|
|
unsigned long nblock = dir_blocks(dir);
|
|
|
|
|
|
|
|
for (bidx = 0; bidx < nblock; bidx++) {
|
|
|
|
void *kaddr;
|
|
|
|
dentry_page = get_lock_data_page(dir, bidx);
|
|
|
|
if (IS_ERR(dentry_page)) {
|
|
|
|
if (PTR_ERR(dentry_page) == -ENOENT)
|
|
|
|
continue;
|
|
|
|
else
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
kaddr = kmap_atomic(dentry_page);
|
|
|
|
dentry_blk = (struct f2fs_dentry_block *)kaddr;
|
|
|
|
if (bidx == 0)
|
|
|
|
bit_pos = 2;
|
|
|
|
else
|
|
|
|
bit_pos = 0;
|
|
|
|
bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
|
|
|
|
NR_DENTRY_IN_BLOCK,
|
|
|
|
bit_pos);
|
|
|
|
kunmap_atomic(kaddr);
|
|
|
|
|
|
|
|
f2fs_put_page(dentry_page, 1);
|
|
|
|
|
|
|
|
if (bit_pos < NR_DENTRY_IN_BLOCK)
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2013-05-18 06:02:17 +08:00
|
|
|
/*
 * ->iterate implementation: emit directory entries starting at ctx->pos.
 *
 * ctx->pos encodes (block index * NR_DENTRY_IN_BLOCK + slot index), so a
 * resumed readdir lands on the exact dentry slot where the previous call
 * stopped.  Returns 0; unreadable blocks are silently skipped.
 */
static int f2fs_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	unsigned long npages = dir_blocks(inode);
	unsigned int bit_pos = 0;
	struct f2fs_dentry_block *dentry_blk = NULL;
	struct f2fs_dir_entry *de = NULL;
	struct page *dentry_page = NULL;
	unsigned int n = ((unsigned long)ctx->pos / NR_DENTRY_IN_BLOCK);
	unsigned char d_type = DT_UNKNOWN;

	/* slot offset within the first block to resume from */
	bit_pos = ((unsigned long)ctx->pos % NR_DENTRY_IN_BLOCK);

	for (; n < npages; n++) {
		dentry_page = get_lock_data_page(inode, n);
		if (IS_ERR(dentry_page))
			continue;	/* skip holes / read errors */

		dentry_blk = kmap(dentry_page);
		while (bit_pos < NR_DENTRY_IN_BLOCK) {
			/* advance to the next in-use dentry slot */
			bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
							NR_DENTRY_IN_BLOCK,
							bit_pos);
			if (bit_pos >= NR_DENTRY_IN_BLOCK)
				break;

			de = &dentry_blk->dentry[bit_pos];
			/* map on-disk file type to DT_* for the VFS */
			if (de->file_type < F2FS_FT_MAX)
				d_type = f2fs_filetype_table[de->file_type];
			else
				d_type = DT_UNKNOWN;
			/* dir_emit returning false means the caller's buffer is full */
			if (!dir_emit(ctx,
					dentry_blk->filename[bit_pos],
					le16_to_cpu(de->name_len),
					le32_to_cpu(de->ino), d_type))
				goto stop;

			/* long names span multiple slots; skip them all */
			bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
			ctx->pos = n * NR_DENTRY_IN_BLOCK + bit_pos;
		}
		bit_pos = 0;
		ctx->pos = (n + 1) * NR_DENTRY_IN_BLOCK;
		kunmap(dentry_page);
		f2fs_put_page(dentry_page, 1);
		dentry_page = NULL;	/* so the stop: path won't double-release */
	}
stop:
	/* reached only via goto with the current page still mapped + held */
	if (dentry_page && !IS_ERR(dentry_page)) {
		kunmap(dentry_page);
		f2fs_put_page(dentry_page, 1);
	}

	return 0;
}
|
|
|
|
|
|
|
|
/* file_operations for f2fs directories; iteration is hash-order via f2fs_readdir */
const struct file_operations f2fs_dir_operations = {
	.llseek = generic_file_llseek,
	.read = generic_read_dir,
	.iterate = f2fs_readdir,
	.fsync = f2fs_sync_file,
	.unlocked_ioctl = f2fs_ioctl,
};
|