forked from luck/tmp_suning_uos_patched
btrfs: Remove extent_io_ops::fill_delalloc
This callback is called only from writepage_delalloc which in turn is guaranteed to be called from the data page writeout path. In the end there is no reason to have the call to this function to be indirected via the extent_io_ops structure. This patch removes the callback definition, exports the function and calls it directly. No functional changes. Reviewed-by: Josef Bacik <josef@toxicpanda.com> Signed-off-by: Nikolay Borisov <nborisov@suse.com> Reviewed-by: David Sterba <dsterba@suse.com> [ rename to btrfs_run_delalloc_range ] Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
parent
06f2548f9d
commit
5eaad97af8
|
@ -3189,6 +3189,9 @@ int btrfs_prealloc_file_range_trans(struct inode *inode,
|
|||
struct btrfs_trans_handle *trans, int mode,
|
||||
u64 start, u64 num_bytes, u64 min_size,
|
||||
loff_t actual_len, u64 *alloc_hint);
|
||||
int btrfs_run_delalloc_range(void *private_data, struct page *locked_page,
|
||||
u64 start, u64 end, int *page_started, unsigned long *nr_written,
|
||||
struct writeback_control *wbc);
|
||||
extern const struct dentry_operations btrfs_dentry_operations;
|
||||
|
||||
/* ioctl.c */
|
||||
|
|
|
@ -3205,7 +3205,7 @@ static void update_nr_written(struct writeback_control *wbc,
|
|||
/*
|
||||
* helper for __extent_writepage, doing all of the delayed allocation setup.
|
||||
*
|
||||
* This returns 1 if our fill_delalloc function did all the work required
|
||||
* This returns 1 if btrfs_run_delalloc_range function did all the work required
|
||||
* to write the page (copy into inline extent). In this case the IO has
|
||||
* been started and the page is already unlocked.
|
||||
*
|
||||
|
@ -3226,7 +3226,7 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode,
|
|||
int ret;
|
||||
int page_started = 0;
|
||||
|
||||
if (epd->extent_locked || !tree->ops || !tree->ops->fill_delalloc)
|
||||
if (epd->extent_locked)
|
||||
return 0;
|
||||
|
||||
while (delalloc_end < page_end) {
|
||||
|
@ -3239,18 +3239,16 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode,
|
|||
delalloc_start = delalloc_end + 1;
|
||||
continue;
|
||||
}
|
||||
ret = tree->ops->fill_delalloc(inode, page,
|
||||
delalloc_start,
|
||||
delalloc_end,
|
||||
&page_started,
|
||||
nr_written, wbc);
|
||||
ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
|
||||
delalloc_end, &page_started, nr_written, wbc);
|
||||
/* File system has been set read-only */
|
||||
if (ret) {
|
||||
SetPageError(page);
|
||||
/* fill_delalloc should be return < 0 for error
|
||||
* but just in case, we use > 0 here meaning the
|
||||
* IO is started, so we don't want to return > 0
|
||||
* unless things are going well.
|
||||
/*
|
||||
* btrfs_run_delalloc_range should return < 0 for error
|
||||
* but just in case, we use > 0 here meaning the IO is
|
||||
* started, so we don't want to return > 0 unless
|
||||
* things are going well.
|
||||
*/
|
||||
ret = ret < 0 ? ret : -EIO;
|
||||
goto done;
|
||||
|
|
|
@ -106,11 +106,6 @@ struct extent_io_ops {
|
|||
/*
|
||||
* Optional hooks, called if the pointer is not NULL
|
||||
*/
|
||||
int (*fill_delalloc)(void *private_data, struct page *locked_page,
|
||||
u64 start, u64 end, int *page_started,
|
||||
unsigned long *nr_written,
|
||||
struct writeback_control *wbc);
|
||||
|
||||
int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
|
||||
void (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
|
||||
struct extent_state *state, int uptodate);
|
||||
|
|
|
@ -109,8 +109,8 @@ static void __endio_write_update_ordered(struct inode *inode,
|
|||
* extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
|
||||
* and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
|
||||
* to be released, which we want to happen only when finishing the ordered
|
||||
* extent (btrfs_finish_ordered_io()). Also note that the caller of the
|
||||
* fill_delalloc() callback already does proper cleanup for the first page of
|
||||
* extent (btrfs_finish_ordered_io()). Also note that the caller of
|
||||
* btrfs_run_delalloc_range already does proper cleanup for the first page of
|
||||
* the range, that is, it invokes the callback writepage_end_io_hook() for the
|
||||
* range of the first page.
|
||||
*/
|
||||
|
@ -1576,12 +1576,12 @@ static inline int need_force_cow(struct inode *inode, u64 start, u64 end)
|
|||
}
|
||||
|
||||
/*
|
||||
* extent_io.c call back to do delayed allocation processing
|
||||
* Function to process delayed allocation (create CoW) for ranges which are
|
||||
* being touched for the first time.
|
||||
*/
|
||||
static int run_delalloc_range(void *private_data, struct page *locked_page,
|
||||
u64 start, u64 end, int *page_started,
|
||||
unsigned long *nr_written,
|
||||
struct writeback_control *wbc)
|
||||
int btrfs_run_delalloc_range(void *private_data, struct page *locked_page,
|
||||
u64 start, u64 end, int *page_started, unsigned long *nr_written,
|
||||
struct writeback_control *wbc)
|
||||
{
|
||||
struct inode *inode = private_data;
|
||||
int ret;
|
||||
|
@ -10526,7 +10526,6 @@ static const struct extent_io_ops btrfs_extent_io_ops = {
|
|||
.readpage_io_failed_hook = btrfs_readpage_io_failed_hook,
|
||||
|
||||
/* optional callbacks */
|
||||
.fill_delalloc = run_delalloc_range,
|
||||
.writepage_end_io_hook = btrfs_writepage_end_io_hook,
|
||||
.writepage_start_hook = btrfs_writepage_start_hook,
|
||||
.set_bit_hook = btrfs_set_bit_hook,
|
||||
|
|
Loading…
Reference in New Issue
Block a user