forked from luck/tmp_suning_uos_patched
writeback: bdi_for_each_wb() iteration is memcg ID based not blkcg
wb's (bdi_writeback's) are currently keyed by memcg ID; however, in an earlier implementation, wb's were keyed by blkcg ID.

bdi_for_each_wb() walks bdi->cgwb_tree in the ascending ID order and allows iterations to start from an arbitrary ID, which is used to interrupt and resume iterations.

Unfortunately, while changing wb to be keyed by memcg ID instead of blkcg, bdi_for_each_wb() was missed and still assumes that wb's are keyed by blkcg ID. This doesn't affect iterations which don't get interrupted, but bdi_split_work_to_wbs() makes use of iteration resuming on allocation failures and thus may incorrectly skip or repeat wb's.

Fix it by changing bdi_for_each_wb() to take memcg IDs instead of blkcg IDs and updating bdi_split_work_to_wbs() accordingly.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@fb.com>
This commit is contained in:
parent
11743ee047
commit
1ed8d48c57
|
@ -839,7 +839,7 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
|
|||
bool skip_if_busy)
|
||||
{
|
||||
long nr_pages = base_work->nr_pages;
|
||||
int next_blkcg_id = 0;
|
||||
int next_memcg_id = 0;
|
||||
struct bdi_writeback *wb;
|
||||
struct wb_iter iter;
|
||||
|
||||
|
@ -849,14 +849,14 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
|
|||
return;
|
||||
restart:
|
||||
rcu_read_lock();
|
||||
bdi_for_each_wb(wb, bdi, &iter, next_blkcg_id) {
|
||||
bdi_for_each_wb(wb, bdi, &iter, next_memcg_id) {
|
||||
if (!wb_has_dirty_io(wb) ||
|
||||
(skip_if_busy && writeback_in_progress(wb)))
|
||||
continue;
|
||||
|
||||
base_work->nr_pages = wb_split_bdi_pages(wb, nr_pages);
|
||||
if (!wb_clone_and_queue_work(wb, base_work)) {
|
||||
next_blkcg_id = wb->blkcg_css->id + 1;
|
||||
next_memcg_id = wb->memcg_css->id + 1;
|
||||
rcu_read_unlock();
|
||||
wb_wait_for_single_work(bdi, base_work);
|
||||
goto restart;
|
||||
|
|
|
@ -402,7 +402,7 @@ static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
|
|||
}
|
||||
|
||||
struct wb_iter {
|
||||
int start_blkcg_id;
|
||||
int start_memcg_id;
|
||||
struct radix_tree_iter tree_iter;
|
||||
void **slot;
|
||||
};
|
||||
|
@ -414,9 +414,9 @@ static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter,
|
|||
|
||||
WARN_ON_ONCE(!rcu_read_lock_held());
|
||||
|
||||
if (iter->start_blkcg_id >= 0) {
|
||||
iter->slot = radix_tree_iter_init(titer, iter->start_blkcg_id);
|
||||
iter->start_blkcg_id = -1;
|
||||
if (iter->start_memcg_id >= 0) {
|
||||
iter->slot = radix_tree_iter_init(titer, iter->start_memcg_id);
|
||||
iter->start_memcg_id = -1;
|
||||
} else {
|
||||
iter->slot = radix_tree_next_slot(iter->slot, titer, 0);
|
||||
}
|
||||
|
@ -430,30 +430,30 @@ static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter,
|
|||
|
||||
static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter,
|
||||
struct backing_dev_info *bdi,
|
||||
int start_blkcg_id)
|
||||
int start_memcg_id)
|
||||
{
|
||||
iter->start_blkcg_id = start_blkcg_id;
|
||||
iter->start_memcg_id = start_memcg_id;
|
||||
|
||||
if (start_blkcg_id)
|
||||
if (start_memcg_id)
|
||||
return __wb_iter_next(iter, bdi);
|
||||
else
|
||||
return &bdi->wb;
|
||||
}
|
||||
|
||||
/**
|
||||
* bdi_for_each_wb - walk all wb's of a bdi in ascending blkcg ID order
|
||||
* bdi_for_each_wb - walk all wb's of a bdi in ascending memcg ID order
|
||||
* @wb_cur: cursor struct bdi_writeback pointer
|
||||
* @bdi: bdi to walk wb's of
|
||||
* @iter: pointer to struct wb_iter to be used as iteration buffer
|
||||
* @start_blkcg_id: blkcg ID to start iteration from
|
||||
* @start_memcg_id: memcg ID to start iteration from
|
||||
*
|
||||
* Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending
|
||||
* blkcg ID order starting from @start_blkcg_id. @iter is struct wb_iter
|
||||
* memcg ID order starting from @start_memcg_id. @iter is struct wb_iter
|
||||
* to be used as temp storage during iteration. rcu_read_lock() must be
|
||||
* held throughout iteration.
|
||||
*/
|
||||
#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id) \
|
||||
for ((wb_cur) = __wb_iter_init(iter, bdi, start_blkcg_id); \
|
||||
#define bdi_for_each_wb(wb_cur, bdi, iter, start_memcg_id) \
|
||||
for ((wb_cur) = __wb_iter_init(iter, bdi, start_memcg_id); \
|
||||
(wb_cur); (wb_cur) = __wb_iter_next(iter, bdi))
|
||||
|
||||
#else /* CONFIG_CGROUP_WRITEBACK */
|
||||
|
|
Loading…
Reference in New Issue
Block a user