ext4: fix race in ext4_mb_add_n_trim()

In ext4_mb_add_n_trim(), lg_prealloc_lock should be taken when
changing the lg_prealloc_list: rcu_read_lock() only protects readers
traversing the list, so writers that modify it must be serialized by
the spinlock.

Signed-off-by: Niu Yawei <yawei.niu@intel.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Cc: stable@vger.kernel.org
commit f116700971
parent 87e698734b
@@ -4136,7 +4136,7 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
 	/* The max size of hash table is PREALLOC_TB_SIZE */
 	order = PREALLOC_TB_SIZE - 1;
 	/* Add the prealloc space to lg */
-	rcu_read_lock();
+	spin_lock(&lg->lg_prealloc_lock);
 	list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
 						pa_inode_list) {
 		spin_lock(&tmp_pa->pa_lock);
@@ -4160,12 +4160,12 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
 	if (!added)
 		list_add_tail_rcu(&pa->pa_inode_list,
 					&lg->lg_prealloc_list[order]);
-	rcu_read_unlock();
+	spin_unlock(&lg->lg_prealloc_lock);
 
 	/* Now trim the list to be not more than 8 elements */
 	if (lg_prealloc_count > 8) {
 		ext4_mb_discard_lg_preallocations(sb, lg,
-						order, lg_prealloc_count);
+					  order, lg_prealloc_count);
 		return;
 	}
 	return ;
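
For context, the locking rule this patch enforces is the standard RCU-protected-list pattern: rcu_read_lock() makes traversal safe against concurrent updates, but it does not serialize writers against each other, so any path that inserts into or removes from the list must hold the writer-side spinlock. Below is a minimal kernel-style sketch of that pattern; my_entry, my_list, my_lock, my_find() and my_add() are hypothetical names for illustration, not ext4 code.

/*
 * Hypothetical example, not ext4 code. Readers may walk the list
 * under rcu_read_lock() alone; writers must exclude each other with
 * the spinlock.
 */
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_entry {
	int val;
	struct list_head node;
};

static LIST_HEAD(my_list);
static DEFINE_SPINLOCK(my_lock);

/* Reader side: traversal only, so RCU protection is sufficient. */
static bool my_find(int val)
{
	struct my_entry *e;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &my_list, node) {
		if (e->val == val) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

/*
 * Writer side: list_add_tail_rcu() publishes the entry safely to
 * concurrent readers, but two writers still race with each other,
 * so the spinlock is mandatory here; rcu_read_lock() would not
 * provide that exclusion.
 */
static int my_add(int val)
{
	struct my_entry *e = kmalloc(sizeof(*e), GFP_KERNEL);

	if (!e)
		return -ENOMEM;
	e->val = val;

	spin_lock(&my_lock);
	list_add_tail_rcu(&e->node, &my_list);
	spin_unlock(&my_lock);
	return 0;
}

Because ext4_mb_add_n_trim() both walks lg_prealloc_list[order] and inserts pa into it with list_add_tail_rcu(), it is on the writer side, which is why the patch replaces the rcu_read_lock()/rcu_read_unlock() pair around that section with lg->lg_prealloc_lock.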