forked from luck/tmp_suning_uos_patched
z3fold: fix stale list handling
Fix the situation when clear_bit() is called for page->private before the page pointer is actually assigned. While at it, remove the work_busy() check, because it is costly and does not give a 100% guarantee anyway.

Signed-off-by: Vitaly Wool <vitalywool@gmail.com>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: <Oleksiy.Avramchenko@sony.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
6818600ff0
commit
3552935742
|
@ -250,6 +250,7 @@ static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
|
|||
|
||||
WARN_ON(!list_empty(&zhdr->buddy));
|
||||
set_bit(PAGE_STALE, &page->private);
|
||||
clear_bit(NEEDS_COMPACTING, &page->private);
|
||||
spin_lock(&pool->lock);
|
||||
if (!list_empty(&page->lru))
|
||||
list_del(&page->lru);
|
||||
|
@ -303,7 +304,6 @@ static void free_pages_work(struct work_struct *w)
|
|||
list_del(&zhdr->buddy);
|
||||
if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
|
||||
continue;
|
||||
clear_bit(NEEDS_COMPACTING, &page->private);
|
||||
spin_unlock(&pool->stale_lock);
|
||||
cancel_work_sync(&zhdr->work);
|
||||
free_z3fold_page(page);
|
||||
|
@ -624,10 +624,8 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
|
|||
* stale pages list. cancel_work_sync() can sleep so we must make
|
||||
* sure it won't be called in case we're in atomic context.
|
||||
*/
|
||||
if (zhdr && (can_sleep || !work_pending(&zhdr->work) ||
|
||||
!unlikely(work_busy(&zhdr->work)))) {
|
||||
if (zhdr && (can_sleep || !work_pending(&zhdr->work))) {
|
||||
list_del(&zhdr->buddy);
|
||||
clear_bit(NEEDS_COMPACTING, &page->private);
|
||||
spin_unlock(&pool->stale_lock);
|
||||
if (can_sleep)
|
||||
cancel_work_sync(&zhdr->work);
|
||||
|
|
Loading…
Reference in New Issue
Block a user