vmscan: protect reading of reclaim_stat with lru_lock
Rik van Riel pointed out that reading reclaim_stat should be protected
by lru_lock; otherwise vmscan might sweep twice as many pages as intended.
This fault was introduced by

  commit 4f98a2fee8
  Author: Rik van Riel <riel@redhat.com>
  Date:   Sat Oct 18 20:26:32 2008 -0700

      vmscan: split LRU lists into anon & file sets
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
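
The race being fixed is easiest to see as a generic locking pattern: the
writers in vmscan age recent_scanned and recent_rotated together under
zone->lru_lock, so a reader that samples the pair without the lock can
combine a fresh numerator with a stale denominator and badly misjudge
scan pressure. Below is a minimal user-space sketch of that pattern,
assuming plain pthreads; the counter names echo reclaim_stat, but
everything here is an invented demo, not kernel code.

	/*
	 * Hedged illustration only: plain pthreads, not mm/vmscan.c.
	 * All names below are invented for the demo.
	 */
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned long recent_scanned = 1000;	/* stand-in for reclaim_stat */
	static unsigned long recent_rotated = 500;

	/* Writer: ages both counters together, as vmscan does under lru_lock. */
	static void *ager(void *arg)
	{
		(void)arg;
		for (int i = 0; i < 100000; i++) {
			pthread_mutex_lock(&lru_lock);
			recent_scanned = recent_scanned / 2 + 100;
			recent_rotated = recent_rotated / 2 + 50;
			pthread_mutex_unlock(&lru_lock);
		}
		return NULL;
	}

	/*
	 * Reader: holding the lock across BOTH loads keeps the
	 * scanned:rotated ratio coherent; sampling the two counters
	 * without the lock could pair a fresh numerator with a stale
	 * denominator.
	 */
	static unsigned long pressure(void)
	{
		pthread_mutex_lock(&lru_lock);
		unsigned long p = (recent_scanned + 1) / (recent_rotated + 1);
		pthread_mutex_unlock(&lru_lock);
		return p;
	}

	int main(void)
	{
		pthread_t t;
		pthread_create(&t, NULL, ager, NULL);
		for (int i = 0; i < 5; i++)
			printf("pressure ratio ~ %lu\n", pressure());
		pthread_join(t, NULL);
		return 0;
	}

The patch below applies the same idea: instead of taking and dropping
lru_lock around each individual update, one lock section now covers the
decay of both counter pairs and the ap/fp computation that reads them.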
commit 58c37f6e0d
parent 1574804899

 mm/vmscan.c | 20 +++++++++-----------
 1 file changed, 9 insertions(+), 11 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1627,6 +1627,13 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 		}
 	}
 
+	/*
+	 * With swappiness at 100, anonymous and file have the same priority.
+	 * This scanning priority is essentially the inverse of IO cost.
+	 */
+	anon_prio = sc->swappiness;
+	file_prio = 200 - sc->swappiness;
+
 	/*
 	 * OK, so we have swap space and a fair amount of page cache
 	 * pages.  We use the recently rotated / recently scanned
@@ -1638,27 +1645,17 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 	 *
 	 * anon in [0], file in [1]
 	 */
+	spin_lock_irq(&zone->lru_lock);
 	if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
-		spin_lock_irq(&zone->lru_lock);
 		reclaim_stat->recent_scanned[0] /= 2;
 		reclaim_stat->recent_rotated[0] /= 2;
-		spin_unlock_irq(&zone->lru_lock);
 	}
 
 	if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
-		spin_lock_irq(&zone->lru_lock);
 		reclaim_stat->recent_scanned[1] /= 2;
 		reclaim_stat->recent_rotated[1] /= 2;
-		spin_unlock_irq(&zone->lru_lock);
 	}
 
-	/*
-	 * With swappiness at 100, anonymous and file have the same priority.
-	 * This scanning priority is essentially the inverse of IO cost.
-	 */
-	anon_prio = sc->swappiness;
-	file_prio = 200 - sc->swappiness;
-
 	/*
 	 * The amount of pressure on anon vs file pages is inversely
 	 * proportional to the fraction of recently scanned pages on
@@ -1669,6 +1666,7 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 
 	fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
 	fp /= reclaim_stat->recent_rotated[1] + 1;
+	spin_unlock_irq(&zone->lru_lock);
 
 	fraction[0] = ap;
 	fraction[1] = fp;
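
For reference, the arithmetic that the now-single locked region protects
can be sketched stand-alone. The formulas below mirror the ap/fp
computation in get_scan_count(); the swappiness and counter values are
made-up examples, not measurements.

	#include <stdio.h>

	int main(void)
	{
		/* Example inputs only; real values come from sc and reclaim_stat. */
		unsigned long swappiness = 60;
		unsigned long anon_prio = swappiness;		/* 60 */
		unsigned long file_prio = 200 - swappiness;	/* 140 */
		unsigned long recent_scanned[2] = { 8000, 24000 };	/* anon, file */
		unsigned long recent_rotated[2] = { 4000, 2000 };

		/*
		 * Pressure rises with priority and scan count, and falls as
		 * more scanned pages turn out to be recently rotated (hot).
		 */
		unsigned long ap = (anon_prio + 1) * (recent_scanned[0] + 1)
					/ (recent_rotated[0] + 1);
		unsigned long fp = (file_prio + 1) * (recent_scanned[1] + 1)
					/ (recent_rotated[1] + 1);

		printf("fraction[0] (anon) = %lu, fraction[1] (file) = %lu\n",
		       ap, fp);
		return 0;
	}

Because both halving blocks and both ap/fp computations now sit inside
one lru_lock section, the scanned/rotated pairs used in this arithmetic
cannot be decayed or bumped mid-computation by a concurrent reclaimer.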