forked from luck/tmp_suning_uos_patched
[PATCH] vmscan: balancing fix
Revert a patch which went into 2.6.8-rc1.  The changelog for that patch was:

  The shrink_zone() logic can, under some circumstances, cause far too many
  pages to be reclaimed.  Say, we're scanning at high priority and suddenly
  hit a large number of reclaimable pages on the LRU.

  Change things so we bale out when SWAP_CLUSTER_MAX pages have been
  reclaimed.

Problem is, this change caused significant imbalance in inter-zone scan
balancing by truncating scans of larger zones.

Suppose, for example, ZONE_HIGHMEM is 10x the size of ZONE_NORMAL.  The zone
balancing algorithm would require that if we're scanning 100 pages of
ZONE_HIGHMEM, we should scan 10 pages of ZONE_NORMAL.  But this logic will
cause the scanning of ZONE_HIGHMEM to bale out after only 32 pages are
reclaimed.  Thus effectively causing smaller zones to be scanned relatively
harder than large ones.

Now I need to remember what the workload was which caused me to write this
patch originally, then fix it up in a different way...

Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
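To make the changelog's arithmetic concrete, here is a minimal user-space sketch (not kernel code) of the imbalance being described. The 32-page SWAP_CLUSTER_MAX value and the requested scan counts come from the text above; every identifier and the worst-case assumption that every scanned page is reclaimable are illustrative assumptions for this example only.

/*
 * Illustrative user-space sketch only -- not kernel code.  It models the
 * changelog's example: ZONE_HIGHMEM is 10x ZONE_NORMAL, so the balancing
 * logic asks for 100 HIGHMEM pages per 10 NORMAL pages.  Assuming the
 * worst case where every scanned page is reclaimable, the reverted
 * patch's bale-out stops scanning after SWAP_CLUSTER_MAX (32) pages.
 */
#include <stdio.h>

#define SWAP_CLUSTER_MAX 32	/* bale-out threshold from the reverted patch */

int main(void)
{
	const char *names[]   = { "ZONE_NORMAL", "ZONE_HIGHMEM" };
	long requested_scan[] = { 10, 100 };	/* proportional to zone size */

	for (int i = 0; i < 2; i++) {
		/* Bale out once SWAP_CLUSTER_MAX pages have been reclaimed. */
		long actual = requested_scan[i] < SWAP_CLUSTER_MAX ?
			      requested_scan[i] : SWAP_CLUSTER_MAX;

		printf("%-13s asked to scan %3ld, actually scans %3ld (%3.0f%%)\n",
		       names[i], requested_scan[i], actual,
		       100.0 * actual / requested_scan[i]);
	}
	return 0;
}

Under the reverted patch the small zone gets its full requested scan while the large zone is cut to roughly a third of its, which is exactly the relative over-scanning of smaller zones that this revert removes.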
This commit is contained in:
parent 41e9b63b35
commit 210fe53030
mm/vmscan.c
@@ -63,9 +63,6 @@ struct scan_control {
 
 	unsigned long nr_mapped;	/* From page_state */
 
-	/* How many pages shrink_cache() should reclaim */
-	int nr_to_reclaim;
-
 	/* Ask shrink_caches, or shrink_zone to scan at this priority */
 	unsigned int priority;
 
@@ -656,7 +653,6 @@ static void shrink_cache(struct zone *zone, struct scan_control *sc)
 		if (current_is_kswapd())
 			mod_page_state(kswapd_steal, nr_freed);
 		mod_page_state_zone(zone, pgsteal, nr_freed);
-		sc->nr_to_reclaim -= nr_freed;
 
 		spin_lock_irq(&zone->lru_lock);
 		/*
@@ -856,8 +852,6 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
 	else
 		nr_inactive = 0;
 
-	sc->nr_to_reclaim = sc->swap_cluster_max;
-
 	while (nr_active || nr_inactive) {
 		if (nr_active) {
 			sc->nr_to_scan = min(nr_active,
@@ -871,8 +865,6 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
 					(unsigned long)sc->swap_cluster_max);
 			nr_inactive -= sc->nr_to_scan;
 			shrink_cache(zone, sc);
-			if (sc->nr_to_reclaim <= 0)
-				break;
 		}
 	}
 
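For orientation, below is a small, hedged user-space model of the batching behaviour the revert restores in shrink_zone(): the per-zone quotas are worked through in swap_cluster_max sized chunks until both are exhausted, rather than stopping once SWAP_CLUSTER_MAX pages have been reclaimed. The quota numbers and helper names are made up for the example; only the loop shape mirrors the context lines of the diff above.

/*
 * User-space model of the restored batching behaviour -- not kernel code.
 * The quota numbers and helpers are illustrative assumptions; the control
 * flow ("consume the full quota in swap_cluster_max sized batches") is
 * what the context lines of the diff above show once nr_to_reclaim is gone.
 */
#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long swap_cluster_max = 32;
	unsigned long nr_active = 100;		/* made-up zone-proportional quotas */
	unsigned long nr_inactive = 250;
	unsigned long nr_to_scan, batches = 0;

	while (nr_active || nr_inactive) {
		if (nr_active) {
			nr_to_scan = min_ul(nr_active, swap_cluster_max);
			nr_active -= nr_to_scan;
			batches++;	/* stands in for scanning a batch of the active list */
		}
		if (nr_inactive) {
			nr_to_scan = min_ul(nr_inactive, swap_cluster_max);
			nr_inactive -= nr_to_scan;
			batches++;	/* stands in for shrink_cache() on the inactive list */
		}
	}

	printf("quota fully consumed in %lu batches of <= %lu pages\n",
	       batches, swap_cluster_max);
	return 0;
}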