vmscan: add customisable shrinker batch size

For shrinkers that have their own cond_resched* calls, having
shrink_slab break the work down into small batches is not
particularly efficient. Add a custom batch size field to the struct
shrinker so that shrinkers can use a larger batch size if they
desire.

A value of zero (uninitialised) means "use the default", so
behaviour is unchanged by this patch.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
This commit is contained in:
Dave Chinner 2011-07-08 14:14:37 +10:00 committed by Al Viro
parent 3567b59aa8
commit e9299f5058
2 changed files with 7 additions and 5 deletions

View File

@ -1150,6 +1150,7 @@ struct shrink_control {
struct shrinker { struct shrinker {
int (*shrink)(struct shrinker *, struct shrink_control *sc); int (*shrink)(struct shrinker *, struct shrink_control *sc);
int seeks; /* seeks to recreate an obj */ int seeks; /* seeks to recreate an obj */
long batch; /* reclaim batch size, 0 = default */
/* These are for internal use */ /* These are for internal use */
struct list_head list; struct list_head list;

View File

@ -253,6 +253,8 @@ unsigned long shrink_slab(struct shrink_control *shrink,
int shrink_ret = 0; int shrink_ret = 0;
long nr; long nr;
long new_nr; long new_nr;
long batch_size = shrinker->batch ? shrinker->batch
: SHRINK_BATCH;
/* /*
* copy the current shrinker scan count into a local variable * copy the current shrinker scan count into a local variable
@ -303,19 +305,18 @@ unsigned long shrink_slab(struct shrink_control *shrink,
nr_pages_scanned, lru_pages, nr_pages_scanned, lru_pages,
max_pass, delta, total_scan); max_pass, delta, total_scan);
while (total_scan >= SHRINK_BATCH) { while (total_scan >= batch_size) {
long this_scan = SHRINK_BATCH;
int nr_before; int nr_before;
nr_before = do_shrinker_shrink(shrinker, shrink, 0); nr_before = do_shrinker_shrink(shrinker, shrink, 0);
shrink_ret = do_shrinker_shrink(shrinker, shrink, shrink_ret = do_shrinker_shrink(shrinker, shrink,
this_scan); batch_size);
if (shrink_ret == -1) if (shrink_ret == -1)
break; break;
if (shrink_ret < nr_before) if (shrink_ret < nr_before)
ret += nr_before - shrink_ret; ret += nr_before - shrink_ret;
count_vm_events(SLABS_SCANNED, this_scan); count_vm_events(SLABS_SCANNED, batch_size);
total_scan -= this_scan; total_scan -= batch_size;
cond_resched(); cond_resched();
} }