shrinker: Kill old ->shrink API.
There are no more users of this API, so kill it dead, dead, dead and
quietly bury the corpse in a shallow, unmarked grave in a dark forest deep
in the hills...

[glommer@openvz.org: added flowers to the grave]
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Reviewed-by: Greg Thelen <gthelen@google.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Dave Chinner authored and Al Viro committed Sep 10, 2013
1 parent 70534a7 commit a0b0213
Showing 3 changed files with 15 additions and 45 deletions.
include/linux/shrinker.h (15 changes: 5 additions & 10 deletions)
@@ -7,14 +7,15 @@
  *
  * The 'gfpmask' refers to the allocation we are currently trying to
  * fulfil.
- *
- * Note that 'shrink' will be passed nr_to_scan == 0 when the VM is
- * querying the cache size, so a fastpath for that case is appropriate.
  */
 struct shrink_control {
 	gfp_t gfp_mask;
 
-	/* How many slab objects shrinker() should scan and try to reclaim */
+	/*
+	 * How many objects scan_objects should scan and try to reclaim.
+	 * This is reset before every call, so it is safe for callees
+	 * to modify.
+	 */
 	unsigned long nr_to_scan;
 
 	/* shrink from these nodes */
@@ -27,11 +28,6 @@ struct shrink_control {
 /*
  * A callback you can register to apply pressure to ageable caches.
  *
- * @shrink() should look through the least-recently-used 'nr_to_scan' entries
- * and attempt to free them up. It should return the number of objects which
- * remain in the cache. If it returns -1, it means it cannot do any scanning at
- * this time (eg. there is a risk of deadlock).
- *
  * @count_objects should return the number of freeable items in the cache. If
  * there are no objects to free or the number of freeable items cannot be
  * determined, it should return 0. No deadlock checks should be done during the
@@ -50,7 +46,6 @@ struct shrink_control {
  * @flags determine the shrinker abilities, like numa awareness
  */
 struct shrinker {
-	int (*shrink)(struct shrinker *, struct shrink_control *sc);
 	unsigned long (*count_objects)(struct shrinker *,
 				       struct shrink_control *sc);
 	unsigned long (*scan_objects)(struct shrinker *,
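
For orientation, a minimal sketch of a shrinker written against the surviving two-callback API follows. It is illustrative only and not part of this commit; the my_cache_* helpers, my_cache_lock and my_cache_shrinker are hypothetical stand-ins rather than names from the kernel tree.

/*
 * Illustrative sketch only, not from this commit: the shape of a shrinker
 * built on the remaining count_objects/scan_objects callbacks.  The
 * my_cache_* helpers and my_cache_lock are hypothetical stand-ins.
 */
#include <linux/shrinker.h>
#include <linux/spinlock.h>

extern spinlock_t my_cache_lock;			/* assumed cache internals */
extern unsigned long my_cache_nr_freeable(void);	/* assumed */
extern unsigned long my_cache_evict(unsigned long nr);	/* assumed */

static unsigned long my_cache_count(struct shrinker *s,
				    struct shrink_control *sc)
{
	/* Report how many objects could be freed; 0 means nothing to do. */
	return my_cache_nr_freeable();
}

static unsigned long my_cache_scan(struct shrinker *s,
				   struct shrink_control *sc)
{
	unsigned long freed;

	/* If we cannot make progress safely, tell reclaim to back off. */
	if (!spin_trylock(&my_cache_lock))
		return SHRINK_STOP;

	/* Try to evict up to sc->nr_to_scan objects; return how many went. */
	freed = my_cache_evict(sc->nr_to_scan);
	spin_unlock(&my_cache_lock);
	return freed;
}

static struct shrinker my_cache_shrinker = {
	.count_objects	= my_cache_count,
	.scan_objects	= my_cache_scan,
	.seeks		= DEFAULT_SEEKS,
};

/* In init code: register_shrinker(&my_cache_shrinker); */

A scan_objects callback is expected to honour sc->nr_to_scan and may return SHRINK_STOP when it cannot make progress (for example, when a lock it must not block on is contended), which replaces the old convention of returning -1 from ->shrink().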
include/trace/events/vmscan.h (4 changes: 2 additions & 2 deletions)
@@ -202,7 +202,7 @@ TRACE_EVENT(mm_shrink_slab_start,
 
 	TP_fast_assign(
 		__entry->shr = shr;
-		__entry->shrink = shr->shrink;
+		__entry->shrink = shr->scan_objects;
 		__entry->nr_objects_to_shrink = nr_objects_to_shrink;
 		__entry->gfp_flags = sc->gfp_mask;
 		__entry->pgs_scanned = pgs_scanned;
@@ -241,7 +241,7 @@ TRACE_EVENT(mm_shrink_slab_end,
 
 	TP_fast_assign(
 		__entry->shr = shr;
-		__entry->shrink = shr->shrink;
+		__entry->shrink = shr->scan_objects;
 		__entry->unused_scan = unused_scan_cnt;
 		__entry->new_scan = new_scan_cnt;
 		__entry->retval = shrinker_retval;
mm/vmscan.c (41 changes: 8 additions & 33 deletions)
@@ -194,14 +194,6 @@ void unregister_shrinker(struct shrinker *shrinker)
 }
 EXPORT_SYMBOL(unregister_shrinker);
 
-static inline int do_shrinker_shrink(struct shrinker *shrinker,
-				     struct shrink_control *sc,
-				     unsigned long nr_to_scan)
-{
-	sc->nr_to_scan = nr_to_scan;
-	return (*shrinker->shrink)(shrinker, sc);
-}
-
 #define SHRINK_BATCH 128
 
 static unsigned long
@@ -218,10 +210,7 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
 	long batch_size = shrinker->batch ? shrinker->batch
 					  : SHRINK_BATCH;
 
-	if (shrinker->count_objects)
-		max_pass = shrinker->count_objects(shrinker, shrinkctl);
-	else
-		max_pass = do_shrinker_shrink(shrinker, shrinkctl, 0);
+	max_pass = shrinker->count_objects(shrinker, shrinkctl);
 	if (max_pass == 0)
 		return 0;
 
@@ -240,7 +229,7 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
 	if (total_scan < 0) {
 		printk(KERN_ERR
 		"shrink_slab: %pF negative objects to delete nr=%ld\n",
-		       shrinker->shrink, total_scan);
+		       shrinker->scan_objects, total_scan);
 		total_scan = max_pass;
 	}
 
@@ -272,27 +261,13 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
 				max_pass, delta, total_scan);
 
 	while (total_scan >= batch_size) {
+		unsigned long ret;
 
-		if (shrinker->scan_objects) {
-			unsigned long ret;
-			shrinkctl->nr_to_scan = batch_size;
-			ret = shrinker->scan_objects(shrinker, shrinkctl);
-
-			if (ret == SHRINK_STOP)
-				break;
-			freed += ret;
-		} else {
-			int nr_before;
-			long ret;
-
-			nr_before = do_shrinker_shrink(shrinker, shrinkctl, 0);
-			ret = do_shrinker_shrink(shrinker, shrinkctl,
-							batch_size);
-			if (ret == -1)
-				break;
-			if (ret < nr_before)
-				freed += nr_before - ret;
-		}
+		shrinkctl->nr_to_scan = batch_size;
+		ret = shrinker->scan_objects(shrinker, shrinkctl);
+		if (ret == SHRINK_STOP)
+			break;
+		freed += ret;
 
 		count_vm_events(SLABS_SCANNED, batch_size);
 		total_scan -= batch_size;
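
With the old path gone, the reclaim loop above no longer has to branch on which API a shrinker implements. The sketch below condenses that control flow for illustration; the drive_shrinker name and its reduced argument list are inventions, not the kernel's actual shrink_slab_node() signature.

/*
 * Condensed sketch of the post-->shrink() control flow, simplified from
 * shrink_slab_node() above; the function name and signature are abridged
 * for illustration only.
 */
#include <linux/shrinker.h>

static unsigned long drive_shrinker(struct shrinker *shrinker,
				    struct shrink_control *sc,
				    unsigned long total_scan,
				    unsigned long batch_size)
{
	unsigned long freed = 0;

	/* One counting pass: nothing freeable means nothing to do. */
	if (shrinker->count_objects(shrinker, sc) == 0)
		return 0;

	/* Scan in batches; SHRINK_STOP tells reclaim to back off entirely. */
	while (total_scan >= batch_size) {
		unsigned long ret;

		sc->nr_to_scan = batch_size;
		ret = shrinker->scan_objects(shrinker, sc);
		if (ret == SHRINK_STOP)
			break;
		freed += ret;
		total_scan -= batch_size;
	}
	return freed;
}

Because only one calling convention remains, the old querying pass (calling ->shrink() with nr_to_scan == 0 to read the cache size) disappears as well; sizing is now always done through count_objects().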
