diff --git a/[refs] b/[refs]
index 9ca82640acab..0a7e491137cc 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: ee1039307a8a64b038f9b8cdc6f9120ecd9dfe9b
+refs/heads/master: 7f8275d0d660c146de6ee3017e1e2e594c49e820
diff --git a/trunk/arch/x86/kvm/mmu.c b/trunk/arch/x86/kvm/mmu.c
index 3699613e8830..b1ed0a1a5913 100644
--- a/trunk/arch/x86/kvm/mmu.c
+++ b/trunk/arch/x86/kvm/mmu.c
@@ -2926,7 +2926,7 @@ static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm)
 	return kvm_mmu_zap_page(kvm, page) + 1;
 }
 
-static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
+static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 {
 	struct kvm *kvm;
 	struct kvm *kvm_freed = NULL;
diff --git a/trunk/drivers/gpu/drm/i915/i915_gem.c b/trunk/drivers/gpu/drm/i915/i915_gem.c
index 8757ecf6e96b..e7018708cc31 100644
--- a/trunk/drivers/gpu/drm/i915/i915_gem.c
+++ b/trunk/drivers/gpu/drm/i915/i915_gem.c
@@ -4978,7 +4978,7 @@ i915_gpu_is_active(struct drm_device *dev)
 }
 
 static int
-i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
+i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 {
 	drm_i915_private_t *dev_priv, *next_dev;
 	struct drm_i915_gem_object *obj_priv, *next_obj;
diff --git a/trunk/fs/btrfs/ctree.c b/trunk/fs/btrfs/ctree.c
index c3df14ce2cc2..0d1d966b0fe4 100644
--- a/trunk/fs/btrfs/ctree.c
+++ b/trunk/fs/btrfs/ctree.c
@@ -2304,17 +2304,12 @@ noinline int btrfs_leaf_free_space(struct btrfs_root *root,
 	return ret;
 }
 
-/*
- * min slot controls the lowest index we're willing to push to the
- * right.  We'll push up to and including min_slot, but no lower
- */
 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
 				      struct btrfs_root *root,
 				      struct btrfs_path *path,
 				      int data_size, int empty,
 				      struct extent_buffer *right,
-				      int free_space, u32 left_nritems,
-				      u32 min_slot)
+				      int free_space, u32 left_nritems)
 {
 	struct extent_buffer *left = path->nodes[0];
 	struct extent_buffer *upper = path->nodes[1];
@@ -2332,7 +2327,7 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
 	if (empty)
 		nr = 0;
 	else
-		nr = max_t(u32, 1, min_slot);
+		nr = 1;
 
 	if (path->slots[0] >= left_nritems)
 		push_space += data_size;
@@ -2474,14 +2469,10 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
  *
  * returns 1 if the push failed because the other node didn't have enough
  * room, 0 if everything worked out and < 0 if there were major errors.
- *
- * this will push starting from min_slot to the end of the leaf.  It won't
- * push any slot lower than min_slot
  */
 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
-			   *root, struct btrfs_path *path,
-			   int min_data_size, int data_size,
-			   int empty, u32 min_slot)
+			   *root, struct btrfs_path *path, int data_size,
+			   int empty)
 {
 	struct extent_buffer *left = path->nodes[0];
 	struct extent_buffer *right;
@@ -2523,8 +2514,8 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
 	if (left_nritems == 0)
 		goto out_unlock;
 
-	return __push_leaf_right(trans, root, path, min_data_size, empty,
-				right, free_space, left_nritems, min_slot);
+	return __push_leaf_right(trans, root, path, data_size, empty,
+				right, free_space, left_nritems);
 out_unlock:
 	btrfs_tree_unlock(right);
 	free_extent_buffer(right);
@@ -2534,17 +2525,12 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
 /*
  * push some data in the path leaf to the left, trying to free up at
  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
- *
- * max_slot can put a limit on how far into the leaf we'll push items.  The
- * item at 'max_slot' won't be touched.  Use (u32)-1 to make us do all the
- * items
 */
 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
 				     struct btrfs_root *root,
 				     struct btrfs_path *path, int data_size,
 				     int empty, struct extent_buffer *left,
-				     int free_space, u32 right_nritems,
-				     u32 max_slot)
+				     int free_space, int right_nritems)
 {
 	struct btrfs_disk_key disk_key;
 	struct extent_buffer *right = path->nodes[0];
@@ -2563,9 +2549,9 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
 	slot = path->slots[1];
 
 	if (empty)
-		nr = min(right_nritems, max_slot);
+		nr = right_nritems;
 	else
-		nr = min(right_nritems - 1, max_slot);
+		nr = right_nritems - 1;
 
 	for (i = 0; i < nr; i++) {
 		item = btrfs_item_nr(right, i);
@@ -2726,14 +2712,10 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
 /*
  * push some data in the path leaf to the left, trying to free up at
  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
- *
- * max_slot can put a limit on how far into the leaf we'll push items.  The
- * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
- * items
 */
 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
-			  *root, struct btrfs_path *path, int min_data_size,
-			  int data_size, int empty, u32 max_slot)
+			  *root, struct btrfs_path *path, int data_size,
+			  int empty)
 {
 	struct extent_buffer *right = path->nodes[0];
 	struct extent_buffer *left;
@@ -2779,9 +2761,8 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
 		goto out;
 	}
 
-	return __push_leaf_left(trans, root, path, min_data_size,
-			       empty, left, free_space, right_nritems,
-			       max_slot);
+	return __push_leaf_left(trans, root, path, data_size,
+			       empty, left, free_space, right_nritems);
 out:
 	btrfs_tree_unlock(left);
 	free_extent_buffer(left);
@@ -2873,64 +2854,6 @@ static noinline int copy_for_split(struct btrfs_trans_handle *trans,
 	return ret;
 }
 
-/*
- * double splits happen when we need to insert a big item in the middle
- * of a leaf.  A double split can leave us with 3 mostly empty leaves:
- * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
- *          A                 B                 C
- *
- * We avoid this by trying to push the items on either side of our target
- * into the adjacent leaves.  If all goes well we can avoid the double split
- * completely.
- */
-static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
-					  struct btrfs_root *root,
-					  struct btrfs_path *path,
-					  int data_size)
-{
-	int ret;
-	int progress = 0;
-	int slot;
-	u32 nritems;
-
-	slot = path->slots[0];
-
-	/*
-	 * try to push all the items after our slot into the
-	 * right leaf
-	 */
-	ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot);
-	if (ret < 0)
-		return ret;
-
-	if (ret == 0)
-		progress++;
-
-	nritems = btrfs_header_nritems(path->nodes[0]);
-	/*
-	 * our goal is to get our slot at the start or end of a leaf.  If
-	 * we've done so we're done
-	 */
-	if (path->slots[0] == 0 || path->slots[0] == nritems)
-		return 0;
-
-	if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
-		return 0;
-
-	/* try to push all the items before our slot into the next leaf */
-	slot = path->slots[0];
-	ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
-	if (ret < 0)
-		return ret;
-
-	if (ret == 0)
-		progress++;
-
-	if (progress)
-		return 0;
-	return 1;
-}
-
 /*
  * split the path's leaf in two, making sure there is at least data_size
  * available for the resulting leaf level of the path.
@@ -2953,7 +2876,6 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
 	int wret;
 	int split;
 	int num_doubles = 0;
-	int tried_avoid_double = 0;
 
 	l = path->nodes[0];
 	slot = path->slots[0];
@@ -2962,14 +2884,12 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
 		return -EOVERFLOW;
 
 	/* first try to make some room by pushing left and right */
-	if (data_size) {
-		wret = push_leaf_right(trans, root, path, data_size,
-				       data_size, 0, 0);
+	if (data_size && ins_key->type != BTRFS_DIR_ITEM_KEY) {
+		wret = push_leaf_right(trans, root, path, data_size, 0);
 		if (wret < 0)
 			return wret;
 		if (wret) {
-			wret = push_leaf_left(trans, root, path, data_size,
-					      data_size, 0, (u32)-1);
+			wret = push_leaf_left(trans, root, path, data_size, 0);
 			if (wret < 0)
 				return wret;
 		}
@@ -3003,8 +2923,6 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
 			if (mid != nritems &&
 			    leaf_space_used(l, mid, nritems - mid) +
 			    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
-				if (data_size && !tried_avoid_double)
-					goto push_for_double;
 				split = 2;
 			}
 		}
@@ -3021,8 +2939,6 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
 			if (mid != nritems &&
 			    leaf_space_used(l, mid, nritems - mid) +
 			    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
-				if (data_size && !tried_avoid_double)
-					goto push_for_double;
 				split = 2 ;
 			}
 		}
@@ -3103,13 +3019,6 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
 	}
 
 	return ret;
-
-push_for_double:
-	push_for_double_split(trans, root, path, data_size);
-	tried_avoid_double = 1;
-	if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
-		return 0;
-	goto again;
 }
 
 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
@@ -4006,15 +3915,13 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 			extent_buffer_get(leaf);
 
 			btrfs_set_path_blocking(path);
-			wret = push_leaf_left(trans, root, path, 1, 1,
-					      1, (u32)-1);
+			wret = push_leaf_left(trans, root, path, 1, 1);
 			if (wret < 0 && wret != -ENOSPC)
 				ret = wret;
 
 			if (path->nodes[0] == leaf &&
 			    btrfs_header_nritems(leaf)) {
-				wret = push_leaf_right(trans, root, path, 1,
-						       1, 1, 0);
+				wret = push_leaf_right(trans, root, path, 1, 1);
 				if (wret < 0 && wret != -ENOSPC)
 					ret = wret;
 			}
diff --git a/trunk/fs/btrfs/ioctl.c b/trunk/fs/btrfs/ioctl.c
index 9254b3d58dbe..4dbaf89b1337 100644
--- a/trunk/fs/btrfs/ioctl.c
+++ b/trunk/fs/btrfs/ioctl.c
@@ -1458,7 +1458,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
 	 */
 
 	/* the destination must be opened for writing */
-	if (!(file->f_mode & FMODE_WRITE) || (file->f_flags & O_APPEND))
+	if (!(file->f_mode & FMODE_WRITE))
 		return -EINVAL;
 
 	ret = mnt_want_write(file->f_path.mnt);
@@ -1511,7 +1511,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
 
 	/* determine range to clone */
 	ret = -EINVAL;
-	if (off + len > src->i_size || off + len < off)
+	if (off >= src->i_size || off + len > src->i_size)
 		goto out_unlock;
 	if (len == 0)
 		olen = len = src->i_size - off;
@@ -1578,7 +1578,6 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
 			u64 disko = 0, diskl = 0;
 			u64 datao = 0, datal = 0;
 			u8 comp;
-			u64 endoff;
 
 			size = btrfs_item_size_nr(leaf, slot);
 			read_extent_buffer(leaf, buf,
@@ -1713,18 +1712,9 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
 			btrfs_release_path(root, path);
 
 			inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-
-			/*
-			 * we round up to the block size at eof when
-			 * determining which extents to clone above,
-			 * but shouldn't round up the file size
-			 */
-			endoff = new_key.offset + datal;
-			if (endoff > off+olen)
-				endoff = off+olen;
-			if (endoff > inode->i_size)
-				btrfs_i_size_write(inode, endoff);
-
+			if (new_key.offset + datal > inode->i_size)
+				btrfs_i_size_write(inode,
+						   new_key.offset + datal);
 			BTRFS_I(inode)->flags = BTRFS_I(src)->flags;
 			ret = btrfs_update_inode(trans, root, inode);
 			BUG_ON(ret);
diff --git a/trunk/fs/dcache.c b/trunk/fs/dcache.c
index c8c78ba07827..86d4db15473e 100644
--- a/trunk/fs/dcache.c
+++ b/trunk/fs/dcache.c
@@ -896,7 +896,7 @@ EXPORT_SYMBOL(shrink_dcache_parent);
  *
  * In this case we return -1 to tell the caller that we baled.
  */
-static int shrink_dcache_memory(int nr, gfp_t gfp_mask)
+static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
 {
 	if (nr) {
 		if (!(gfp_mask & __GFP_FS))
diff --git a/trunk/fs/gfs2/glock.c b/trunk/fs/gfs2/glock.c
index dbab3fdc2582..0898f3ec8212 100644
--- a/trunk/fs/gfs2/glock.c
+++ b/trunk/fs/gfs2/glock.c
@@ -1358,7 +1358,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 }
 
 
-static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
+static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
 {
 	struct gfs2_glock *gl;
 	int may_demote;
diff --git a/trunk/fs/gfs2/quota.c b/trunk/fs/gfs2/quota.c
index b256d6f24288..8f02d3db8f42 100644
--- a/trunk/fs/gfs2/quota.c
+++ b/trunk/fs/gfs2/quota.c
@@ -77,7 +77,7 @@ static LIST_HEAD(qd_lru_list);
 static atomic_t qd_lru_count = ATOMIC_INIT(0);
 static DEFINE_SPINLOCK(qd_lru_lock);
 
-int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask)
+int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
 {
 	struct gfs2_quota_data *qd;
 	struct gfs2_sbd *sdp;
diff --git a/trunk/fs/gfs2/quota.h b/trunk/fs/gfs2/quota.h
index 195f60c8bd14..e7d236ca48bd 100644
--- a/trunk/fs/gfs2/quota.h
+++ b/trunk/fs/gfs2/quota.h
@@ -51,7 +51,7 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
 	return ret;
 }
 
-extern int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask);
+extern int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask);
 extern const struct quotactl_ops gfs2_quotactl_ops;
 
 #endif /* __QUOTA_DOT_H__ */
diff --git a/trunk/fs/inode.c b/trunk/fs/inode.c
index 2bee20ae3d65..722860b323a9 100644
--- a/trunk/fs/inode.c
+++ b/trunk/fs/inode.c
@@ -512,7 +512,7 @@ static void prune_icache(int nr_to_scan)
  * This function is passed the number of inodes to scan, and it returns the
  * total number of remaining possibly-reclaimable inodes.
  */
-static int shrink_icache_memory(int nr, gfp_t gfp_mask)
+static int shrink_icache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
 {
 	if (nr) {
 		/*
diff --git a/trunk/fs/mbcache.c b/trunk/fs/mbcache.c
index ec88ff3d04a9..e28f21b95344 100644
--- a/trunk/fs/mbcache.c
+++ b/trunk/fs/mbcache.c
@@ -115,7 +115,7 @@ mb_cache_indexes(struct mb_cache *cache)
  * What the mbcache registers as to get shrunk dynamically.
  */
 
-static int mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask);
+static int mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask);
 
 static struct shrinker mb_cache_shrinker = {
 	.shrink = mb_cache_shrink_fn,
@@ -191,13 +191,14 @@ __mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
  * This function is called by the kernel memory management when memory
  * gets low.
  *
+ * @shrink: (ignored)
 * @nr_to_scan: Number of objects to scan
 * @gfp_mask: (ignored)
 *
 * Returns the number of objects which are present in the cache.
 */
static int
-mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask)
+mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 {
 	LIST_HEAD(free_list);
 	struct list_head *l, *ltmp;
diff --git a/trunk/fs/nfs/dir.c b/trunk/fs/nfs/dir.c
index 782b431ef91c..e60416d3f818 100644
--- a/trunk/fs/nfs/dir.c
+++ b/trunk/fs/nfs/dir.c
@@ -1710,7 +1710,7 @@ static void nfs_access_free_list(struct list_head *head)
 	}
 }
 
-int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask)
+int nfs_access_cache_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 {
 	LIST_HEAD(head);
 	struct nfs_inode *nfsi;
diff --git a/trunk/fs/nfs/internal.h b/trunk/fs/nfs/internal.h
index d8bd619e386c..e70f44b9b3f4 100644
--- a/trunk/fs/nfs/internal.h
+++ b/trunk/fs/nfs/internal.h
@@ -205,7 +205,8 @@ extern struct rpc_procinfo nfs4_procedures[];
 void nfs_close_context(struct nfs_open_context *ctx, int is_sync);
 
 /* dir.c */
-extern int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask);
+extern int nfs_access_cache_shrinker(struct shrinker *shrink,
+					int nr_to_scan, gfp_t gfp_mask);
 
 /* inode.c */
 extern struct workqueue_struct *nfsiod_workqueue;
diff --git a/trunk/fs/quota/dquot.c b/trunk/fs/quota/dquot.c
index 12c233da1b6b..437d2ca2de97 100644
--- a/trunk/fs/quota/dquot.c
+++ b/trunk/fs/quota/dquot.c
@@ -676,7 +676,7 @@ static void prune_dqcache(int count)
 * This is called from kswapd when we think we need some
 * more memory
 */
-static int shrink_dqcache_memory(int nr, gfp_t gfp_mask)
+static int shrink_dqcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
 {
 	if (nr) {
 		spin_lock(&dq_list_lock);
diff --git a/trunk/fs/ubifs/shrinker.c b/trunk/fs/ubifs/shrinker.c
index 02feb59cefca..0b201114a5ad 100644
--- a/trunk/fs/ubifs/shrinker.c
+++ b/trunk/fs/ubifs/shrinker.c
@@ -277,7 +277,7 @@ static int kick_a_thread(void)
 	return 0;
 }
 
-int ubifs_shrinker(int nr, gfp_t gfp_mask)
+int ubifs_shrinker(struct shrinker *shrink, int nr, gfp_t gfp_mask)
 {
 	int freed, contention = 0;
 	long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt);
diff --git a/trunk/fs/ubifs/ubifs.h b/trunk/fs/ubifs/ubifs.h
index 2eef553d50c8..04310878f449 100644
--- a/trunk/fs/ubifs/ubifs.h
+++ b/trunk/fs/ubifs/ubifs.h
@@ -1575,7 +1575,7 @@ int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot);
 int ubifs_tnc_end_commit(struct ubifs_info *c);
 
 /* shrinker.c */
-int ubifs_shrinker(int nr_to_scan, gfp_t gfp_mask);
+int ubifs_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask);
 
 /* commit.c */
 int ubifs_bg_thread(void *info);
diff --git a/trunk/fs/xfs/linux-2.6/xfs_buf.c b/trunk/fs/xfs/linux-2.6/xfs_buf.c
index 649ade8ef598..2ee3f7a60163 100644
--- a/trunk/fs/xfs/linux-2.6/xfs_buf.c
+++ b/trunk/fs/xfs/linux-2.6/xfs_buf.c
@@ -45,7 +45,7 @@ static kmem_zone_t *xfs_buf_zone;
 
 STATIC int xfsbufd(void *);
-STATIC int xfsbufd_wakeup(int, gfp_t);
+STATIC int xfsbufd_wakeup(struct shrinker *, int, gfp_t);
 STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
 
 static struct shrinker xfs_buf_shake = {
 	.shrink = xfsbufd_wakeup,
@@ -340,7 +340,7 @@ _xfs_buf_lookup_pages(
 			__func__, gfp_mask);
 
 		XFS_STATS_INC(xb_page_retries);
-		xfsbufd_wakeup(0, gfp_mask);
+		xfsbufd_wakeup(NULL, 0, gfp_mask);
 		congestion_wait(BLK_RW_ASYNC, HZ/50);
 		goto retry;
 	}
@@ -1762,6 +1762,7 @@ xfs_buf_runall_queues(
 
 STATIC int
 xfsbufd_wakeup(
+	struct shrinker		*shrink,
 	int			priority,
 	gfp_t			mask)
 {
diff --git a/trunk/fs/xfs/linux-2.6/xfs_sync.c b/trunk/fs/xfs/linux-2.6/xfs_sync.c
index ef7f0218bccb..be375827af98 100644
--- a/trunk/fs/xfs/linux-2.6/xfs_sync.c
+++ b/trunk/fs/xfs/linux-2.6/xfs_sync.c
@@ -838,6 +838,7 @@ static struct rw_semaphore xfs_mount_list_lock;
 
 static int
 xfs_reclaim_inode_shrink(
+	struct shrinker	*shrink,
 	int		nr_to_scan,
 	gfp_t		gfp_mask)
 {
diff --git a/trunk/fs/xfs/quota/xfs_qm.c b/trunk/fs/xfs/quota/xfs_qm.c
index 8c117ff2e3ab..67c018392d62 100644
--- a/trunk/fs/xfs/quota/xfs_qm.c
+++ b/trunk/fs/xfs/quota/xfs_qm.c
@@ -69,7 +69,7 @@ STATIC void xfs_qm_list_destroy(xfs_dqlist_t *);
 
 STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
 STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);
-STATIC int	xfs_qm_shake(int, gfp_t);
+STATIC int	xfs_qm_shake(struct shrinker *, int, gfp_t);
 
 static struct shrinker xfs_qm_shaker = {
 	.shrink = xfs_qm_shake,
@@ -2117,7 +2117,10 @@ xfs_qm_shake_freelist(
 */
/* ARGSUSED */
 STATIC int
-xfs_qm_shake(int nr_to_scan, gfp_t gfp_mask)
+xfs_qm_shake(
+	struct shrinker	*shrink,
+	int		nr_to_scan,
+	gfp_t		gfp_mask)
 {
 	int		ndqused, nfree, n;
 
diff --git a/trunk/include/linux/mm.h b/trunk/include/linux/mm.h
index b969efb03787..a2b48041b910 100644
--- a/trunk/include/linux/mm.h
+++ b/trunk/include/linux/mm.h
@@ -999,7 +999,7 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
 * querying the cache size, so a fastpath for that case is appropriate.
 */
struct shrinker {
-	int (*shrink)(int nr_to_scan, gfp_t gfp_mask);
+	int (*shrink)(struct shrinker *, int nr_to_scan, gfp_t gfp_mask);
 	int seeks;	/* seeks to recreate an obj */
 
 	/* These are for internal use */
diff --git a/trunk/mm/vmscan.c b/trunk/mm/vmscan.c
index 9c7e57cc63a3..199fa436c0dd 100644
--- a/trunk/mm/vmscan.c
+++ b/trunk/mm/vmscan.c
@@ -213,8 +213,9 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
 	list_for_each_entry(shrinker, &shrinker_list, list) {
 		unsigned long long delta;
 		unsigned long total_scan;
-		unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);
+		unsigned long max_pass;
 
+		max_pass = (*shrinker->shrink)(shrinker, 0, gfp_mask);
 		delta = (4 * scanned) / shrinker->seeks;
 		delta *= max_pass;
 		do_div(delta, lru_pages + 1);
@@ -242,8 +243,9 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
 			int shrink_ret;
 			int nr_before;
 
-			nr_before = (*shrinker->shrink)(0, gfp_mask);
-			shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
+			nr_before = (*shrinker->shrink)(shrinker, 0, gfp_mask);
+			shrink_ret = (*shrinker->shrink)(shrinker, this_scan,
+							gfp_mask);
 			if (shrink_ret == -1)
 				break;
 			if (shrink_ret < nr_before)
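
Note: aside from the btrfs ctree.c and ioctl.c hunks, which are unrelated leaf-split
and clone-ioctl changes, every hunk above is the same mechanical API update: the
shrink callback in struct shrinker gains the shrinker itself as a new first
argument. shrink_slab() in mm/vmscan.c now passes the registered shrinker at all
three call sites, and the one direct caller (_xfs_buf_lookup_pages) passes NULL.
The point of the extra argument is that a callback can recover per-instance state
with container_of() on the shrinker it was registered with, instead of relying on
file-scope globals.

The following is a minimal sketch of a shrinker written against the new signature;
it is illustration only, not part of this patch, and my_cache, my_cache_shrink and
nr_objects are invented names:

	#include <linux/module.h>
	#include <linux/mm.h>

	/* Hypothetical cache with an embedded shrinker. */
	struct my_cache {
		atomic_t	nr_objects;
		struct shrinker	shrinker;
	};

	static int my_cache_shrink(struct shrinker *shrink, int nr_to_scan,
				   gfp_t gfp_mask)
	{
		/* Recover per-instance state from the new argument. */
		struct my_cache *cache = container_of(shrink, struct my_cache,
						      shrinker);

		if (nr_to_scan) {
			/* Refuse to reclaim if we may not recurse into the FS. */
			if (!(gfp_mask & __GFP_FS))
				return -1;
			/* ... drop up to nr_to_scan objects from the cache ... */
		}
		/* nr_to_scan == 0 is shrink_slab() querying the cache size. */
		return atomic_read(&cache->nr_objects);
	}

	static struct my_cache my_cache = {
		.nr_objects	= ATOMIC_INIT(0),
		.shrinker	= {
			.shrink	= my_cache_shrink,
			.seeks	= DEFAULT_SEEKS,
		},
	};

	static int __init my_cache_init(void)
	{
		register_shrinker(&my_cache.shrinker);
		return 0;
	}

	static void __exit my_cache_exit(void)
	{
		unregister_shrinker(&my_cache.shrinker);
	}

	module_init(my_cache_init);
	module_exit(my_cache_exit);
	MODULE_LICENSE("GPL");

Callbacks with no per-instance state, which is every conversion in this diff,
simply ignore the new parameter; that is what keeps the change mechanical. A
callback that does use the argument must also tolerate being invoked directly,
as xfsbufd_wakeup is with NULL in _xfs_buf_lookup_pages.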