Commit
---
r: 257368
b: refs/heads/master
c: b0d40c9
h: refs/heads/master
v: v3
---
Dave Chinner authored and Al Viro committed Jul 21, 2011
1 parent 0a62310 commit 4ef68fe
Showing 7 changed files with 122 additions and 258 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 12ad3ab66103e6582ca69c0c9de18b13487eaaef
refs/heads/master: b0d40c92adafde7c2d81203ce7c1c69275f41140
121 changes: 12 additions & 109 deletions trunk/fs/dcache.c
@@ -743,13 +743,11 @@ static void shrink_dentry_list(struct list_head *list)
*
* If flags contains DCACHE_REFERENCED, referenced dentries will not be pruned.
*/
static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags)
static void __shrink_dcache_sb(struct super_block *sb, int count, int flags)
{
/* called from prune_dcache() and shrink_dcache_parent() */
struct dentry *dentry;
LIST_HEAD(referenced);
LIST_HEAD(tmp);
int cnt = *count;

relock:
spin_lock(&dcache_lru_lock);
@@ -777,7 +775,7 @@ static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags)
} else {
list_move_tail(&dentry->d_lru, &tmp);
spin_unlock(&dentry->d_lock);
if (!--cnt)
if (!--count)
break;
}
cond_resched_lock(&dcache_lru_lock);
@@ -787,83 +785,22 @@ static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags)
spin_unlock(&dcache_lru_lock);

shrink_dentry_list(&tmp);

*count = cnt;
}

/**
* prune_dcache - shrink the dcache
* @count: number of entries to try to free
* prune_dcache_sb - shrink the dcache
* @nr_to_scan: number of entries to try to free
*
* Shrink the dcache. This is done when we need more memory, or simply when we
* need to unmount something (at which point we need to unuse all dentries).
* Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is
done when we need more memory and is called from the superblock shrinker
* function.
*
* This function may fail to free any resources if all the dentries are in use.
* This function may fail to free any resources if all the dentries are in
* use.
*/
static void prune_dcache(int count)
void prune_dcache_sb(struct super_block *sb, int nr_to_scan)
{
struct super_block *sb, *p = NULL;
int w_count;
int unused = dentry_stat.nr_unused;
int prune_ratio;
int pruned;

if (unused == 0 || count == 0)
return;
if (count >= unused)
prune_ratio = 1;
else
prune_ratio = unused / count;
spin_lock(&sb_lock);
list_for_each_entry(sb, &super_blocks, s_list) {
if (list_empty(&sb->s_instances))
continue;
if (sb->s_nr_dentry_unused == 0)
continue;
sb->s_count++;
/* Now, we reclaim unused dentries with fairness.
* We reclaim the same percentage from each superblock.
* We calculate the number of dentries to scan on this sb
* as follows, but the implementation is arranged to avoid
* overflows:
* number of dentries to scan on this sb =
* count * (number of dentries on this sb /
* number of dentries in the machine)
*/
spin_unlock(&sb_lock);
if (prune_ratio != 1)
w_count = (sb->s_nr_dentry_unused / prune_ratio) + 1;
else
w_count = sb->s_nr_dentry_unused;
pruned = w_count;
/*
* We need to be sure this filesystem isn't being unmounted,
* otherwise we could race with generic_shutdown_super(), and
* end up holding a reference to an inode while the filesystem
* is unmounted. So we try to get s_umount, and make sure
* s_root isn't NULL.
*/
if (down_read_trylock(&sb->s_umount)) {
if ((sb->s_root != NULL) &&
(!list_empty(&sb->s_dentry_lru))) {
__shrink_dcache_sb(sb, &w_count,
DCACHE_REFERENCED);
pruned -= w_count;
}
up_read(&sb->s_umount);
}
spin_lock(&sb_lock);
if (p)
__put_super(p);
count -= pruned;
p = sb;
/* more work left to do? */
if (count <= 0)
break;
}
if (p)
__put_super(p);
spin_unlock(&sb_lock);
__shrink_dcache_sb(sb, nr_to_scan, DCACHE_REFERENCED);
}

/**
@@ -1238,42 +1175,10 @@ void shrink_dcache_parent(struct dentry * parent)
int found;

while ((found = select_parent(parent)) != 0)
__shrink_dcache_sb(sb, &found, 0);
__shrink_dcache_sb(sb, found, 0);
}
EXPORT_SYMBOL(shrink_dcache_parent);

/*
* Scan `sc->nr_slab_to_reclaim' dentries and return the number which remain.
*
* We need to avoid reentering the filesystem if the caller is performing a
* GFP_NOFS allocation attempt. One example deadlock is:
*
* ext2_new_block->getblk->GFP->shrink_dcache_memory->prune_dcache->
* prune_one_dentry->dput->dentry_iput->iput->inode->i_sb->s_op->put_inode->
* ext2_discard_prealloc->ext2_free_blocks->lock_super->DEADLOCK.
*
* In this case we return -1 to tell the caller that we bailed.
*/
static int shrink_dcache_memory(struct shrinker *shrink,
struct shrink_control *sc)
{
int nr = sc->nr_to_scan;
gfp_t gfp_mask = sc->gfp_mask;

if (nr) {
if (!(gfp_mask & __GFP_FS))
return -1;
prune_dcache(nr);
}

return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
}

static struct shrinker dcache_shrinker = {
.shrink = shrink_dcache_memory,
.seeks = DEFAULT_SEEKS,
};

/**
* __d_alloc - allocate a dcache entry
* @sb: filesystem it will belong to
@@ -3083,8 +2988,6 @@ static void __init dcache_init(void)
*/
dentry_cache = KMEM_CACHE(dentry,
SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);

register_shrinker(&dcache_shrinker);

/* Hash may have been set up in dcache_init_early */
if (!hashdist)
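
With the global dcache_shrinker removed above, per-filesystem dentry reclaim is driven through the new prune_dcache_sb() entry point by the superblock shrinker the docstring mentions (added elsewhere in this commit; its diff is not shown here). As a rough illustration of the proportional split such a shrinker can make between the dentry and inode LRUs, here is a minimal user-space C sketch; the name prune_super_split() and the struct layout are hypothetical, not taken from this commit:

#include <stdio.h>

/* Hypothetical per-sb LRU counters (stand-ins for s_nr_dentry_unused
 * and s_nr_inodes_unused in the kernel's struct super_block). */
struct sb_counts {
    long nr_dentry_unused;
    long nr_inodes_unused;
};

/* Split one shrinker batch between the two LRUs in proportion to their
 * sizes, the way a per-sb shrinker could drive prune_dcache_sb() and
 * prune_icache_sb(). The +1 keeps the denominator nonzero. */
static void prune_super_split(const struct sb_counts *sb, long nr_to_scan,
                              long *dentries, long *inodes)
{
    long total = sb->nr_dentry_unused + sb->nr_inodes_unused + 1;

    *dentries = (nr_to_scan * sb->nr_dentry_unused) / total;
    *inodes   = (nr_to_scan * sb->nr_inodes_unused) / total;
}

int main(void)
{
    struct sb_counts sb = { .nr_dentry_unused = 600, .nr_inodes_unused = 300 };
    long d, i;

    prune_super_split(&sb, 128, &d, &i);
    /* ~2/3 of the batch goes to dentries, ~1/3 to inodes */
    printf("scan %ld dentries, %ld inodes\n", d, i);
    return 0;
}
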
117 changes: 9 additions & 108 deletions trunk/fs/inode.c
@@ -73,7 +73,7 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock);
*
* We don't actually need it to protect anything in the umount path,
* but only need to cycle through it to make sure any inode that
* prune_icache took off the LRU list has been fully torn down by the
* prune_icache_sb took off the LRU list has been fully torn down by the
* time we are past evict_inodes.
*/
static DECLARE_RWSEM(iprune_sem);
@@ -544,7 +544,7 @@ void evict_inodes(struct super_block *sb)
dispose_list(&dispose);

/*
* Cycle through iprune_sem to make sure any inode that prune_icache
* Cycle through iprune_sem to make sure any inode that prune_icache_sb
* moved off the list before we took the lock has been fully torn
* down.
*/
@@ -612,9 +612,10 @@ static int can_unuse(struct inode *inode)
}

/*
* Scan `goal' inodes on the unused list for freeable ones. They are moved to a
* temporary list and then are freed outside sb->s_inode_lru_lock by
* dispose_list().
* Walk the superblock inode LRU for freeable inodes and attempt to free them.
* This is called from the superblock shrinker function with a number of inodes
* to trim from the LRU. Inodes to be freed are moved to a temporary list and
* then are freed outside inode_lock by dispose_list().
*
* Any inodes which are pinned purely because of attached pagecache have their
* pagecache removed. If the inode has metadata buffers attached to
@@ -628,14 +629,15 @@ static int can_unuse(struct inode *inode)
* LRU does not have strict ordering. Hence we don't want to reclaim inodes
* with this flag set because they are the inodes that are out of order.
*/
static void shrink_icache_sb(struct super_block *sb, int *nr_to_scan)
void prune_icache_sb(struct super_block *sb, int nr_to_scan)
{
LIST_HEAD(freeable);
int nr_scanned;
unsigned long reap = 0;

down_read(&iprune_sem);
spin_lock(&sb->s_inode_lru_lock);
for (nr_scanned = *nr_to_scan; nr_scanned >= 0; nr_scanned--) {
for (nr_scanned = nr_to_scan; nr_scanned >= 0; nr_scanned--) {
struct inode *inode;

if (list_empty(&sb->s_inode_lru))
@@ -707,111 +709,11 @@ static void shrink_icache_sb(struct super_block *sb, int *nr_to_scan)
else
__count_vm_events(PGINODESTEAL, reap);
spin_unlock(&sb->s_inode_lru_lock);
*nr_to_scan = nr_scanned;

dispose_list(&freeable);
}

static void prune_icache(int count)
{
struct super_block *sb, *p = NULL;
int w_count;
int unused = inodes_stat.nr_unused;
int prune_ratio;
int pruned;

if (unused == 0 || count == 0)
return;
down_read(&iprune_sem);
if (count >= unused)
prune_ratio = 1;
else
prune_ratio = unused / count;
spin_lock(&sb_lock);
list_for_each_entry(sb, &super_blocks, s_list) {
if (list_empty(&sb->s_instances))
continue;
if (sb->s_nr_inodes_unused == 0)
continue;
sb->s_count++;
/* Now, we reclaim unused inodes with fairness.
* We reclaim the same percentage from each superblock.
* We calculate the number of inodes to scan on this sb
* as follows, but the implementation is arranged to avoid
* overflows:
* number of inodes to scan on this sb =
* count * (number of inodes on this sb /
* number of inodes in the machine)
*/
spin_unlock(&sb_lock);
if (prune_ratio != 1)
w_count = (sb->s_nr_inodes_unused / prune_ratio) + 1;
else
w_count = sb->s_nr_inodes_unused;
pruned = w_count;
/*
* We need to be sure this filesystem isn't being unmounted,
* otherwise we could race with generic_shutdown_super(), and
* end up holding a reference to an inode while the filesystem
* is unmounted. So we try to get s_umount, and make sure
* s_root isn't NULL.
*/
if (down_read_trylock(&sb->s_umount)) {
if ((sb->s_root != NULL) &&
(!list_empty(&sb->s_dentry_lru))) {
shrink_icache_sb(sb, &w_count);
pruned -= w_count;
}
up_read(&sb->s_umount);
}
spin_lock(&sb_lock);
if (p)
__put_super(p);
count -= pruned;
p = sb;
/* more work left to do? */
if (count <= 0)
break;
}
if (p)
__put_super(p);
spin_unlock(&sb_lock);
up_read(&iprune_sem);
}

/*
* shrink_icache_memory() will attempt to reclaim some unused inodes. Here,
* "unused" means that no dentries are referring to the inodes: the files are
* not open and the dcache references to those inodes have already been
* reclaimed.
*
* This function is passed the number of inodes to scan, and it returns the
* total number of remaining possibly-reclaimable inodes.
*/
static int shrink_icache_memory(struct shrinker *shrink,
struct shrink_control *sc)
{
int nr = sc->nr_to_scan;
gfp_t gfp_mask = sc->gfp_mask;

if (nr) {
/*
* Nasty deadlock avoidance. We may hold various FS locks,
* and we don't want to recurse into the FS that called us
* in clear_inode() and friends..
*/
if (!(gfp_mask & __GFP_FS))
return -1;
prune_icache(nr);
}
return (get_nr_inodes_unused() / 100) * sysctl_vfs_cache_pressure;
}

static struct shrinker icache_shrinker = {
.shrink = shrink_icache_memory,
.seeks = DEFAULT_SEEKS,
};

static void __wait_on_freeing_inode(struct inode *inode);
/*
* Called with the inode lock held.
@@ -1691,7 +1593,6 @@ void __init inode_init(void)
(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
SLAB_MEM_SPREAD),
init_once);
register_shrinker(&icache_shrinker);

/* Hash may have been set up in inode_init_early */
if (!hashdist)
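
The removed prune_dcache()/prune_icache() pair above shared the fairness calculation their comments describe: each superblock is asked to scan roughly count * (sb_unused / total_unused) entries, rearranged as a division by prune_ratio = total_unused / count so the intermediate value cannot overflow an int. A small self-contained C illustration of that arithmetic (the helper name per_sb_scan() is mine, not from the kernel):

#include <stdio.h>

/* Per-sb scan target, computed the way the removed prune_dcache()
 * and prune_icache() did: divide by prune_ratio = unused / count
 * instead of multiplying count by sb_unused, so the intermediate
 * value cannot overflow. The +1 mirrors the kernel's rounding up. */
static int per_sb_scan(int count, int total_unused, int sb_unused)
{
    int prune_ratio;

    if (total_unused == 0 || count == 0)
        return 0;
    prune_ratio = (count >= total_unused) ? 1 : total_unused / count;
    return (prune_ratio == 1) ? sb_unused : sb_unused / prune_ratio + 1;
}

int main(void)
{
    /* 1000 unused entries machine-wide, shrinker asks for 100:
     * a superblock holding 400 of them is asked to scan ~40. */
    printf("%d\n", per_sb_scan(100, 1000, 400));
    return 0;
}
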