Commit fa7c4c6

---
r: 234939
b: refs/heads/master
c: bab1d94
h: refs/heads/master
i:
  234937: f024ede
  234935: 02b0218
v: v3
Christoph Hellwig authored and Al Viro committed Mar 16, 2011
1 parent c61de13 commit fa7c4c6
Showing 2 changed files with 15 additions and 19 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 5229645bdc35f1cc43eb8b25b6993c8fa58b4b43
+refs/heads/master: bab1d9444d9a147f1dc3478dd06c16f490227f3e
32 changes: 14 additions & 18 deletions trunk/fs/inode.c
@@ -84,16 +84,13 @@ static struct hlist_head *inode_hashtable __read_mostly;
 DEFINE_SPINLOCK(inode_lock);

 /*
- * iprune_sem provides exclusion between the kswapd or try_to_free_pages
- * icache shrinking path, and the umount path. Without this exclusion,
- * by the time prune_icache calls iput for the inode whose pages it has
- * been invalidating, or by the time it calls clear_inode & destroy_inode
- * from its final dispose_list, the struct super_block they refer to
- * (for inode->i_sb->s_op) may already have been freed and reused.
+ * iprune_sem provides exclusion between the icache shrinking and the
+ * umount path.
  *
- * We make this an rwsem because the fastpath is icache shrinking. In
- * some cases a filesystem may be doing a significant amount of work in
- * its inode reclaim code, so this should improve parallelism.
+ * We don't actually need it to protect anything in the umount path,
+ * but only need to cycle through it to make sure any inode that
+ * prune_icache took off the LRU list has been fully torn down by the
+ * time we are past evict_inodes.
  */
 static DECLARE_RWSEM(iprune_sem);

@@ -516,17 +513,12 @@ void evict_inodes(struct super_block *sb)
         struct inode *inode, *next;
         LIST_HEAD(dispose);

-        down_write(&iprune_sem);
-
         spin_lock(&inode_lock);
         list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
                 if (atomic_read(&inode->i_count))
                         continue;
-
-                if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
-                        WARN_ON(1);
+                if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE))
                         continue;
-                }

                 inode->i_state |= I_FREEING;

@@ -542,6 +534,13 @@ void evict_inodes(struct super_block *sb)
         spin_unlock(&inode_lock);

         dispose_list(&dispose);
+
+        /*
+         * Cycle through iprune_sem to make sure any inode that prune_icache
+         * moved off the list before we took the lock has been fully torn
+         * down.
+         */
+        down_write(&iprune_sem);
         up_write(&iprune_sem);
 }
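The comment added above, together with the rewritten comment on iprune_sem earlier in the file, describes a lock-cycle barrier: the umount path takes the rwsem for write and releases it again immediately, purely to wait until any shrinker that already held it for read has finished tearing down the inodes it pulled off the LRU. Below is a minimal userspace sketch of that idiom using POSIX rwlocks; it is an illustration only, not kernel code, and the names shrinker and umount_wait_for_shrinker are made up for the example.

/* Userspace illustration of the "cycle through the rwsem" barrier. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t iprune_sem = PTHREAD_RWLOCK_INITIALIZER;

/* Shrinker side: holds the lock shared while it tears down its victims. */
static void *shrinker(void *unused)
{
        (void)unused;
        pthread_rwlock_rdlock(&iprune_sem);
        puts("shrinker: tearing down inodes taken off the LRU");
        sleep(1);                       /* stand-in for the dispose work */
        puts("shrinker: done");
        pthread_rwlock_unlock(&iprune_sem);
        return NULL;
}

/* Umount side: needs no data protected, only the barrier effect. */
static void umount_wait_for_shrinker(void)
{
        /* The write lock cannot be granted until every reader has released. */
        pthread_rwlock_wrlock(&iprune_sem);
        pthread_rwlock_unlock(&iprune_sem);
        puts("umount: any in-flight teardown has completed");
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, shrinker, NULL);
        usleep(100000);                 /* let the shrinker take the lock first */
        umount_wait_for_shrinker();
        pthread_join(t, NULL);
        return 0;
}

Relative to holding iprune_sem for write across all of evict_inodes, the cycle only serializes against pruning that was already in flight, which is exactly the guarantee the new comment asks for.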

@@ -561,8 +560,6 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
         struct inode *inode, *next;
         LIST_HEAD(dispose);

-        down_write(&iprune_sem);
-
         spin_lock(&inode_lock);
         list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
                 if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE))
@@ -590,7 +587,6 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
         spin_unlock(&inode_lock);

         dispose_list(&dispose);
-        up_write(&iprune_sem);

         return busy;
 }
