Btrfs: setup free ino caching in a more asynchronous way
For a filesystem that has lots of files in it, the first time we mount
it with free ino caching support, it can take quite a long time to
set up the caching before we can create new files.

Here we fill the cache with [highest_ino, BTRFS_LAST_FREE_OBJECTID]
before we start the caching thread to search through the extent tree.

Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Chris Mason <chris.mason@oracle.com>
Li Zefan authored and Chris Mason committed May 26, 2011
1 parent 00d01bc commit a47d6b7
fs/btrfs/inode-map.c: 22 additions & 6 deletions

--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -60,12 +60,12 @@ static int caching_kthread(void *data)
 
 	while (1) {
 		smp_mb();
-		if (fs_info->closing > 1)
+		if (fs_info->closing)
 			goto out;
 
 		leaf = path->nodes[0];
 		slot = path->slots[0];
-		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
+		if (slot >= btrfs_header_nritems(leaf)) {
 			ret = btrfs_next_leaf(root, path);
 			if (ret < 0)
 				goto out;
@@ -100,7 +100,7 @@ static int caching_kthread(void *data)
 		if (key.type != BTRFS_INODE_ITEM_KEY)
 			goto next;
 
-		if (key.objectid >= BTRFS_LAST_FREE_OBJECTID)
+		if (key.objectid >= root->highest_objectid)
 			break;
 
 		if (last != (u64)-1 && last + 1 != key.objectid) {
@@ -114,9 +114,9 @@ static int caching_kthread(void *data)
 		path->slots[0]++;
 	}
 
-	if (last < BTRFS_LAST_FREE_OBJECTID - 1) {
+	if (last < root->highest_objectid - 1) {
 		__btrfs_add_free_space(ctl, last + 1,
-				       BTRFS_LAST_FREE_OBJECTID - last - 1);
+				       root->highest_objectid - last - 1);
 	}
 
 	spin_lock(&root->cache_lock);
@@ -136,8 +136,10 @@ static int caching_kthread(void *data)
 
 static void start_caching(struct btrfs_root *root)
 {
+	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
 	struct task_struct *tsk;
 	int ret;
+	u64 objectid;
 
 	spin_lock(&root->cache_lock);
 	if (root->cached != BTRFS_CACHE_NO) {
@@ -156,6 +158,19 @@ static void start_caching(struct btrfs_root *root)
 		return;
 	}
 
+	/*
+	 * It can be quite time-consuming to fill the cache by searching
+	 * through the extent tree, and this can keep ino allocation path
+	 * waiting. Therefore at start we quickly find out the highest
+	 * inode number and we know we can use inode numbers which fall in
+	 * [highest_ino + 1, BTRFS_LAST_FREE_OBJECTID].
+	 */
+	ret = btrfs_find_free_objectid(root, &objectid);
+	if (!ret && objectid <= BTRFS_LAST_FREE_OBJECTID) {
+		__btrfs_add_free_space(ctl, objectid,
+				       BTRFS_LAST_FREE_OBJECTID - objectid + 1);
+	}
+
 	tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu\n",
 			  root->root_key.objectid);
 	BUG_ON(IS_ERR(tsk));
@@ -209,7 +224,8 @@ void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
 
 	start_caching(root);
 
-	if (objectid <= root->cache_progress)
+	if (objectid <= root->cache_progress ||
+	    objectid > root->highest_objectid)
 		__btrfs_add_free_space(ctl, objectid, 1);
 	else
 		__btrfs_add_free_space(pinned, objectid, 1);
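The core of the change is the new block in start_caching(): instead of waiting for caching_kthread() to walk the whole tree, the range above the current highest inode number is published to the free-ino cache right away, and only the hole scan below it is left to the kthread. As a rough illustration of that ordering, here is a small self-contained sketch in plain C; the names add_free_range, scan_for_holes, used_ids and the pthread-based "caching thread" are made up for the example and are not the btrfs API.

/*
 * Minimal sketch (assumed names, not kernel code): mark everything above
 * the current highest id as free immediately, then let a background
 * thread find the holes below it.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define LAST_FREE_ID 1000ULL	/* stands in for BTRFS_LAST_FREE_OBJECTID */

static const uint64_t used_ids[] = { 1, 2, 3, 5, 6, 9 };	/* pretend tree contents */
static uint64_t highest_id = 9;	/* known cheaply, like the highest objectid lookup */

static void add_free_range(uint64_t start, uint64_t count)
{
	printf("free range: [%llu, %llu]\n",
	       (unsigned long long)start,
	       (unsigned long long)(start + count - 1));
}

/* Slow scan below highest_id, playing the role of the caching thread. */
static void *scan_for_holes(void *arg)
{
	uint64_t last = 0;

	(void)arg;
	for (size_t i = 0; i < sizeof(used_ids) / sizeof(used_ids[0]); i++) {
		if (used_ids[i] > last + 1)
			add_free_range(last + 1, used_ids[i] - last - 1);
		last = used_ids[i];
	}
	return NULL;
}

int main(void)
{
	pthread_t tid;

	/* Cheap step first: everything above the highest id is known free,
	 * so new allocations can proceed immediately. */
	add_free_range(highest_id + 1, LAST_FREE_ID - highest_id);

	/* Expensive step in the background: find holes below highest_id. */
	pthread_create(&tid, NULL, scan_for_holes, NULL);
	pthread_join(tid, NULL);
	return 0;
}

Built with cc -pthread, the range above the highest id is printed before the scan reports the holes below it, mirroring how new inode numbers become allocatable before the kthread finishes its walk.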
