Merge git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable
* git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable:
  Btrfs: hold trans_mutex when using btrfs_record_root_in_trans
  Btrfs: make a lockdep class for the extent buffer locks
  Btrfs: fs/btrfs/volumes.c: remove useless kzalloc
  Btrfs: remove unused code in split_state()
  Btrfs: remove btrfs_init_path
  Btrfs: balance_level checks !child after access
  Btrfs: Avoid using __GFP_HIGHMEM with slab allocator
  Btrfs: don't clean old snapshots on sync(1)
  Btrfs: use larger metadata clusters in ssd mode
  Btrfs: process mount options on mount -o remount,
  Btrfs: make sure all pending extent operations are complete
Linus Torvalds committed Feb 17, 2009
2 parents 0637810 + 2456242 commit 48c0d9e
Showing 13 changed files with 157 additions and 84 deletions.
58 changes: 37 additions & 21 deletions fs/btrfs/ctree.c
@@ -38,19 +38,12 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_path *path, int level, int slot);

- inline void btrfs_init_path(struct btrfs_path *p)
- {
- memset(p, 0, sizeof(*p));
- }

struct btrfs_path *btrfs_alloc_path(void)
{
struct btrfs_path *path;
- path = kmem_cache_alloc(btrfs_path_cachep, GFP_NOFS);
- if (path) {
- btrfs_init_path(path);
+ path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
+ if (path)
path->reada = 1;
- }
return path;
}
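
kmem_cache_zalloc() returns memory that is already zeroed, which is what makes the separate btrfs_init_path()/memset() step removed above redundant (the "remove btrfs_init_path" patch in this pull). A minimal sketch of the same idiom, using a hypothetical cache and struct rather than btrfs code:

/* Sketch only: 'my_cachep' and 'struct my_item' are hypothetical. */
static struct my_item *my_alloc_item(void)
{
	struct my_item *item;

	/* kmem_cache_zalloc() == kmem_cache_alloc() + zero-fill. */
	item = kmem_cache_zalloc(my_cachep, GFP_NOFS);
	if (item)
		item->reada = 1;	/* only non-zero defaults need setting */
	return item;
}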

@@ -69,14 +62,38 @@ noinline void btrfs_set_path_blocking(struct btrfs_path *p)

/*
* reset all the locked nodes in the patch to spinning locks.
+ *
+ * held is used to keep lockdep happy, when lockdep is enabled
+ * we set held to a blocking lock before we go around and
+ * retake all the spinlocks in the path. You can safely use NULL
+ * for held
*/
- noinline void btrfs_clear_path_blocking(struct btrfs_path *p)
+ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
+ struct extent_buffer *held)
{
int i;
- for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
+
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ /* lockdep really cares that we take all of these spinlocks
+ * in the right order. If any of the locks in the path are not
+ * currently blocking, it is going to complain. So, make really
+ * really sure by forcing the path to blocking before we clear
+ * the path blocking.
+ */
+ if (held)
+ btrfs_set_lock_blocking(held);
+ btrfs_set_path_blocking(p);
+ #endif
+
+ for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
if (p->nodes[i] && p->locks[i])
btrfs_clear_lock_blocking(p->nodes[i]);
}

+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ if (held)
+ btrfs_clear_lock_blocking(held);
+ #endif
}

/* this also releases the path */
@@ -286,7 +303,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
trans->transid, level, &ins);
BUG_ON(ret);
cow = btrfs_init_new_buffer(trans, root, prealloc_dest,
- buf->len);
+ buf->len, level);
} else {
cow = btrfs_alloc_free_block(trans, root, buf->len,
parent_start,
@@ -917,9 +934,9 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,

/* promote the child to a root */
child = read_node_slot(root, mid, 0);
+ BUG_ON(!child);
btrfs_tree_lock(child);
btrfs_set_lock_blocking(child);
- BUG_ON(!child);
ret = btrfs_cow_block(trans, root, child, mid, 0, &child, 0);
BUG_ON(ret);

@@ -1566,7 +1583,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
if (!p->skip_locking)
p->locks[level] = 1;

- btrfs_clear_path_blocking(p);
+ btrfs_clear_path_blocking(p, NULL);

/*
* we have a lock on b and as long as we aren't changing
@@ -1605,7 +1622,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root

btrfs_set_path_blocking(p);
sret = split_node(trans, root, p, level);
- btrfs_clear_path_blocking(p);
+ btrfs_clear_path_blocking(p, NULL);

BUG_ON(sret > 0);
if (sret) {
@@ -1625,7 +1642,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root

btrfs_set_path_blocking(p);
sret = balance_level(trans, root, p, level);
- btrfs_clear_path_blocking(p);
+ btrfs_clear_path_blocking(p, NULL);

if (sret) {
ret = sret;
@@ -1688,13 +1705,13 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
if (!p->skip_locking) {
int lret;

- btrfs_clear_path_blocking(p);
+ btrfs_clear_path_blocking(p, NULL);
lret = btrfs_try_spin_lock(b);

if (!lret) {
btrfs_set_path_blocking(p);
btrfs_tree_lock(b);
- btrfs_clear_path_blocking(p);
+ btrfs_clear_path_blocking(p, b);
}
}
} else {
@@ -1706,7 +1723,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
btrfs_set_path_blocking(p);
sret = split_leaf(trans, root, key,
p, ins_len, ret == 0);
- btrfs_clear_path_blocking(p);
+ btrfs_clear_path_blocking(p, NULL);

BUG_ON(sret > 0);
if (sret) {
@@ -3926,7 +3943,6 @@ int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
btrfs_release_path(root, path);
goto again;
} else {
- btrfs_clear_path_blocking(path);
goto out;
}
}
@@ -3946,7 +3962,7 @@ int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
path->locks[level - 1] = 1;
path->nodes[level - 1] = cur;
unlock_up(path, level, 1);
- btrfs_clear_path_blocking(path);
+ btrfs_clear_path_blocking(path, NULL);
}
out:
if (ret == 0)
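The new second argument to btrfs_clear_path_blocking() is there for the lockdep work in this merge: when a caller has just taken a blocking tree lock, it passes that buffer as held so the function can force the whole path blocking before switching the locks back to spinning (see the comment added above the function). Condensed from the btrfs_search_slot() hunk above, with extra comments; not standalone code:

/*
 * Condensed from the btrfs_search_slot() change above; not standalone.
 * 'b' is the extent buffer for the next level down, 'p' is the path.
 */
if (!p->skip_locking) {
	int lret;

	btrfs_clear_path_blocking(p, NULL);
	lret = btrfs_try_spin_lock(b);		/* try the spinning lock first */

	if (!lret) {
		btrfs_set_path_blocking(p);	/* btrfs_tree_lock() may sleep */
		btrfs_tree_lock(b);		/* blocking lock on b */
		/*
		 * Pass b as 'held': it was just taken as a blocking lock,
		 * and under lockdep btrfs_clear_path_blocking() forces the
		 * whole path (including held) to blocking before switching
		 * the path's locks back to spinning, so lockdep sees a
		 * consistent ordering.
		 */
		btrfs_clear_path_blocking(p, b);
	}
}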
11 changes: 3 additions & 8 deletions fs/btrfs/ctree.h
@@ -43,11 +43,7 @@ struct btrfs_ordered_sum;

#define BTRFS_ACL_NOT_CACHED ((void *)-1)

- #ifdef CONFIG_LOCKDEP
- # define BTRFS_MAX_LEVEL 7
- #else
- # define BTRFS_MAX_LEVEL 8
- #endif
+ #define BTRFS_MAX_LEVEL 8

/* holds pointers to all of the tree roots */
#define BTRFS_ROOT_TREE_OBJECTID 1ULL
@@ -1715,7 +1711,8 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
u64 empty_size);
struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
- u64 bytenr, u32 blocksize);
+ u64 bytenr, u32 blocksize,
+ int level);
int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 num_bytes, u64 parent, u64 min_bytes,
@@ -1834,9 +1831,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p);
struct btrfs_path *btrfs_alloc_path(void);
void btrfs_free_path(struct btrfs_path *p);
- void btrfs_init_path(struct btrfs_path *p);
void btrfs_set_path_blocking(struct btrfs_path *p);
- void btrfs_clear_path_blocking(struct btrfs_path *p);
void btrfs_unlock_up_safe(struct btrfs_path *p, int level);

int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
46 changes: 45 additions & 1 deletion fs/btrfs/disk-io.c
@@ -75,6 +75,40 @@ struct async_submit_bio {
struct btrfs_work work;
};

+ /* These are used to set the lockdep class on the extent buffer locks.
+ * The class is set by the readpage_end_io_hook after the buffer has
+ * passed csum validation but before the pages are unlocked.
+ *
+ * The lockdep class is also set by btrfs_init_new_buffer on freshly
+ * allocated blocks.
+ *
+ * The class is based on the level in the tree block, which allows lockdep
+ * to know that lower nodes nest inside the locks of higher nodes.
+ *
+ * We also add a check to make sure the highest level of the tree is
+ * the same as our lockdep setup here. If BTRFS_MAX_LEVEL changes, this
+ * code needs update as well.
+ */
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ # if BTRFS_MAX_LEVEL != 8
+ # error
+ # endif
+ static struct lock_class_key btrfs_eb_class[BTRFS_MAX_LEVEL + 1];
+ static const char *btrfs_eb_name[BTRFS_MAX_LEVEL + 1] = {
+ /* leaf */
+ "btrfs-extent-00",
+ "btrfs-extent-01",
+ "btrfs-extent-02",
+ "btrfs-extent-03",
+ "btrfs-extent-04",
+ "btrfs-extent-05",
+ "btrfs-extent-06",
+ "btrfs-extent-07",
+ /* highest possible level */
+ "btrfs-extent-08",
+ };
+ #endif

/*
* extents on the btree inode are pretty simple, there's one extent
* that covers the entire device
@@ -347,6 +381,15 @@ static int check_tree_block_fsid(struct btrfs_root *root,
return ret;
}

+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level)
+ {
+ lockdep_set_class_and_name(&eb->lock,
+ &btrfs_eb_class[level],
+ btrfs_eb_name[level]);
+ }
+ #endif

static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
struct extent_state *state)
{
@@ -392,6 +435,8 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
}
found_level = btrfs_header_level(eb);

+ btrfs_set_buffer_lockdep_class(eb, found_level);

ret = csum_tree_block(root, eb, 1);
if (ret)
ret = -EIO;
@@ -1777,7 +1822,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
ret = find_and_setup_root(tree_root, fs_info,
BTRFS_DEV_TREE_OBJECTID, dev_root);
dev_root->track_dirty = 1;

if (ret)
goto fail_extent_root;

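Giving each tree level its own lockdep class, as the disk-io.c hunks above do for extent buffer locks, is a general pattern for any fixed-depth hierarchy where a child's lock is taken while the parent's is still held. A generic sketch of that pattern; all names below are hypothetical, not btrfs or existing kernel symbols:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

#define MY_MAX_LEVEL 8				/* hypothetical depth limit */

/* One lockdep class key (and name) per level of the hierarchy. */
static struct lock_class_key my_level_keys[MY_MAX_LEVEL + 1];
static const char *my_level_names[MY_MAX_LEVEL + 1] = {
	"my-tree-00", "my-tree-01", "my-tree-02",
	"my-tree-03", "my-tree-04", "my-tree-05",
	"my-tree-06", "my-tree-07", "my-tree-08",
};

struct my_node {
	spinlock_t	lock;
	int		level;
	/* ... payload ... */
};

static void my_node_init(struct my_node *node, int level)
{
	spin_lock_init(&node->lock);
	node->level = level;
	/*
	 * Re-key the lock into the per-level class, so taking a child's
	 * lock while holding the parent's looks to lockdep like ordering
	 * between two classes rather than recursion within one.
	 */
	lockdep_set_class_and_name(&node->lock, &my_level_keys[level],
				   my_level_names[level]);
}

This mirrors the btrfs_eb_class/btrfs_eb_name arrays and btrfs_set_buffer_lockdep_class() above, with the class applied to a spinlock in a made-up node structure instead of an extent buffer's lock.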
10 changes: 10 additions & 0 deletions fs/btrfs/disk-io.h
@@ -101,4 +101,14 @@ int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btree_lock_page_hook(struct page *page);


+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level);
+ #else
+ static inline void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb,
+ int level)
+ {
+ }
+ #endif
#endif
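
The disk-io.h hunk follows the usual config-gated stub idiom: a real prototype when CONFIG_DEBUG_LOCK_ALLOC is enabled, and an empty static inline otherwise, so callers such as btree_readpage_end_io_hook() need no #ifdefs of their own. A generic sketch of the idiom with hypothetical names:

/* Hypothetical debug hook; not a btrfs or existing kernel symbol. */
#ifdef CONFIG_MY_DEBUG
void my_debug_annotate(struct my_object *obj, int level);
#else
static inline void my_debug_annotate(struct my_object *obj, int level)
{
	/* compiles away entirely when the debug option is off */
}
#endif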