Merge git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable
* git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable: (24 commits)
  Btrfs: fix free space cache leak
  Btrfs: avoid taking the chunk_mutex in do_chunk_alloc
  Btrfs end_bio_extent_readpage should look for locked bits
  Btrfs: don't force chunk allocation in find_free_extent
  Btrfs: Check validity before setting an acl
  Btrfs: Fix incorrect inode nlink in btrfs_link()
  Btrfs: Check if btrfs_next_leaf() returns error in btrfs_real_readdir()
  Btrfs: Check if btrfs_next_leaf() returns error in btrfs_listxattr()
  Btrfs: make uncache_state unconditional
  btrfs: using cached extent_state in set/unlock combinations
  Btrfs: avoid taking the trans_mutex in btrfs_end_transaction
  Btrfs: fix subvolume mount by name problem when default mount subvolume is set
  fix user annotation in ioctl.c
  Btrfs: check for duplicate iov_base's when doing dio reads
  btrfs: properly handle overlapping areas in memmove_extent_buffer
  Btrfs: fix memory leaks in btrfs_new_inode()
  Btrfs: check for duplicate iov_base's when doing dio reads
  Btrfs: reuse the extent_map we found when calling btrfs_get_extent
  Btrfs: do not use async submit for small DIO io's
  Btrfs: don't split dio bios if we don't have to
  ...
Linus Torvalds committed Apr 18, 2011
2 parents d8bdc59 + f65647c commit adff377
Showing 14 changed files with 430 additions and 233 deletions.
9 changes: 5 additions & 4 deletions fs/btrfs/acl.c
@@ -178,16 +178,17 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name,
 
 	if (value) {
 		acl = posix_acl_from_xattr(value, size);
-		if (acl == NULL) {
-			value = NULL;
-			size = 0;
+		if (acl) {
+			ret = posix_acl_valid(acl);
+			if (ret)
+				goto out;
 		} else if (IS_ERR(acl)) {
 			return PTR_ERR(acl);
 		}
 	}
 
 	ret = btrfs_set_acl(NULL, dentry->d_inode, acl, type);
-
+out:
 	posix_acl_release(acl);
 
 	return ret;
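
The rewritten branch leans on the three-way return contract of posix_acl_from_xattr() (in this era a two-argument call): an ERR_PTR for a malformed payload, NULL for an empty value meaning "remove the ACL", otherwise a parsed ACL that still has to pass posix_acl_valid(). Below is a minimal sketch of that contract, not the btrfs code; apply_acl_xattr() and set_acl_on_inode() are hypothetical names. Since ERR_PTR values are non-NULL, the sketch deliberately tests IS_ERR() before anything dereferences the pointer.

	#include <linux/err.h>
	#include <linux/fs.h>
	#include <linux/posix_acl.h>
	#include <linux/posix_acl_xattr.h>

	static int set_acl_on_inode(struct inode *inode,
				    struct posix_acl *acl);	/* hypothetical apply step */

	/* Sketch only: illustrates the return contract the patch relies on. */
	static int apply_acl_xattr(struct inode *inode,
				   const void *value, size_t size)
	{
		struct posix_acl *acl = posix_acl_from_xattr(value, size);
		int ret;

		if (IS_ERR(acl))	/* malformed payload: a non-NULL ERR_PTR */
			return PTR_ERR(acl);

		if (acl) {		/* parsed OK: validate before applying */
			ret = posix_acl_valid(acl);
			if (ret)
				goto out;
		}
		/* acl == NULL is legal and means "delete the ACL" */
		ret = set_acl_on_inode(inode, acl);
	out:
		posix_acl_release(acl);	/* safe on NULL */
		return ret;
	}
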
9 changes: 8 additions & 1 deletion fs/btrfs/ctree.h
@@ -740,8 +740,10 @@ struct btrfs_space_info {
 	 */
 	unsigned long reservation_progress;
 
-	int full;		/* indicates that we cannot allocate any more
+	int full:1;		/* indicates that we cannot allocate any more
 				   chunks for this space */
+	int chunk_alloc:1;	/* set if we are allocating a chunk */
+
 	int force_alloc;	/* set if we need to force a chunk alloc for
 				   this space */
 
@@ -2576,6 +2578,11 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
 int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
 			      struct inode *inode, u64 start, u64 end);
 int btrfs_release_file(struct inode *inode, struct file *file);
+void btrfs_drop_pages(struct page **pages, size_t num_pages);
+int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
+		      struct page **pages, size_t num_pages,
+		      loff_t pos, size_t write_bytes,
+		      struct extent_state **cached);
 
 /* tree-defrag.c */
 int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
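Turning full into a one-bit field and adding chunk_alloc:1 beside it packs both booleans into a single word. Adjacent bitfields cannot be updated independently: the compiler implements each store as a read-modify-write of the shared word, so every writer of either bit must hold the same lock (here space_info->lock, which the extent-tree.c changes below take around all updates of full and chunk_alloc). A contract sketch with illustrative names; space_flags_demo is not a btrfs type:

	#include <linux/spinlock.h>

	struct space_flags_demo {
		spinlock_t lock;	/* guards both flag bits below */
		int full:1;		/* no more chunks can be allocated */
		int chunk_alloc:1;	/* a chunk allocation is in flight */
	};

	static void mark_full(struct space_flags_demo *s)
	{
		spin_lock(&s->lock);
		s->full = 1;	/* RMW of the shared word can't race a
				   concurrent update of chunk_alloc */
		spin_unlock(&s->lock);
	}
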
2 changes: 1 addition & 1 deletion fs/btrfs/disk-io.c
@@ -3057,7 +3057,7 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
 		btrfs_destroy_pinned_extent(root,
 					    root->fs_info->pinned_extents);
 
-		t->use_count = 0;
+		atomic_set(&t->use_count, 0);
 		list_del_init(&t->list);
 		memset(t, 0, sizeof(*t));
 		kmem_cache_free(btrfs_transaction_cachep, t);
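This one-liner is fallout from "Btrfs: avoid taking the trans_mutex in btrfs_end_transaction": the transaction's use_count became an atomic_t so that get/put no longer needs the mutex, and even this error-cleanup path must now go through the atomic accessors. The general refcount shape, sketched with a demo type rather than struct btrfs_transaction:

	#include <linux/atomic.h>
	#include <linux/slab.h>

	struct demo_trans {
		atomic_t use_count;	/* was a plain "int use_count" */
	};

	static void demo_trans_get(struct demo_trans *t)
	{
		atomic_inc(&t->use_count);
	}

	static void demo_trans_put(struct demo_trans *t)
	{
		/* atomic_dec_and_test() returns true for the final reference */
		if (atomic_dec_and_test(&t->use_count))
			kfree(t);
	}
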
125 changes: 98 additions & 27 deletions fs/btrfs/extent-tree.c
@@ -33,6 +33,25 @@
 #include "locking.h"
 #include "free-space-cache.h"
 
+/* control flags for do_chunk_alloc's force field
+ * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
+ * if we really need one.
+ *
+ * CHUNK_ALLOC_FORCE means it must try to allocate one
+ *
+ * CHUNK_ALLOC_LIMITED means to only try and allocate one
+ * if we have very few chunks already allocated.  This is
+ * used as part of the clustering code to help make sure
+ * we have a good pool of storage to cluster in, without
+ * filling the FS with empty chunks
+ *
+ */
+enum {
+	CHUNK_ALLOC_NO_FORCE = 0,
+	CHUNK_ALLOC_FORCE = 1,
+	CHUNK_ALLOC_LIMITED = 2,
+};
+
 static int update_block_group(struct btrfs_trans_handle *trans,
 			      struct btrfs_root *root,
 			      u64 bytenr, u64 num_bytes, int alloc);
@@ -3019,7 +3038,8 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
 	found->bytes_readonly = 0;
 	found->bytes_may_use = 0;
 	found->full = 0;
-	found->force_alloc = 0;
+	found->force_alloc = CHUNK_ALLOC_NO_FORCE;
+	found->chunk_alloc = 0;
 	*space_info = found;
 	list_add_rcu(&found->list, &info->space_info);
 	atomic_set(&found->caching_threads, 0);
@@ -3150,7 +3170,7 @@ int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
 	if (!data_sinfo->full && alloc_chunk) {
 		u64 alloc_target;
 
-		data_sinfo->force_alloc = 1;
+		data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
 		spin_unlock(&data_sinfo->lock);
 alloc:
 		alloc_target = btrfs_get_alloc_profile(root, 1);
@@ -3160,7 +3180,8 @@ int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
 
 		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
 				     bytes + 2 * 1024 * 1024,
-				     alloc_target, 0);
+				     alloc_target,
+				     CHUNK_ALLOC_NO_FORCE);
 		btrfs_end_transaction(trans, root);
 		if (ret < 0) {
 			if (ret != -ENOSPC)
@@ -3239,31 +3260,56 @@ static void force_metadata_allocation(struct btrfs_fs_info *info)
 	rcu_read_lock();
 	list_for_each_entry_rcu(found, head, list) {
 		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
-			found->force_alloc = 1;
+			found->force_alloc = CHUNK_ALLOC_FORCE;
 	}
 	rcu_read_unlock();
 }
 
 static int should_alloc_chunk(struct btrfs_root *root,
-			      struct btrfs_space_info *sinfo, u64 alloc_bytes)
+			      struct btrfs_space_info *sinfo, u64 alloc_bytes,
+			      int force)
 {
 	u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
+	u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
 	u64 thresh;
 
-	if (sinfo->bytes_used + sinfo->bytes_reserved +
-	    alloc_bytes + 256 * 1024 * 1024 < num_bytes)
+	if (force == CHUNK_ALLOC_FORCE)
+		return 1;
+
+	/*
+	 * in limited mode, we want to have some free space up to
+	 * about 1% of the FS size.
+	 */
+	if (force == CHUNK_ALLOC_LIMITED) {
+		thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
+		thresh = max_t(u64, 64 * 1024 * 1024,
+			       div_factor_fine(thresh, 1));
+
+		if (num_bytes - num_allocated < thresh)
+			return 1;
+	}
+
+	/*
+	 * we have two similar checks here, one based on a percentage
+	 * and one based on a hard number of 256MB.  The idea
+	 * is that if we have a good amount of free
+	 * room, don't allocate a chunk.  A good amount is
+	 * less than 80% utilized of the chunks we have allocated,
+	 * or more than 256MB free
+	 */
+	if (num_allocated + alloc_bytes + 256 * 1024 * 1024 < num_bytes)
 		return 0;
 
-	if (sinfo->bytes_used + sinfo->bytes_reserved +
-	    alloc_bytes < div_factor(num_bytes, 8))
+	if (num_allocated + alloc_bytes < div_factor(num_bytes, 8))
 		return 0;
 
 	thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
 
 	/* 256MB or 5% of the FS */
 	thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5));
 
 	if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3))
 		return 0;
+
 	return 1;
 }
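
For reference, the div_factor helpers these checks lean on take their factor in tenths and hundredths respectively, so div_factor(num_bytes, 8) is 80% of num_bytes and div_factor_fine(thresh, 5) is 5% of it. The sketch below mirrors the era's btrfs definitions rather than quoting them. Worked through with numbers: for num_bytes = 10 GiB, num_allocated = 9.9 GiB and an 8 MiB request, headroom is under 256 MiB and utilization is above 80%, so neither early return fires and the function reports that a new chunk should be allocated.

	#include <linux/types.h>
	#include <asm/div64.h>

	/* factor is in tenths: div_factor(x, 8) == 80% of x */
	static inline u64 div_factor(u64 num, int factor)
	{
		num *= factor;
		do_div(num, 10);
		return num;
	}

	/* factor is in hundredths: div_factor_fine(x, 5) == 5% of x */
	static inline u64 div_factor_fine(u64 num, int factor)
	{
		num *= factor;
		do_div(num, 100);
		return num;
	}
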

@@ -3273,10 +3319,9 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 {
 	struct btrfs_space_info *space_info;
 	struct btrfs_fs_info *fs_info = extent_root->fs_info;
+	int wait_for_alloc = 0;
 	int ret = 0;
 
-	mutex_lock(&fs_info->chunk_mutex);
-
 	flags = btrfs_reduce_alloc_profile(extent_root, flags);
 
 	space_info = __find_space_info(extent_root->fs_info, flags);
@@ -3287,21 +3332,40 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 	}
 	BUG_ON(!space_info);
 
+again:
 	spin_lock(&space_info->lock);
 	if (space_info->force_alloc)
-		force = 1;
+		force = space_info->force_alloc;
 	if (space_info->full) {
 		spin_unlock(&space_info->lock);
-		goto out;
+		return 0;
 	}
 
-	if (!force && !should_alloc_chunk(extent_root, space_info,
-					  alloc_bytes)) {
+	if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) {
 		spin_unlock(&space_info->lock);
-		goto out;
+		return 0;
+	} else if (space_info->chunk_alloc) {
+		wait_for_alloc = 1;
+	} else {
+		space_info->chunk_alloc = 1;
 	}
+
 	spin_unlock(&space_info->lock);
 
+	mutex_lock(&fs_info->chunk_mutex);
+
+	/*
+	 * The chunk_mutex is held throughout the entirety of a chunk
+	 * allocation, so once we've acquired the chunk_mutex we know that the
+	 * other guy is done and we need to recheck and see if we should
+	 * allocate.
+	 */
+	if (wait_for_alloc) {
+		mutex_unlock(&fs_info->chunk_mutex);
+		wait_for_alloc = 0;
+		goto again;
+	}
+
 	/*
 	 * If we have mixed data/metadata chunks we want to make sure we keep
 	 * allocating mixed chunks instead of individual chunks.
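
This hunk carries the point of "Btrfs: avoid taking the chunk_mutex in do_chunk_alloc": the cheap spinlock now answers whether an allocation is needed and whether one is already running, and chunk_mutex is only taken by the thread that committed to allocating, or briefly by a waiter using it as a barrier. Reduced to its shape, with hypothetical names (demo_space, allocation_needed() and do_expensive_allocation() are illustrations, not btrfs symbols):

	#include <linux/mutex.h>
	#include <linux/spinlock.h>

	struct demo_space {
		spinlock_t lock;	/* cheap: guards the flag only */
		struct mutex big_lock;	/* held across the whole allocation */
		int in_progress;	/* mirrors space_info->chunk_alloc */
	};

	static int allocation_needed(struct demo_space *s);	   /* cf. should_alloc_chunk() */
	static void do_expensive_allocation(struct demo_space *s); /* cf. btrfs_alloc_chunk() */

	static void alloc_once(struct demo_space *s)
	{
	again:
		spin_lock(&s->lock);
		if (!allocation_needed(s)) {
			spin_unlock(&s->lock);
			return;
		}
		if (s->in_progress) {
			/* big_lock is held for the entire allocation, so
			 * acquiring it simply waits the allocator out.
			 */
			spin_unlock(&s->lock);
			mutex_lock(&s->big_lock);
			mutex_unlock(&s->big_lock);
			goto again;	/* recheck: they may have done our work */
		}
		s->in_progress = 1;
		spin_unlock(&s->lock);

		mutex_lock(&s->big_lock);
		do_expensive_allocation(s);
		spin_lock(&s->lock);
		s->in_progress = 0;
		spin_unlock(&s->lock);
		mutex_unlock(&s->big_lock);
	}

The payoff is that callers that need no chunk never touch the mutex at all, and a waiter that loses the race rechecks from the top instead of allocating a second chunk back-to-back.
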
@@ -3327,9 +3391,10 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 		space_info->full = 1;
 	else
 		ret = 1;
-	space_info->force_alloc = 0;
+
+	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
+	space_info->chunk_alloc = 0;
 	spin_unlock(&space_info->lock);
-out:
 	mutex_unlock(&extent_root->fs_info->chunk_mutex);
 	return ret;
 }
@@ -5303,11 +5368,13 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 
 		if (allowed_chunk_alloc) {
 			ret = do_chunk_alloc(trans, root, num_bytes +
-					     2 * 1024 * 1024, data, 1);
+					     2 * 1024 * 1024, data,
+					     CHUNK_ALLOC_LIMITED);
 			allowed_chunk_alloc = 0;
 			done_chunk_alloc = 1;
-		} else if (!done_chunk_alloc) {
-			space_info->force_alloc = 1;
+		} else if (!done_chunk_alloc &&
+			   space_info->force_alloc == CHUNK_ALLOC_NO_FORCE) {
+			space_info->force_alloc = CHUNK_ALLOC_LIMITED;
 		}
 
 		if (loop < LOOP_NO_EMPTY_SIZE) {
@@ -5393,7 +5460,8 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
 	 */
 	if (empty_size || root->ref_cows)
 		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
-				     num_bytes + 2 * 1024 * 1024, data, 0);
+				     num_bytes + 2 * 1024 * 1024, data,
+				     CHUNK_ALLOC_NO_FORCE);
 
 	WARN_ON(num_bytes < root->sectorsize);
 	ret = find_free_extent(trans, root, num_bytes, empty_size,
@@ -5405,7 +5473,7 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
 		num_bytes = num_bytes & ~(root->sectorsize - 1);
 		num_bytes = max(num_bytes, min_alloc_size);
 		do_chunk_alloc(trans, root->fs_info->extent_root,
-			       num_bytes, data, 1);
+			       num_bytes, data, CHUNK_ALLOC_FORCE);
 		goto again;
 	}
 	if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
@@ -8109,13 +8177,15 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
 
 	alloc_flags = update_block_group_flags(root, cache->flags);
 	if (alloc_flags != cache->flags)
-		do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
+		do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
+			       CHUNK_ALLOC_FORCE);
 
 	ret = set_block_group_ro(cache);
 	if (!ret)
 		goto out;
 	alloc_flags = get_alloc_profile(root, cache->space_info->flags);
-	ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
+	ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
+			     CHUNK_ALLOC_FORCE);
 	if (ret < 0)
 		goto out;
 	ret = set_block_group_ro(cache);
@@ -8128,7 +8198,8 @@ int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
 			    struct btrfs_root *root, u64 type)
 {
 	u64 alloc_flags = get_alloc_profile(root, type);
-	return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
+	return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
+			      CHUNK_ALLOC_FORCE);
 }
 
 /*
(The diffs for the remaining 10 of the 14 changed files are not shown here.)
