
Commit

---
r: 223088
b: refs/heads/master
c: d5f04ff
h: refs/heads/master
v: v3
Linus Torvalds committed Dec 14, 2010
1 parent 381ba68 commit ce71fc5
Showing 52 changed files with 650 additions and 372 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 8b0f1840a46449e1946fc88860ef3ec8d6b1c2c7
refs/heads/master: d5f04ff5fba75e3e9607a65f46cfbfbdf8d69ce4
7 changes: 6 additions & 1 deletion trunk/Documentation/filesystems/Locking
@@ -173,12 +173,13 @@ prototypes:
sector_t (*bmap)(struct address_space *, sector_t);
int (*invalidatepage) (struct page *, unsigned long);
int (*releasepage) (struct page *, int);
void (*freepage)(struct page *);
int (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
loff_t offset, unsigned long nr_segs);
int (*launder_page) (struct page *);

locking rules:
All except set_page_dirty may block
All except set_page_dirty and freepage may block

BKL PageLocked(page) i_mutex
writepage: no yes, unlocks (see below)
@@ -193,6 +194,7 @@ perform_write: no n/a yes
bmap: no
invalidatepage: no yes
releasepage: no yes
freepage: no yes
direct_IO: no
launder_page: no yes

@@ -288,6 +290,9 @@ buffers from the page in preparation for freeing it. It returns zero to
indicate that the buffers are (or may be) freeable. If ->releasepage is zero,
the kernel assumes that the fs has no private interest in the buffers.

->freepage() is called when the kernel is done dropping the page
from the page cache.

->launder_page() may be called prior to releasing a page if
it is still found to be dirty. It returns zero if the page was successfully
cleaned, or an error value if not. Note that in order to prevent the page
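To make the new rule concrete, here is a minimal sketch of a ->freepage() implementation that follows the table above: it runs with the page locked, does only non-blocking cleanup, and never dereferences page->mapping. The examplefs_* names and the per-page tag are illustrative assumptions, not code from this commit.

#include <linux/mm.h>
#include <linux/slab.h>

/* Hypothetical per-page bookkeeping that the filesystem stashed in
 * page->private from its read/write paths.
 */
struct examplefs_page_tag {
	unsigned long flags;
};

static void examplefs_freepage(struct page *page)
{
	struct examplefs_page_tag *tag = (void *)page_private(page);

	/* Non-blocking by construction: kfree() never sleeps, and we do
	 * not touch page->mapping, which may already be gone.
	 */
	kfree(tag);
	set_page_private(page, 0);
}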
7 changes: 7 additions & 0 deletions trunk/Documentation/filesystems/vfs.txt
@@ -534,6 +534,7 @@ struct address_space_operations {
sector_t (*bmap)(struct address_space *, sector_t);
int (*invalidatepage) (struct page *, unsigned long);
int (*releasepage) (struct page *, int);
void (*freepage)(struct page *);
ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
loff_t offset, unsigned long nr_segs);
struct page* (*get_xip_page)(struct address_space *, sector_t,
@@ -678,6 +679,12 @@ struct address_space_operations {
need to ensure this. Possibly it can clear the PageUptodate
bit if it cannot free private data yet.

freepage: freepage is called once the page is no longer visible in
the page cache in order to allow the cleanup of any private
data. Since it may be called by the memory reclaimer, it
should not assume that the original address_space mapping still
exists, and it should not block.

direct_IO: called by the generic read/write routines to perform
direct_IO - that is IO requests which bypass the page cache
and transfer data directly between the storage and the
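As a usage sketch, a filesystem adopting the new hook registers it in its struct address_space_operations; the entry below assumes the hypothetical examplefs_freepage() sketched after the Locking section above, and the other callbacks are omitted for brevity.

static void examplefs_freepage(struct page *page);	/* see sketch above */

static const struct address_space_operations examplefs_aops = {
	.freepage	= examplefs_freepage,
	/* .readpage, .writepage, .releasepage, ... as before */
};

Because freepage runs after the page has left the page cache, possibly from reclaim, anything it needs must be reachable from the page itself (for example page->private) rather than from the mapping.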
11 changes: 6 additions & 5 deletions trunk/fs/btrfs/disk-io.c
@@ -696,6 +696,7 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
__btree_submit_bio_done);
}

#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
struct page *newpage, struct page *page)
{
@@ -712,12 +713,9 @@ static int btree_migratepage(struct address_space *mapping,
if (page_has_private(page) &&
!try_to_release_page(page, GFP_KERNEL))
return -EAGAIN;
#ifdef CONFIG_MIGRATION
return migrate_page(mapping, newpage, page);
#else
return -ENOSYS;
#endif
}
#endif

static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
@@ -1009,7 +1007,10 @@ static int find_and_setup_root(struct btrfs_root *tree_root,
blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
blocksize, generation);
BUG_ON(!root->node);
if (!root->node || !btrfs_buffer_uptodate(root->node, generation)) {
free_extent_buffer(root->node);
return -EIO;
}
root->commit_root = btrfs_root_node(root);
return 0;
}
75 changes: 59 additions & 16 deletions trunk/fs/btrfs/extent-tree.c
@@ -429,6 +429,7 @@ static int caching_kthread(void *data)

static int cache_block_group(struct btrfs_block_group_cache *cache,
struct btrfs_trans_handle *trans,
struct btrfs_root *root,
int load_cache_only)
{
struct btrfs_fs_info *fs_info = cache->fs_info;
@@ -442,9 +443,12 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,

/*
* We can't do the read from on-disk cache during a commit since we need
* to have the normal tree locking.
* to have the normal tree locking. Also if we are currently trying to
* allocate blocks for the tree root we can't do the fast caching since
* we likely hold important locks.
*/
if (!trans->transaction->in_commit) {
if (!trans->transaction->in_commit &&
(root && root != root->fs_info->tree_root)) {
spin_lock(&cache->lock);
if (cache->cached != BTRFS_CACHE_NO) {
spin_unlock(&cache->lock);
@@ -2741,6 +2745,7 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
struct btrfs_root *root = block_group->fs_info->tree_root;
struct inode *inode = NULL;
u64 alloc_hint = 0;
int dcs = BTRFS_DC_ERROR;
int num_pages = 0;
int retries = 0;
int ret = 0;
@@ -2795,6 +2800,8 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,

spin_lock(&block_group->lock);
if (block_group->cached != BTRFS_CACHE_FINISHED) {
/* We're not cached, don't bother trying to write stuff out */
dcs = BTRFS_DC_WRITTEN;
spin_unlock(&block_group->lock);
goto out_put;
}
@@ -2821,17 +2828,16 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
num_pages, num_pages,
&alloc_hint);
if (!ret)
dcs = BTRFS_DC_SETUP;
btrfs_free_reserved_data_space(inode, num_pages);
out_put:
iput(inode);
out_free:
btrfs_release_path(root, path);
out:
spin_lock(&block_group->lock);
if (ret)
block_group->disk_cache_state = BTRFS_DC_ERROR;
else
block_group->disk_cache_state = BTRFS_DC_SETUP;
block_group->disk_cache_state = dcs;
spin_unlock(&block_group->lock);

return ret;
@@ -3037,7 +3043,13 @@ static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)

u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
{
u64 num_devices = root->fs_info->fs_devices->rw_devices;
/*
* we add in the count of missing devices because we want
* to make sure that any RAID levels on a degraded FS
* continue to be honored.
*/
u64 num_devices = root->fs_info->fs_devices->rw_devices +
root->fs_info->fs_devices->missing_devices;

if (num_devices == 1)
flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
@@ -4080,7 +4092,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
* space back to the block group, otherwise we will leak space.
*/
if (!alloc && cache->cached == BTRFS_CACHE_NO)
cache_block_group(cache, trans, 1);
cache_block_group(cache, trans, NULL, 1);

byte_in_group = bytenr - cache->key.objectid;
WARN_ON(byte_in_group > cache->key.offset);
@@ -4930,11 +4942,31 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
btrfs_get_block_group(block_group);
search_start = block_group->key.objectid;

/*
* this can happen if we end up cycling through all the
* raid types, but we want to make sure we only allocate
* for the proper type.
*/
if (!block_group_bits(block_group, data)) {
u64 extra = BTRFS_BLOCK_GROUP_DUP |
BTRFS_BLOCK_GROUP_RAID1 |
BTRFS_BLOCK_GROUP_RAID10;

/*
* if they asked for extra copies and this block group
* doesn't provide them, bail. This does allow us to
* fill raid0 from raid1.
*/
if ((data & extra) && !(block_group->flags & extra))
goto loop;
}

have_block_group:
if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
u64 free_percent;

ret = cache_block_group(block_group, trans, 1);
ret = cache_block_group(block_group, trans,
orig_root, 1);
if (block_group->cached == BTRFS_CACHE_FINISHED)
goto have_block_group;

@@ -4958,7 +4990,8 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
if (loop > LOOP_CACHING_NOWAIT ||
(loop > LOOP_FIND_IDEAL &&
atomic_read(&space_info->caching_threads) < 2)) {
ret = cache_block_group(block_group, trans, 0);
ret = cache_block_group(block_group, trans,
orig_root, 0);
BUG_ON(ret);
}
found_uncached_bg = true;
@@ -5515,7 +5548,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
u64 num_bytes = ins->offset;

block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
cache_block_group(block_group, trans, 0);
cache_block_group(block_group, trans, NULL, 0);
caching_ctl = get_caching_control(block_group);

if (!caching_ctl) {
@@ -6300,9 +6333,13 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
NULL, NULL);
BUG_ON(ret < 0);
if (ret > 0) {
ret = btrfs_del_orphan_item(trans, tree_root,
root->root_key.objectid);
BUG_ON(ret);
/* if we fail to delete the orphan item this time
* around, it'll get picked up the next time.
*
* The most common failure here is just -ENOENT.
*/
btrfs_del_orphan_item(trans, tree_root,
root->root_key.objectid);
}
}

@@ -7878,7 +7915,14 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;

num_devices = root->fs_info->fs_devices->rw_devices;
/*
* we add in the count of missing devices because we want
* to make sure that any RAID levels on a degraded FS
* continue to be honored.
*/
num_devices = root->fs_info->fs_devices->rw_devices +
root->fs_info->fs_devices->missing_devices;

if (num_devices == 1) {
stripped |= BTRFS_BLOCK_GROUP_DUP;
stripped = flags & ~stripped;
@@ -8247,7 +8291,6 @@ int btrfs_read_block_groups(struct btrfs_root *root)
break;
if (ret != 0)
goto error;

leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
cache = kzalloc(sizeof(*cache), GFP_NOFS);
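To illustrate the two missing_devices hunks above with a hypothetical degraded mount: on a two-device RAID1 filesystem with one device absent, rw_devices alone is 1, which would have caused the profile-reduction code to strip the RAID1 bit; counting the missing device keeps num_devices at 2, so the RAID profile of the degraded filesystem is preserved. A standalone userspace sketch of that arithmetic (not btrfs code):

#include <stdio.h>

/* Field names mirror btrfs_fs_devices, but this is an illustration only. */
struct fake_fs_devices {
	unsigned long long rw_devices;
	unsigned long long missing_devices;
};

int main(void)
{
	/* Two-device RAID1 mounted degraded: one device missing. */
	struct fake_fs_devices dev = { .rw_devices = 1, .missing_devices = 1 };

	unsigned long long old_count = dev.rw_devices;
	unsigned long long new_count = dev.rw_devices + dev.missing_devices;

	/* num_devices == 1 made the old code drop RAID1/RAID10 from the
	 * allocation profile; the new count keeps it.
	 */
	printf("old num_devices=%llu -> RAID1 stripped\n", old_count);
	printf("new num_devices=%llu -> RAID1 kept\n", new_count);
	return 0;
}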