Merge branch 'misc-cleanups-4.5' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux into for-linus-4.5

Signed-off-by: Chris Mason <clm@fb.com>
Chris Mason committed Jan 11, 2016
2 parents a305810 + a7ca422 commit b28cf57
Showing 24 changed files with 195 additions and 305 deletions.
23 changes: 6 additions & 17 deletions fs/btrfs/backref.c
@@ -520,13 +520,10 @@ static inline int ref_for_same_block(struct __prelim_ref *ref1,
static int __add_missing_keys(struct btrfs_fs_info *fs_info,
struct list_head *head)
{
struct list_head *pos;
struct __prelim_ref *ref;
struct extent_buffer *eb;

list_for_each(pos, head) {
struct __prelim_ref *ref;
ref = list_entry(pos, struct __prelim_ref, list);

list_for_each_entry(ref, head, list) {
if (ref->parent)
continue;
if (ref->key_for_search.type)
@@ -563,23 +560,15 @@ static int __add_missing_keys(struct btrfs_fs_info *fs_info,
*/
static void __merge_refs(struct list_head *head, int mode)
{
struct list_head *pos1;
struct __prelim_ref *ref1;

list_for_each(pos1, head) {
struct list_head *n2;
struct list_head *pos2;
struct __prelim_ref *ref1;
list_for_each_entry(ref1, head, list) {
struct __prelim_ref *ref2 = ref1, *tmp;

ref1 = list_entry(pos1, struct __prelim_ref, list);

for (pos2 = pos1->next, n2 = pos2->next; pos2 != head;
pos2 = n2, n2 = pos2->next) {
struct __prelim_ref *ref2;
list_for_each_entry_safe_continue(ref2, tmp, head, list) {
struct __prelim_ref *xchg;
struct extent_inode_elem *eie;

ref2 = list_entry(pos2, struct __prelim_ref, list);

if (!ref_for_same_block(ref1, ref2))
continue;
if (mode == 1) {
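The backref.c hunks above are representative of most of this merge: an open-coded list_for_each() cursor plus a list_entry() lookup in the loop body is replaced by the typed list_for_each_entry() helper, and __merge_refs() switches to list_for_each_entry_safe_continue(), which resumes iteration at the entry after ref1 and caches the next node so entries may be dropped mid-walk. A minimal sketch of the basic conversion, using a hypothetical struct item rather than the btrfs types:

#include <linux/list.h>

struct item {
	struct list_head list;
	int key;
};

/* Before: manual cursor plus list_entry() on every iteration. */
static int count_keyed_old(struct list_head *head)
{
	struct list_head *pos;
	int n = 0;

	list_for_each(pos, head) {
		struct item *it = list_entry(pos, struct item, list);

		if (it->key)
			n++;
	}
	return n;
}

/* After: list_for_each_entry() declares the typed cursor directly. */
static int count_keyed_new(struct list_head *head)
{
	struct item *it;
	int n = 0;

	list_for_each_entry(it, head, list) {
		if (it->key)
			n++;
	}
	return n;
}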
105 changes: 26 additions & 79 deletions fs/btrfs/check-integrity.c
@@ -531,13 +531,9 @@ static struct btrfsic_block *btrfsic_block_hashtable_lookup(
(((unsigned int)(dev_bytenr >> 16)) ^
((unsigned int)((uintptr_t)bdev))) &
(BTRFSIC_BLOCK_HASHTABLE_SIZE - 1);
struct list_head *elem;

list_for_each(elem, h->table + hashval) {
struct btrfsic_block *const b =
list_entry(elem, struct btrfsic_block,
collision_resolving_node);
struct btrfsic_block *b;

list_for_each_entry(b, h->table + hashval, collision_resolving_node) {
if (b->dev_state->bdev == bdev && b->dev_bytenr == dev_bytenr)
return b;
}
@@ -588,13 +584,9 @@ static struct btrfsic_block_link *btrfsic_block_link_hashtable_lookup(
((unsigned int)((uintptr_t)bdev_ref_to)) ^
((unsigned int)((uintptr_t)bdev_ref_from))) &
(BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE - 1);
struct list_head *elem;

list_for_each(elem, h->table + hashval) {
struct btrfsic_block_link *const l =
list_entry(elem, struct btrfsic_block_link,
collision_resolving_node);
struct btrfsic_block_link *l;

list_for_each_entry(l, h->table + hashval, collision_resolving_node) {
BUG_ON(NULL == l->block_ref_to);
BUG_ON(NULL == l->block_ref_from);
if (l->block_ref_to->dev_state->bdev == bdev_ref_to &&
@@ -639,13 +631,9 @@ static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(
const unsigned int hashval =
(((unsigned int)((uintptr_t)bdev)) &
(BTRFSIC_DEV2STATE_HASHTABLE_SIZE - 1));
struct list_head *elem;

list_for_each(elem, h->table + hashval) {
struct btrfsic_dev_state *const ds =
list_entry(elem, struct btrfsic_dev_state,
collision_resolving_node);
struct btrfsic_dev_state *ds;

list_for_each_entry(ds, h->table + hashval, collision_resolving_node) {
if (ds->bdev == bdev)
return ds;
}
@@ -1720,29 +1708,20 @@ static int btrfsic_read_block(struct btrfsic_state *state,

static void btrfsic_dump_database(struct btrfsic_state *state)
{
struct list_head *elem_all;
const struct btrfsic_block *b_all;

BUG_ON(NULL == state);

printk(KERN_INFO "all_blocks_list:\n");
list_for_each(elem_all, &state->all_blocks_list) {
const struct btrfsic_block *const b_all =
list_entry(elem_all, struct btrfsic_block,
all_blocks_node);
struct list_head *elem_ref_to;
struct list_head *elem_ref_from;
list_for_each_entry(b_all, &state->all_blocks_list, all_blocks_node) {
const struct btrfsic_block_link *l;

printk(KERN_INFO "%c-block @%llu (%s/%llu/%d)\n",
btrfsic_get_block_type(state, b_all),
b_all->logical_bytenr, b_all->dev_state->name,
b_all->dev_bytenr, b_all->mirror_num);

list_for_each(elem_ref_to, &b_all->ref_to_list) {
const struct btrfsic_block_link *const l =
list_entry(elem_ref_to,
struct btrfsic_block_link,
node_ref_to);

list_for_each_entry(l, &b_all->ref_to_list, node_ref_to) {
printk(KERN_INFO " %c @%llu (%s/%llu/%d)"
" refers %u* to"
" %c @%llu (%s/%llu/%d)\n",
@@ -1757,12 +1736,7 @@ static void btrfsic_dump_database(struct btrfsic_state *state)
l->block_ref_to->mirror_num);
}

list_for_each(elem_ref_from, &b_all->ref_from_list) {
const struct btrfsic_block_link *const l =
list_entry(elem_ref_from,
struct btrfsic_block_link,
node_ref_from);

list_for_each_entry(l, &b_all->ref_from_list, node_ref_from) {
printk(KERN_INFO " %c @%llu (%s/%llu/%d)"
" is ref %u* from"
" %c @%llu (%s/%llu/%d)\n",
@@ -1845,8 +1819,7 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
&state->block_hashtable);
if (NULL != block) {
u64 bytenr = 0;
struct list_head *elem_ref_to;
struct list_head *tmp_ref_to;
struct btrfsic_block_link *l, *tmp;

if (block->is_superblock) {
bytenr = btrfs_super_bytenr((struct btrfs_super_block *)
@@ -1967,13 +1940,8 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
* because it still carries valueable information
* like whether it was ever written and IO completed.
*/
list_for_each_safe(elem_ref_to, tmp_ref_to,
&block->ref_to_list) {
struct btrfsic_block_link *const l =
list_entry(elem_ref_to,
struct btrfsic_block_link,
node_ref_to);

list_for_each_entry_safe(l, tmp, &block->ref_to_list,
node_ref_to) {
if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
btrfsic_print_rem_link(state, l);
l->ref_cnt--;
@@ -2436,7 +2404,7 @@ static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
struct btrfsic_block *const block,
int recursion_level)
{
struct list_head *elem_ref_to;
const struct btrfsic_block_link *l;
int ret = 0;

if (recursion_level >= 3 + BTRFS_MAX_LEVEL) {
@@ -2464,11 +2432,7 @@ static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
* This algorithm is recursive because the amount of used stack
* space is very small and the max recursion depth is limited.
*/
list_for_each(elem_ref_to, &block->ref_to_list) {
const struct btrfsic_block_link *const l =
list_entry(elem_ref_to, struct btrfsic_block_link,
node_ref_to);

list_for_each_entry(l, &block->ref_to_list, node_ref_to) {
if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
printk(KERN_INFO
"rl=%d, %c @%llu (%s/%llu/%d)"
@@ -2561,7 +2525,7 @@ static int btrfsic_is_block_ref_by_superblock(
const struct btrfsic_block *block,
int recursion_level)
{
struct list_head *elem_ref_from;
const struct btrfsic_block_link *l;

if (recursion_level >= 3 + BTRFS_MAX_LEVEL) {
/* refer to comment at "abort cyclic linkage (case 1)" */
@@ -2576,11 +2540,7 @@ static int btrfsic_is_block_ref_by_superblock(
* This algorithm is recursive because the amount of used stack space
* is very small and the max recursion depth is limited.
*/
list_for_each(elem_ref_from, &block->ref_from_list) {
const struct btrfsic_block_link *const l =
list_entry(elem_ref_from, struct btrfsic_block_link,
node_ref_from);

list_for_each_entry(l, &block->ref_from_list, node_ref_from) {
if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
printk(KERN_INFO
"rl=%d, %c @%llu (%s/%llu/%d)"
@@ -2669,7 +2629,7 @@ static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
const struct btrfsic_block *block,
int indent_level)
{
struct list_head *elem_ref_to;
const struct btrfsic_block_link *l;
int indent_add;
static char buf[80];
int cursor_position;
@@ -2704,11 +2664,7 @@ static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
}

cursor_position = indent_level;
list_for_each(elem_ref_to, &block->ref_to_list) {
const struct btrfsic_block_link *const l =
list_entry(elem_ref_to, struct btrfsic_block_link,
node_ref_to);

list_for_each_entry(l, &block->ref_to_list, node_ref_to) {
while (cursor_position < indent_level) {
printk(" ");
cursor_position++;
@@ -3165,8 +3121,7 @@ int btrfsic_mount(struct btrfs_root *root,
void btrfsic_unmount(struct btrfs_root *root,
struct btrfs_fs_devices *fs_devices)
{
struct list_head *elem_all;
struct list_head *tmp_all;
struct btrfsic_block *b_all, *tmp_all;
struct btrfsic_state *state;
struct list_head *dev_head = &fs_devices->devices;
struct btrfs_device *device;
@@ -3206,20 +3161,12 @@ void btrfsic_unmount(struct btrfs_root *root,
* just free all memory that was allocated dynamically.
* Free the blocks and the block_links.
*/
list_for_each_safe(elem_all, tmp_all, &state->all_blocks_list) {
struct btrfsic_block *const b_all =
list_entry(elem_all, struct btrfsic_block,
all_blocks_node);
struct list_head *elem_ref_to;
struct list_head *tmp_ref_to;

list_for_each_safe(elem_ref_to, tmp_ref_to,
&b_all->ref_to_list) {
struct btrfsic_block_link *const l =
list_entry(elem_ref_to,
struct btrfsic_block_link,
node_ref_to);
list_for_each_entry_safe(b_all, tmp_all, &state->all_blocks_list,
all_blocks_node) {
struct btrfsic_block_link *l, *tmp;

list_for_each_entry_safe(l, tmp, &b_all->ref_to_list,
node_ref_to) {
if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
btrfsic_print_rem_link(state, l);

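Where check-integrity.c frees or unlinks entries during the walk (btrfsic_process_written_block(), btrfsic_unmount()), the conversion uses the _safe variants, which fetch the next node before the loop body runs so deleting the current entry is harmless. A minimal sketch of that teardown pattern, with a hypothetical struct link:

#include <linux/list.h>
#include <linux/slab.h>

struct link {
	struct list_head node;
};

static void free_all_links(struct list_head *head)
{
	struct link *l, *tmp;

	/* 'tmp' already points at the next entry when 'l' is freed. */
	list_for_each_entry_safe(l, tmp, head, node) {
		list_del(&l->node);
		kfree(l);
	}
}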
2 changes: 1 addition & 1 deletion fs/btrfs/ctree.c
@@ -1555,7 +1555,7 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
return 0;
}

search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
search_start = buf->start & ~((u64)SZ_1G - 1);

if (parent)
btrfs_set_lock_blocking(parent);
7 changes: 4 additions & 3 deletions fs/btrfs/ctree.h
@@ -35,6 +35,7 @@
#include <linux/btrfs.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/sizes.h>
#include "extent_io.h"
#include "extent_map.h"
#include "async-thread.h"
@@ -199,9 +200,9 @@ static const int btrfs_csum_sizes[] = { 4 };
/* ioprio of readahead is set to idle */
#define BTRFS_IOPRIO_READA (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0))

#define BTRFS_DIRTY_METADATA_THRESH (32 * 1024 * 1024)
#define BTRFS_DIRTY_METADATA_THRESH SZ_32M

#define BTRFS_MAX_EXTENT_SIZE (128 * 1024 * 1024)
#define BTRFS_MAX_EXTENT_SIZE SZ_128M

/*
* The key defines the order in the tree, and so it also defines (optimal)
@@ -4347,7 +4348,7 @@ static inline void __btrfs_clear_fs_incompat(struct btrfs_fs_info *fs_info,
#define btrfs_fs_incompat(fs_info, opt) \
__btrfs_fs_incompat((fs_info), BTRFS_FEATURE_INCOMPAT_##opt)

static inline int __btrfs_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag)
static inline bool __btrfs_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag)
{
struct btrfs_super_block *disk_super;
disk_super = fs_info->super_copy;
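The size literals in ctree.c and ctree.h are rewritten in terms of the SZ_* constants from <linux/sizes.h>, which the new include above makes available. The constants are the same powers of two, so expressions such as the search_start mask in btrfs_cow_block() are numerically unchanged; a small illustration with a hypothetical helper name:

#include <linux/sizes.h>
#include <linux/types.h>

/*
 * SZ_1G is 0x40000000, identical to the old (1024 * 1024 * 1024)
 * literal, so rounding an offset down to its 1 GiB chunk reads as:
 */
static inline u64 round_down_1g(u64 start)
{
	return start & ~((u64)SZ_1G - 1);
}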
7 changes: 1 addition & 6 deletions fs/btrfs/delayed-inode.c
@@ -54,16 +54,11 @@ static inline void btrfs_init_delayed_node(
delayed_node->root = root;
delayed_node->inode_id = inode_id;
atomic_set(&delayed_node->refs, 0);
delayed_node->count = 0;
delayed_node->flags = 0;
delayed_node->ins_root = RB_ROOT;
delayed_node->del_root = RB_ROOT;
mutex_init(&delayed_node->mutex);
delayed_node->index_cnt = 0;
INIT_LIST_HEAD(&delayed_node->n_list);
INIT_LIST_HEAD(&delayed_node->p_list);
delayed_node->bytes_reserved = 0;
memset(&delayed_node->inode_item, 0, sizeof(delayed_node->inode_item));
}

static inline int btrfs_is_continuous_delayed_item(
@@ -132,7 +127,7 @@ static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
if (node)
return node;

node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
if (!node)
return ERR_PTR(-ENOMEM);
btrfs_init_delayed_node(node, root, ino);
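The two delayed-inode.c hunks work together: btrfs_get_or_create_delayed_node() now allocates with kmem_cache_zalloc(), so btrfs_init_delayed_node() can drop the explicit zeroing of count, flags, index_cnt, bytes_reserved and the inode_item memset. A minimal sketch of the pattern, using a hypothetical cache and struct:

#include <linux/slab.h>
#include <linux/types.h>

struct foo {
	int count;
	u64 flags;
};

static struct kmem_cache *foo_cache;	/* hypothetical cache */

static struct foo *foo_alloc(void)
{
	/* zalloc returns zeroed memory, so no per-field reset is needed. */
	return kmem_cache_zalloc(foo_cache, GFP_NOFS);
}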
3 changes: 1 addition & 2 deletions fs/btrfs/disk-io.c
@@ -2824,7 +2824,7 @@ int open_ctree(struct super_block *sb,

fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
4 * 1024 * 1024 / PAGE_CACHE_SIZE);
SZ_4M / PAGE_CACHE_SIZE);

tree_root->nodesize = nodesize;
tree_root->sectorsize = sectorsize;
@@ -3996,7 +3996,6 @@ static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
balance_dirty_pages_ratelimited(
root->fs_info->btree_inode->i_mapping);
}
return;
}

void btrfs_btree_balance_dirty(struct btrfs_root *root)
4 changes: 2 additions & 2 deletions fs/btrfs/disk-io.h
@@ -19,7 +19,7 @@
#ifndef __DISKIO__
#define __DISKIO__

#define BTRFS_SUPER_INFO_OFFSET (64 * 1024)
#define BTRFS_SUPER_INFO_OFFSET SZ_64K
#define BTRFS_SUPER_INFO_SIZE 4096

#define BTRFS_SUPER_MIRROR_MAX 3
@@ -35,7 +35,7 @@ enum btrfs_wq_endio_type {

static inline u64 btrfs_sb_offset(int mirror)
{
u64 start = 16 * 1024;
u64 start = SZ_16K;
if (mirror)
return start << (BTRFS_SUPER_MIRROR_SHIFT * mirror);
return BTRFS_SUPER_INFO_OFFSET;
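For reference, assuming BTRFS_SUPER_MIRROR_SHIFT is 12 (it is defined elsewhere in this header and not part of the hunk), the new SZ_* constants leave the superblock mirror layout unchanged:

/*
 * btrfs_sb_offset(0) = BTRFS_SUPER_INFO_OFFSET = SZ_64K        (64 KiB)
 * btrfs_sb_offset(1) = SZ_16K << (12 * 1)      = 0x4000 << 12  (64 MiB)
 * btrfs_sb_offset(2) = SZ_16K << (12 * 2)      = 0x4000 << 24  (256 GiB)
 */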