
Merge branch 'cleanups-for-4.1-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux into for-linus-4.1
Chris Mason committed Mar 25, 2015
2 parents bc465aa + 258ece0 commit 9deed22
Showing 21 changed files with 126 additions and 135 deletions.
4 changes: 2 additions & 2 deletions fs/btrfs/backref.c
@@ -1206,7 +1206,7 @@ int btrfs_check_shared(struct btrfs_trans_handle *trans,
struct ulist *roots = NULL;
struct ulist_iterator uiter;
struct ulist_node *node;
- struct seq_list elem = {};
+ struct seq_list elem = SEQ_LIST_INIT(elem);
int ret = 0;

tmp = ulist_alloc(GFP_NOFS);
@@ -1610,7 +1610,7 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
struct ulist *roots = NULL;
struct ulist_node *ref_node = NULL;
struct ulist_node *root_node = NULL;
- struct seq_list tree_mod_seq_elem = {};
+ struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
struct ulist_iterator ref_uiter;
struct ulist_iterator root_uiter;

4 changes: 2 additions & 2 deletions fs/btrfs/check-integrity.c
@@ -2990,8 +2990,8 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio)
(unsigned long long)bio->bi_iter.bi_sector,
dev_bytenr, bio->bi_bdev);

- mapped_datav = kmalloc(sizeof(*mapped_datav) * bio->bi_vcnt,
- GFP_NOFS);
+ mapped_datav = kmalloc_array(bio->bi_vcnt,
+ sizeof(*mapped_datav), GFP_NOFS);
if (!mapped_datav)
goto leave;
cur_bytenr = dev_bytenr;
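A note on this conversion: the open-coded `sizeof(*mapped_datav) * bio->bi_vcnt` multiplication can wrap on overflow and quietly allocate a too-small buffer, whereas kmalloc_array() refuses the allocation instead. The kcalloc() conversions in the files below are the same change plus zeroing. A minimal sketch of the guard, modeled on the kernel's slab.h helper (the _sketch name is illustrative, not kernel API):

/*
 * Sketch of what kmalloc_array() adds over a bare kmalloc():
 * fail cleanly if n * size would overflow size_t.
 */
static inline void *kmalloc_array_sketch(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;	/* overflow: better NULL than a short buffer */
	return kmalloc(n * size, flags);
}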
2 changes: 1 addition & 1 deletion fs/btrfs/compression.c
@@ -622,7 +622,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
cb->orig_bio = bio;

nr_pages = DIV_ROUND_UP(compressed_len, PAGE_CACHE_SIZE);
- cb->compressed_pages = kzalloc(sizeof(struct page *) * nr_pages,
+ cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
GFP_NOFS);
if (!cb->compressed_pages)
goto fail1;
9 changes: 4 additions & 5 deletions fs/btrfs/ctree.c
@@ -578,7 +578,7 @@ tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
if (!tree_mod_need_log(fs_info, eb))
return 0;

- tm_list = kzalloc(nr_items * sizeof(struct tree_mod_elem *), flags);
+ tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), flags);
if (!tm_list)
return -ENOMEM;

@@ -677,7 +677,7 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,

if (log_removal && btrfs_header_level(old_root) > 0) {
nritems = btrfs_header_nritems(old_root);
- tm_list = kzalloc(nritems * sizeof(struct tree_mod_elem *),
+ tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
flags);
if (!tm_list) {
ret = -ENOMEM;
@@ -814,7 +814,7 @@ tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
return 0;

- tm_list = kzalloc(nr_items * 2 * sizeof(struct tree_mod_elem *),
+ tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
GFP_NOFS);
if (!tm_list)
return -ENOMEM;
@@ -905,8 +905,7 @@ tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
return 0;

nritems = btrfs_header_nritems(eb);
- tm_list = kzalloc(nritems * sizeof(struct tree_mod_elem *),
- GFP_NOFS);
+ tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
if (!tm_list)
return -ENOMEM;

2 changes: 2 additions & 0 deletions fs/btrfs/ctree.h
@@ -1329,6 +1329,8 @@ struct seq_list {
u64 seq;
};

+ #define SEQ_LIST_INIT(name) { .list = LIST_HEAD_INIT((name).list), .seq = 0 }

enum btrfs_orphan_cleanup_state {
ORPHAN_CLEANUP_STARTED = 1,
ORPHAN_CLEANUP_DONE = 2,
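Why the new initializer in the ctree.h hunk above is safer than `= {}`: zero-initialization leaves the embedded list_head's next/prev pointers NULL, which is not a valid empty list, while LIST_HEAD_INIT points both back at the node itself, the canonical empty state that the list primitives expect. Given the definitions above, SEQ_LIST_INIT(elem) expands to roughly:

struct seq_list elem = {
	.list = { .next = &elem.list, .prev = &elem.list },	/* self-linked = empty */
	.seq = 0,
};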
6 changes: 3 additions & 3 deletions fs/btrfs/dev-replace.c
@@ -670,8 +670,8 @@ void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
srcdev = dev_replace->srcdev;
- args->status.progress_1000 = div64_u64(dev_replace->cursor_left,
- div64_u64(btrfs_device_get_total_bytes(srcdev), 1000));
+ args->status.progress_1000 = div_u64(dev_replace->cursor_left,
+ div_u64(btrfs_device_get_total_bytes(srcdev), 1000));
break;
}
btrfs_dev_replace_unlock(dev_replace);
@@ -806,7 +806,7 @@ static int btrfs_dev_replace_kthread(void *data)
btrfs_dev_replace_status(fs_info, status_args);
progress = status_args->status.progress_1000;
kfree(status_args);
- do_div(progress, 10);
+ progress = div_u64(progress, 10);
printk_in_rcu(KERN_INFO
"BTRFS: continuing dev_replace from %s (devid %llu) to %s @%u%%\n",
dev_replace->srcdev->missing ? "<missing disk>" :
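The division-helper churn throughout this commit comes from 32-bit targets: a plain 64-bit `/` there would pull in a compiler runtime routine the kernel doesn't link, so math64.h provides explicit helpers. div_u64() divides a u64 by a u32 (the cheap case), div64_u64() takes a full u64 divisor, and do_div(n, base) is a macro that rewrites n in place and evaluates to the remainder, which is why bare `do_div(progress, 10);` statements read so oddly and are being replaced. An illustrative sketch, not kernel code:

#include <linux/math64.h>

static void divide_helpers_demo(u64 x)
{
	u64 a = div_u64(x, 1000);		/* u64 dividend, u32 divisor */
	u64 b = div64_u64(x, 0x100000000ULL);	/* divisor wider than 32 bits */
	u64 q = x;
	u32 r = do_div(q, 10);			/* q becomes x / 10; r gets x % 10 */

	(void)a; (void)b; (void)r;
}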
4 changes: 2 additions & 2 deletions fs/btrfs/disk-io.c
@@ -302,7 +302,7 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
offset += cur_len;
}
if (csum_size > sizeof(inline_result)) {
- result = kzalloc(csum_size * sizeof(char), GFP_NOFS);
+ result = kzalloc(csum_size, GFP_NOFS);
if (!result)
return 1;
} else {
@@ -2276,7 +2276,7 @@ int open_ctree(struct super_block *sb,
fs_info->free_chunk_space = 0;
fs_info->tree_mod_log = RB_ROOT;
fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
- fs_info->avg_delayed_ref_runtime = div64_u64(NSEC_PER_SEC, 64);
+ fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
/* readahead state */
INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
spin_lock_init(&fs_info->reada_lock);
21 changes: 10 additions & 11 deletions fs/btrfs/extent-tree.c
@@ -2561,8 +2561,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
*/
spin_lock(&delayed_refs->lock);
avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
- avg = div64_u64(avg, 4);
- fs_info->avg_delayed_ref_runtime = avg;
+ fs_info->avg_delayed_ref_runtime = avg >> 2; /* div by 4 */
spin_unlock(&delayed_refs->lock);
}
return 0;
@@ -2624,7 +2623,7 @@ static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
* We don't ever fill up leaves all the way so multiply by 2 just to be
closer to what we're really going to want to use.
*/
- return div64_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
+ return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
}

int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
@@ -3193,7 +3192,7 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
struct inode *inode = NULL;
u64 alloc_hint = 0;
int dcs = BTRFS_DC_ERROR;
- int num_pages = 0;
+ u64 num_pages = 0;
int retries = 0;
int ret = 0;

@@ -3293,7 +3292,7 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
* taking up quite a bit since it's not folded into the other space
* cache.
*/
- num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
+ num_pages = div_u64(block_group->key.offset, 256 * 1024 * 1024);
if (!num_pages)
num_pages = 1;

@@ -4812,10 +4811,10 @@ static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)

num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
csum_size * 2;
- num_bytes += div64_u64(data_used + meta_used, 50);
+ num_bytes += div_u64(data_used + meta_used, 50);

if (num_bytes * 3 > meta_used)
- num_bytes = div64_u64(meta_used, 3);
+ num_bytes = div_u64(meta_used, 3);

return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
}
@@ -5075,16 +5074,16 @@ static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
BTRFS_I(inode)->csum_bytes == 0)
return 0;

- old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
+ old_csums = (int)div_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
if (reserve)
BTRFS_I(inode)->csum_bytes += num_bytes;
else
BTRFS_I(inode)->csum_bytes -= num_bytes;
csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
- num_csums_per_leaf = (int)div64_u64(csum_size,
+ num_csums_per_leaf = (int)div_u64(csum_size,
sizeof(struct btrfs_csum_item) +
sizeof(struct btrfs_disk_key));
- num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
+ num_csums = (int)div_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
num_csums = num_csums + num_csums_per_leaf - 1;
num_csums = num_csums / num_csums_per_leaf;

@@ -8720,7 +8719,7 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
min_free <<= 1;
} else if (index == BTRFS_RAID_RAID0) {
dev_min = fs_devices->rw_devices;
- do_div(min_free, dev_min);
+ min_free = div64_u64(min_free, dev_min);
}

/* We need to do this so that we can look at pending chunks */
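The __btrfs_run_delayed_refs() hunk above is an exponential moving average, new = (3 * old + sample) / 4, and since the divisor is a power of two the division becomes a shift; the NSEC_PER_SEC >> 6 change in open_ctree() earlier is the same trick for the initial value (10^9 / 64 = 15625000 ns). A standalone restatement with a hypothetical function name:

/* EWMA with weight 3/4 on the old average, 1/4 on the new sample. */
static u64 update_avg_runtime(u64 avg, u64 sample_ns)
{
	return (avg * 3 + sample_ns) >> 2;	/* == (3 * avg + sample) / 4 */
}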
4 changes: 2 additions & 2 deletions fs/btrfs/file-item.c
@@ -185,8 +185,8 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
if (!dst) {
if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
- btrfs_bio->csum_allocated = kmalloc(nblocks * csum_size,
- GFP_NOFS);
+ btrfs_bio->csum_allocated = kmalloc_array(nblocks,
+ csum_size, GFP_NOFS);
if (!btrfs_bio->csum_allocated) {
btrfs_free_path(path);
return -ENOMEM;
12 changes: 4 additions & 8 deletions fs/btrfs/file.c
@@ -273,11 +273,7 @@ void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
defrag = rb_entry(node, struct inode_defrag, rb_node);
kmem_cache_free(btrfs_inode_defrag_cachep, defrag);

- if (need_resched()) {
- spin_unlock(&fs_info->defrag_inodes_lock);
- cond_resched();
- spin_lock(&fs_info->defrag_inodes_lock);
- }
+ cond_resched_lock(&fs_info->defrag_inodes_lock);

node = rb_first(&fs_info->defrag_inodes);
}
@@ -1485,7 +1481,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
PAGE_CACHE_SIZE / (sizeof(struct page *)));
nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
nrptrs = max(nrptrs, 8);
- pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
+ pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
if (!pages)
return -ENOMEM;

@@ -1635,8 +1631,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
btrfs_end_write_no_snapshoting(root);

if (only_release_metadata && copied > 0) {
- u64 lockstart = round_down(pos, root->sectorsize);
- u64 lockend = lockstart +
+ lockstart = round_down(pos, root->sectorsize);
+ lockend = lockstart +
(dirty_pages << PAGE_CACHE_SHIFT) - 1;

set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
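cond_resched_lock() replaces the five-line unlock/reschedule/relock dance here, and twice more in free-space-cache.c below. It is also slightly smarter than the open-coded version: it only cycles the lock when a reschedule is actually pending or, via spin_needbreak(), another CPU is waiting on the lock. A rough sketch of its shape, hedged against the scheduler code of this era (the real helper lives in kernel/sched/):

/* Rough shape of cond_resched_lock(); illustrative only. */
static int cond_resched_lock_sketch(spinlock_t *lock)
{
	if (!need_resched() && !spin_needbreak(lock))
		return 0;	/* nothing to yield to: keep the lock */

	spin_unlock(lock);
	cond_resched();		/* or cpu_relax() when only the lock is contended */
	spin_lock(lock);
	return 1;
}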
36 changes: 15 additions & 21 deletions fs/btrfs/free-space-cache.c
@@ -298,7 +298,7 @@ static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode,

memset(io_ctl, 0, sizeof(struct io_ctl));

- io_ctl->pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
+ io_ctl->pages = kcalloc(num_pages, sizeof(struct page *), GFP_NOFS);
if (!io_ctl->pages)
return -ENOMEM;

@@ -1298,11 +1298,11 @@ static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
u64 offset)
{
u64 bitmap_start;
- u64 bytes_per_bitmap;
+ u32 bytes_per_bitmap;

bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
bitmap_start = offset - ctl->start;
- bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
+ bitmap_start = div_u64(bitmap_start, bytes_per_bitmap);
bitmap_start *= bytes_per_bitmap;
bitmap_start += ctl->start;

@@ -1521,10 +1521,10 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
u64 bitmap_bytes;
u64 extent_bytes;
u64 size = block_group->key.offset;
- u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
- int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
+ u32 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
+ u32 max_bitmaps = div_u64(size + bytes_per_bg - 1, bytes_per_bg);

- max_bitmaps = max(max_bitmaps, 1);
+ max_bitmaps = max_t(u32, max_bitmaps, 1);

ASSERT(ctl->total_bitmaps <= max_bitmaps);

@@ -1537,7 +1537,7 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
max_bytes = MAX_CACHE_BYTES_PER_GIG;
else
max_bytes = MAX_CACHE_BYTES_PER_GIG *
- div64_u64(size, 1024 * 1024 * 1024);
+ div_u64(size, 1024 * 1024 * 1024);

/*
* we want to account for 1 more bitmap than what we have so we can make
@@ -1552,14 +1552,14 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
}

/*
- we want the extent entry threshold to always be at most 1/2 the maxw
+ we want the extent entry threshold to always be at most 1/2 the max
* bytes we can have, or whatever is less than that.
*/
extent_bytes = max_bytes - bitmap_bytes;
- extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));
+ extent_bytes = min_t(u64, extent_bytes, max_bytes >> 1);

ctl->extents_thresh =
- div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
+ div_u64(extent_bytes, sizeof(struct btrfs_free_space));
}

static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
@@ -1673,7 +1673,7 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
*/
if (*bytes >= align) {
tmp = entry->offset - ctl->start + align - 1;
- do_div(tmp, align);
+ tmp = div64_u64(tmp, align);
tmp = tmp * align + ctl->start;
align_off = tmp - entry->offset;
} else {
@@ -2402,11 +2402,8 @@ static void __btrfs_remove_free_space_cache_locked(
} else {
free_bitmap(ctl, info);
}
- if (need_resched()) {
- spin_unlock(&ctl->tree_lock);
- cond_resched();
- spin_lock(&ctl->tree_lock);
- }
+
+ cond_resched_lock(&ctl->tree_lock);
}
}

@@ -2431,11 +2428,8 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)

WARN_ON(cluster->block_group != block_group);
__btrfs_return_cluster_to_free_space(block_group, cluster);
- if (need_resched()) {
- spin_unlock(&ctl->tree_lock);
- cond_resched();
- spin_lock(&ctl->tree_lock);
- }
+
+ cond_resched_lock(&ctl->tree_lock);
}
__btrfs_remove_free_space_cache_locked(ctl);
spin_unlock(&ctl->tree_lock);
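The find_free_space() hunk above is the usual round-up-to-alignment idiom spelled with the division helpers, since `align` is a u64 and cannot go through div_u64()'s u32 divisor: add align - 1, divide, multiply back. (The btrfs_ioctl_resize() change below is the mirror image, rounding down to a sector multiple.) As a standalone sketch:

/* Round (offset - start) up to the next multiple of align, as in
 * find_free_space() above; all quantities u64. */
static u64 round_up_from(u64 offset, u64 start, u64 align)
{
	u64 tmp = offset - start + align - 1;

	tmp = div64_u64(tmp, align);	/* whole alignment units, rounded up */
	return tmp * align + start;
}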
2 changes: 1 addition & 1 deletion fs/btrfs/inode.c
@@ -470,7 +470,7 @@ static noinline void compress_file_range(struct inode *inode,
*/
if (inode_need_compress(inode)) {
WARN_ON(pages);
- pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
+ pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
if (!pages) {
/* just bail out to the uncompressed code */
goto cont;
4 changes: 2 additions & 2 deletions fs/btrfs/ioctl.c
@@ -1564,7 +1564,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
goto out_free;
}

- do_div(new_size, root->sectorsize);
+ new_size = div_u64(new_size, root->sectorsize);
new_size *= root->sectorsize;

printk_in_rcu(KERN_INFO "BTRFS: new size for %s is %llu\n",
@@ -3039,7 +3039,7 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
static int check_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
u64 disko)
{
- struct seq_list tree_mod_seq_elem = {};
+ struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
struct ulist *roots;
struct ulist_iterator uiter;
struct ulist_node *root_node = NULL;
6 changes: 2 additions & 4 deletions fs/btrfs/math.h
@@ -28,17 +28,15 @@ static inline u64 div_factor(u64 num, int factor)
if (factor == 10)
return num;
num *= factor;
- do_div(num, 10);
- return num;
+ return div_u64(num, 10);
}

static inline u64 div_factor_fine(u64 num, int factor)
{
if (factor == 100)
return num;
num *= factor;
- do_div(num, 100);
- return num;
+ return div_u64(num, 100);
}

#endif
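For context, div_factor() and div_factor_fine() scale a byte count to factor/10 or factor/100 of itself; callers use them for thresholds such as "this block group is 70% full". Note that the conversion keeps the multiply-before-divide order, so num * factor must still fit in a u64. A hypothetical caller:

/* Hypothetical usage: trigger once used space passes 70% of total. */
static int nearly_full(u64 used, u64 total)
{
	return used >= div_factor(total, 7);	/* total * 7 / 10 */
}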