
Commit

Merge branch 'cleanups/for-4.4' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux into for-linus-4.4
Chris Mason committed Oct 22, 2015
2 parents 6db4a73 + ddd664f commit a0d58e4
Showing 16 changed files with 220 additions and 211 deletions.
4 changes: 2 additions & 2 deletions fs/btrfs/check-integrity.c
@@ -667,7 +667,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
 	selected_super = kzalloc(sizeof(*selected_super), GFP_NOFS);
 	if (NULL == selected_super) {
 		printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
-		return -1;
+		return -ENOMEM;
 	}
 
 	list_for_each_entry(device, dev_head, dev_list) {
@@ -1660,7 +1660,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
 					sizeof(*block_ctx->pagev)) *
 				     num_pages, GFP_NOFS);
 	if (!block_ctx->mem_to_free)
-		return -1;
+		return -ENOMEM;
 	block_ctx->datav = block_ctx->mem_to_free;
 	block_ctx->pagev = (struct page **)(block_ctx->datav + num_pages);
 	for (i = 0; i < num_pages; i++) {
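A note on the -ENOMEM hunks above: in the kernel, a bare -1 is numerically -EPERM, so a caller that propagates the return value unchanged would report "Operation not permitted" for what is actually a failed allocation. A minimal user-space sketch of why the errno-style return is the right one (illustrative, not code from this commit):

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>

	/* Stand-ins for the old and new return conventions. */
	static int old_style(void) { return -1; }       /* numerically -EPERM */
	static int new_style(void) { return -ENOMEM; }

	int main(void)
	{
		/* old: "Operation not permitted"; new: "Cannot allocate memory" */
		printf("old: %s\n", strerror(-old_style()));
		printf("new: %s\n", strerror(-new_style()));
		return 0;
	}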
94 changes: 48 additions & 46 deletions fs/btrfs/compression.c
@@ -745,11 +745,13 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	return ret;
 }
 
-static struct list_head comp_idle_workspace[BTRFS_COMPRESS_TYPES];
-static spinlock_t comp_workspace_lock[BTRFS_COMPRESS_TYPES];
-static int comp_num_workspace[BTRFS_COMPRESS_TYPES];
-static atomic_t comp_alloc_workspace[BTRFS_COMPRESS_TYPES];
-static wait_queue_head_t comp_workspace_wait[BTRFS_COMPRESS_TYPES];
+static struct {
+	struct list_head idle_ws;
+	spinlock_t ws_lock;
+	int num_ws;
+	atomic_t alloc_ws;
+	wait_queue_head_t ws_wait;
+} btrfs_comp_ws[BTRFS_COMPRESS_TYPES];
 
 static const struct btrfs_compress_op * const btrfs_compress_op[] = {
 	&btrfs_zlib_compress,
@@ -761,10 +763,10 @@ void __init btrfs_init_compress(void)
 	int i;
 
 	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
-		INIT_LIST_HEAD(&comp_idle_workspace[i]);
-		spin_lock_init(&comp_workspace_lock[i]);
-		atomic_set(&comp_alloc_workspace[i], 0);
-		init_waitqueue_head(&comp_workspace_wait[i]);
+		INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
+		spin_lock_init(&btrfs_comp_ws[i].ws_lock);
+		atomic_set(&btrfs_comp_ws[i].alloc_ws, 0);
+		init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);
 	}
 }

@@ -778,38 +780,38 @@ static struct list_head *find_workspace(int type)
 	int cpus = num_online_cpus();
 	int idx = type - 1;
 
-	struct list_head *idle_workspace = &comp_idle_workspace[idx];
-	spinlock_t *workspace_lock = &comp_workspace_lock[idx];
-	atomic_t *alloc_workspace = &comp_alloc_workspace[idx];
-	wait_queue_head_t *workspace_wait = &comp_workspace_wait[idx];
-	int *num_workspace = &comp_num_workspace[idx];
+	struct list_head *idle_ws = &btrfs_comp_ws[idx].idle_ws;
+	spinlock_t *ws_lock = &btrfs_comp_ws[idx].ws_lock;
+	atomic_t *alloc_ws = &btrfs_comp_ws[idx].alloc_ws;
+	wait_queue_head_t *ws_wait = &btrfs_comp_ws[idx].ws_wait;
+	int *num_ws = &btrfs_comp_ws[idx].num_ws;
 again:
-	spin_lock(workspace_lock);
-	if (!list_empty(idle_workspace)) {
-		workspace = idle_workspace->next;
+	spin_lock(ws_lock);
+	if (!list_empty(idle_ws)) {
+		workspace = idle_ws->next;
 		list_del(workspace);
-		(*num_workspace)--;
-		spin_unlock(workspace_lock);
+		(*num_ws)--;
+		spin_unlock(ws_lock);
 		return workspace;
 
 	}
-	if (atomic_read(alloc_workspace) > cpus) {
+	if (atomic_read(alloc_ws) > cpus) {
 		DEFINE_WAIT(wait);
 
-		spin_unlock(workspace_lock);
-		prepare_to_wait(workspace_wait, &wait, TASK_UNINTERRUPTIBLE);
-		if (atomic_read(alloc_workspace) > cpus && !*num_workspace)
+		spin_unlock(ws_lock);
+		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
+		if (atomic_read(alloc_ws) > cpus && !*num_ws)
 			schedule();
-		finish_wait(workspace_wait, &wait);
+		finish_wait(ws_wait, &wait);
 		goto again;
 	}
-	atomic_inc(alloc_workspace);
-	spin_unlock(workspace_lock);
+	atomic_inc(alloc_ws);
+	spin_unlock(ws_lock);
 
 	workspace = btrfs_compress_op[idx]->alloc_workspace();
 	if (IS_ERR(workspace)) {
-		atomic_dec(alloc_workspace);
-		wake_up(workspace_wait);
+		atomic_dec(alloc_ws);
+		wake_up(ws_wait);
 	}
 	return workspace;
 }
@@ -821,30 +823,30 @@ static struct list_head *find_workspace(int type)
 static void free_workspace(int type, struct list_head *workspace)
 {
 	int idx = type - 1;
-	struct list_head *idle_workspace = &comp_idle_workspace[idx];
-	spinlock_t *workspace_lock = &comp_workspace_lock[idx];
-	atomic_t *alloc_workspace = &comp_alloc_workspace[idx];
-	wait_queue_head_t *workspace_wait = &comp_workspace_wait[idx];
-	int *num_workspace = &comp_num_workspace[idx];
-
-	spin_lock(workspace_lock);
-	if (*num_workspace < num_online_cpus()) {
-		list_add(workspace, idle_workspace);
-		(*num_workspace)++;
-		spin_unlock(workspace_lock);
+	struct list_head *idle_ws = &btrfs_comp_ws[idx].idle_ws;
+	spinlock_t *ws_lock = &btrfs_comp_ws[idx].ws_lock;
+	atomic_t *alloc_ws = &btrfs_comp_ws[idx].alloc_ws;
+	wait_queue_head_t *ws_wait = &btrfs_comp_ws[idx].ws_wait;
+	int *num_ws = &btrfs_comp_ws[idx].num_ws;
+
+	spin_lock(ws_lock);
+	if (*num_ws < num_online_cpus()) {
+		list_add(workspace, idle_ws);
+		(*num_ws)++;
+		spin_unlock(ws_lock);
 		goto wake;
 	}
-	spin_unlock(workspace_lock);
+	spin_unlock(ws_lock);
 
 	btrfs_compress_op[idx]->free_workspace(workspace);
-	atomic_dec(alloc_workspace);
+	atomic_dec(alloc_ws);
 wake:
 	/*
 	 * Make sure counter is updated before we wake up waiters.
 	 */
 	smp_mb();
-	if (waitqueue_active(workspace_wait))
-		wake_up(workspace_wait);
+	if (waitqueue_active(ws_wait))
+		wake_up(ws_wait);
 }
 
 /*
@@ -856,11 +858,11 @@ static void free_workspaces(void)
 	int i;
 
 	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
-		while (!list_empty(&comp_idle_workspace[i])) {
-			workspace = comp_idle_workspace[i].next;
+		while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
+			workspace = btrfs_comp_ws[i].idle_ws.next;
 			list_del(workspace);
 			btrfs_compress_op[i]->free_workspace(workspace);
-			atomic_dec(&comp_alloc_workspace[i]);
+			atomic_dec(&btrfs_comp_ws[i].alloc_ws);
 		}
 	}
 }
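Taken together, find_workspace()/free_workspace() above implement a bounded object pool: reuse an idle workspace if one is cached, allocate a new one while under a cap tied to the online CPU count, otherwise sleep until another thread returns one. The struct consolidation itself changes no behavior; it merely replaces five parallel arrays indexed by compression type with one array of per-type bookkeeping structs so related fields sit together. A user-space sketch of the same pool discipline under pthreads (names, sizes, and the malloc stand-in are illustrative; allocation-failure handling elided):

	#include <pthread.h>
	#include <stdlib.h>

	#define MAX_WS 8 /* stands in for num_online_cpus() */

	struct ws_pool {
		pthread_mutex_t lock;
		pthread_cond_t wait;
		void *idle[MAX_WS]; /* cached idle workspaces */
		int num_idle;       /* the kernel's *num_ws */
		int allocated;      /* the kernel's alloc_ws */
	};

	static struct ws_pool pool = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wait = PTHREAD_COND_INITIALIZER,
	};

	static void *find_workspace(void)
	{
		void *ws;

		pthread_mutex_lock(&pool.lock);
		for (;;) {
			if (pool.num_idle > 0) { /* fast path: reuse a cached one */
				ws = pool.idle[--pool.num_idle];
				pthread_mutex_unlock(&pool.lock);
				return ws;
			}
			if (pool.allocated < MAX_WS) { /* under the cap: allocate */
				pool.allocated++;
				pthread_mutex_unlock(&pool.lock);
				return malloc(4096); /* alloc_workspace() stand-in */
			}
			/* cap reached: sleep until free_workspace() signals,
			 * then recheck; this is the kernel's "goto again" */
			pthread_cond_wait(&pool.wait, &pool.lock);
		}
	}

	static void free_workspace(void *ws)
	{
		pthread_mutex_lock(&pool.lock);
		if (pool.num_idle < MAX_WS) {
			pool.idle[pool.num_idle++] = ws; /* cache for reuse */
		} else {
			free(ws);
			pool.allocated--;
		}
		pthread_mutex_unlock(&pool.lock);
		pthread_cond_signal(&pool.wait); /* wake one waiter, if any */
	}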
4 changes: 2 additions & 2 deletions fs/btrfs/ctree.c
@@ -4940,8 +4940,8 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 {
 	struct extent_buffer *leaf;
 	struct btrfs_item *item;
-	int last_off;
-	int dsize = 0;
+	u32 last_off;
+	u32 dsize = 0;
 	int ret = 0;
 	int wret;
 	int i;
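last_off and dsize hold item offsets and sizes within a leaf, which the btrfs item accessors hand back as u32, so the signed int here was presumably a type mismatch rather than a live bug (leaf sizes stay far below INT_MAX). A short illustration of the general hazard class the unsigned type rules out (values contrived, not reachable in btrfs):

	#include <stdio.h>

	int main(void)
	{
		unsigned int dsize = 0x90000000u; /* a u32 above INT_MAX */
		int as_int = (int)dsize;          /* implementation-defined conversion */

		printf("as u32: %u\n", dsize);    /* 2415919104 */
		printf("as int: %d\n", as_int);   /* typically -1879048192 */
		return 0;
	}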
35 changes: 22 additions & 13 deletions fs/btrfs/disk-io.c
@@ -3476,22 +3476,31 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
 
 int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
 {
-	if ((flags & (BTRFS_BLOCK_GROUP_DUP |
-		      BTRFS_BLOCK_GROUP_RAID0 |
-		      BTRFS_AVAIL_ALLOC_BIT_SINGLE)) ||
-	    ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0))
-		return 0;
+	int raid_type;
+	int min_tolerated = INT_MAX;
 
-	if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
-		     BTRFS_BLOCK_GROUP_RAID5 |
-		     BTRFS_BLOCK_GROUP_RAID10))
-		return 1;
+	if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
+	    (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
+		min_tolerated = min(min_tolerated,
+				    btrfs_raid_array[BTRFS_RAID_SINGLE].
+				    tolerated_failures);
 
-	if (flags & BTRFS_BLOCK_GROUP_RAID6)
-		return 2;
+	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
+		if (raid_type == BTRFS_RAID_SINGLE)
+			continue;
+		if (!(flags & btrfs_raid_group[raid_type]))
+			continue;
+		min_tolerated = min(min_tolerated,
+				    btrfs_raid_array[raid_type].
+				    tolerated_failures);
+	}
 
-	pr_warn("BTRFS: unknown raid type: %llu\n", flags);
-	return 0;
+	if (min_tolerated == INT_MAX) {
+		pr_warn("BTRFS: unknown raid flag: %llu\n", flags);
+		min_tolerated = 0;
+	}
+
+	return min_tolerated;
 }
 
 int btrfs_calc_num_tolerated_disk_barrier_failures(
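The rewrite turns a chain of hard-coded profile checks into a table walk: every RAID profile present in the flags contributes its tolerated_failures value and the minimum wins, so supporting a new profile means adding a table row rather than another branch. A self-contained sketch of the idea (flag bits and failure counts here are assumptions for illustration, not values quoted from btrfs_raid_array):

	#include <limits.h>
	#include <stdio.h>

	/* Illustrative table standing in for the kernel's btrfs_raid_array. */
	struct raid_attr {
		unsigned long long flag;   /* bit in the block-group flags */
		int tolerated_failures;    /* devices we can lose */
	};

	static const struct raid_attr raid_array[] = {
		{ 1ULL << 0, 0 }, /* RAID0  */
		{ 1ULL << 1, 1 }, /* RAID1  */
		{ 1ULL << 2, 1 }, /* RAID5  */
		{ 1ULL << 3, 2 }, /* RAID6  */
		{ 1ULL << 4, 1 }, /* RAID10 */
	};

	static int num_tolerated_failures(unsigned long long flags)
	{
		int min_tolerated = INT_MAX;
		size_t i;

		/* weakest profile present determines the answer */
		for (i = 0; i < sizeof(raid_array) / sizeof(raid_array[0]); i++)
			if ((flags & raid_array[i].flag) &&
			    raid_array[i].tolerated_failures < min_tolerated)
				min_tolerated = raid_array[i].tolerated_failures;

		return min_tolerated == INT_MAX ? 0 : min_tolerated;
	}

	int main(void)
	{
		/* RAID6 + RAID1 present: the weaker profile (RAID1, 1) wins. */
		printf("%d\n", num_tolerated_failures((1ULL << 3) | (1ULL << 1)));
		return 0;
	}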
58 changes: 25 additions & 33 deletions fs/btrfs/extent-tree.c
@@ -3822,7 +3822,8 @@ static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
 {
 	u64 num_devices = root->fs_info->fs_devices->rw_devices;
 	u64 target;
-	u64 tmp;
+	u64 raid_type;
+	u64 allowed = 0;
 
 	/*
 	 * see if restripe for this chunk_type is in progress, if so
@@ -3840,31 +3841,26 @@ static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
 	spin_unlock(&root->fs_info->balance_lock);
 
 	/* First, mask out the RAID levels which aren't possible */
-	if (num_devices == 1)
-		flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
-			   BTRFS_BLOCK_GROUP_RAID5);
-	if (num_devices < 3)
-		flags &= ~BTRFS_BLOCK_GROUP_RAID6;
-	if (num_devices < 4)
-		flags &= ~BTRFS_BLOCK_GROUP_RAID10;
-
-	tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
-		       BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
-		       BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
-	flags &= ~tmp;
-
-	if (tmp & BTRFS_BLOCK_GROUP_RAID6)
-		tmp = BTRFS_BLOCK_GROUP_RAID6;
-	else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
-		tmp = BTRFS_BLOCK_GROUP_RAID5;
-	else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
-		tmp = BTRFS_BLOCK_GROUP_RAID10;
-	else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
-		tmp = BTRFS_BLOCK_GROUP_RAID1;
-	else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
-		tmp = BTRFS_BLOCK_GROUP_RAID0;
-
-	return extended_to_chunk(flags | tmp);
+	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
+		if (num_devices >= btrfs_raid_array[raid_type].devs_min)
+			allowed |= btrfs_raid_group[raid_type];
+	}
+	allowed &= flags;
+
+	if (allowed & BTRFS_BLOCK_GROUP_RAID6)
+		allowed = BTRFS_BLOCK_GROUP_RAID6;
+	else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
+		allowed = BTRFS_BLOCK_GROUP_RAID5;
+	else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
+		allowed = BTRFS_BLOCK_GROUP_RAID10;
+	else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
+		allowed = BTRFS_BLOCK_GROUP_RAID1;
+	else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
+		allowed = BTRFS_BLOCK_GROUP_RAID0;
+
+	flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
+
+	return extended_to_chunk(flags | allowed);
 }
 
 static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
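Same table-driven theme as the disk-io.c hunk above: instead of hand-maintaining which profiles a given device count can support, the loop asks each profile for its devs_min. A compact sketch of that masking step, which compiles as a standalone translation unit (the devs_min values are assumptions for illustration, not quoted from the kernel table):

	/* Sketch of the devs_min filter; not the kernel's btrfs_raid_array. */
	struct raid_limits {
		unsigned long long flag;
		int devs_min;
	};

	static const struct raid_limits limits[] = {
		{ 1ULL << 0, 2 }, /* RAID0  */
		{ 1ULL << 1, 2 }, /* RAID1  */
		{ 1ULL << 2, 2 }, /* RAID5  */
		{ 1ULL << 3, 3 }, /* RAID6  */
		{ 1ULL << 4, 4 }, /* RAID10 */
	};

	static unsigned long long allowed_profiles(unsigned long long flags,
						   unsigned long long num_devices)
	{
		unsigned long long allowed = 0;
		size_t i;

		/* keep only the profiles this many devices can support */
		for (i = 0; i < sizeof(limits) / sizeof(limits[0]); i++)
			if (num_devices >= (unsigned long long)limits[i].devs_min)
				allowed |= limits[i].flag;

		return flags & allowed;
	}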
@@ -4891,13 +4887,9 @@ static struct btrfs_block_rsv *get_block_rsv(
 {
 	struct btrfs_block_rsv *block_rsv = NULL;
 
-	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
-		block_rsv = trans->block_rsv;
-
-	if (root == root->fs_info->csum_root && trans->adding_csums)
-		block_rsv = trans->block_rsv;
-
-	if (root == root->fs_info->uuid_root)
+	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
+	    (root == root->fs_info->csum_root && trans->adding_csums) ||
+	    (root == root->fs_info->uuid_root))
 		block_rsv = trans->block_rsv;
 
 	if (!block_rsv)
5 changes: 1 addition & 4 deletions fs/btrfs/file.c
@@ -1469,7 +1469,6 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 	u64 release_bytes = 0;
 	u64 lockstart;
 	u64 lockend;
-	unsigned long first_index;
 	size_t num_written = 0;
 	int nrptrs;
 	int ret = 0;
@@ -1485,8 +1484,6 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 	if (!pages)
 		return -ENOMEM;
 
-	first_index = pos >> PAGE_CACHE_SHIFT;
-
 	while (iov_iter_count(i) > 0) {
 		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
 		size_t write_bytes = min(iov_iter_count(i),
@@ -2266,7 +2263,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 	u64 drop_end;
 	int ret = 0;
 	int err = 0;
-	int rsv_count;
+	unsigned int rsv_count;
 	bool same_page;
 	bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES);
 	u64 ino_size;
2 changes: 1 addition & 1 deletion fs/btrfs/free-space-cache.c
@@ -1215,7 +1215,7 @@ int btrfs_wait_cache_io(struct btrfs_root *root,
  * @offset - the offset for the key we'll insert
  *
  * This function writes out a free space cache struct to disk for quick recovery
- * on mount. This will return 0 if it was successfull in writing the cache out,
+ * on mount. This will return 0 if it was successful in writing the cache out,
  * or an errno if it was not.
  */
 static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
5 changes: 2 additions & 3 deletions fs/btrfs/inode.c
@@ -1864,15 +1864,15 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
 			  u64 bio_offset)
 {
 	struct btrfs_root *root = BTRFS_I(inode)->root;
+	enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA;
 	int ret = 0;
 	int skip_sum;
-	int metadata = 0;
 	int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
 
 	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
 
 	if (btrfs_is_free_space_inode(inode))
-		metadata = 2;
+		metadata = BTRFS_WQ_ENDIO_FREE_SPACE;
 
 	if (!(rw & REQ_WRITE)) {
 		ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
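The metadata change above swaps the magic 0/2 pair for enum btrfs_wq_endio_type, so the variable's legal values are documented where it is declared and the compiler can flag mismatched assignments. Roughly the shape involved; only the DATA and FREE_SPACE enumerators are confirmed by this hunk, the middle member is assumed:

	/* Assumed sketch of the enum (the kernel's definition lives elsewhere
	 * in fs/btrfs); only DATA and FREE_SPACE appear in this diff. */
	enum btrfs_wq_endio_type {
		BTRFS_WQ_ENDIO_DATA,       /* replaces the magic 0 */
		BTRFS_WQ_ENDIO_METADATA,   /* assumed member */
		BTRFS_WQ_ENDIO_FREE_SPACE, /* replaces the magic 2 */
	};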
@@ -2602,7 +2602,6 @@ static void free_sa_defrag_extent(struct new_sa_defrag_extent *new)
 		return;
 
 	list_for_each_entry_safe(old, tmp, &new->head, list) {
-		list_del(&old->list);
 		kfree(old);
 	}
 	kfree(new);
3 changes: 1 addition & 2 deletions fs/btrfs/ioctl.c
@@ -2699,7 +2699,6 @@ static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg)
 {
 	struct btrfs_ioctl_fs_info_args *fi_args;
 	struct btrfs_device *device;
-	struct btrfs_device *next;
 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
 	int ret = 0;
 
@@ -2711,7 +2710,7 @@ static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg)
 	fi_args->num_devices = fs_devices->num_devices;
 	memcpy(&fi_args->fsid, root->fs_info->fsid, sizeof(fi_args->fsid));
 
-	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
+	list_for_each_entry(device, &fs_devices->devices, dev_list) {
 		if (device->devid > fi_args->max_id)
 			fi_args->max_id = device->devid;
 	}
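Both list cleanups in this commit follow the same rule: the _safe iterator and its extra cursor are only needed when the loop body frees or unlinks the entry it stands on. A read-only scan like the devid walk above wants the plain iterator, while a wholesale teardown like free_sa_defrag_extent() needs the cached successor but no per-node list_del(), since nothing will look at the links of a list being destroyed. A generic sketch of the two cases (plain C singly linked list, not the kernel's struct list_head):

	#include <stdlib.h>

	struct dev {
		struct dev *next;
		unsigned long long devid;
	};

	/* Read-only walk: one cursor is enough (the list_for_each_entry case). */
	static unsigned long long max_devid(const struct dev *head)
	{
		unsigned long long max = 0;
		const struct dev *d;

		for (d = head; d; d = d->next)
			if (d->devid > max)
				max = d->devid;
		return max;
	}

	/* Destructive walk: cache the successor before freeing the node
	 * (the list_for_each_entry_safe case); no unlinking is required. */
	static void destroy(struct dev *head)
	{
		struct dev *d = head, *next;

		while (d) {
			next = d->next;
			free(d);
			d = next;
		}
	}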