btrfs: remove unlikely from data-dependent branches and slow paths
These are branch hints that obviously depend on the data being
processed; the CPU branch predictor will do a better job based on the
actual load. It also does not make sense to use the hints in slow paths
that do a lot of other operations like locking, waiting or IO.

Signed-off-by: David Sterba <dsterba@suse.cz>
David Sterba committed Oct 2, 2014
1 parent 5d99a99 commit ee39b43
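For context, a minimal sketch (not part of this commit) of what the hint macros do and where they are still justified. The macro definitions are simplified from include/linux/compiler.h; struct item, decompress() and process_item() are hypothetical names made up for illustration, and malloc() stands in for a kernel allocator. likely()/unlikely() expand to __builtin_expect(), which only biases the compiler's static code layout: a hint pays off for a branch that is almost never taken (an error path), but not for a branch whose outcome depends on the data being processed, where the hardware branch predictor adapts at run time, nor in a slow path dominated by locking, waiting or IO.

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

/* Simplified versions of the kernel's hint macros (include/linux/compiler.h). */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Hypothetical data type and helper, for illustration only. */
struct item {
	size_t len;
	bool compressed;
};

static void decompress(struct item *it, void *buf)
{
	(void)it;
	(void)buf;	/* stub */
}

static int process_item(struct item *it)
{
	/* Justified hint: allocation failure is a genuinely rare error path. */
	void *buf = malloc(it->len);
	if (unlikely(!buf))
		return -1;

	/*
	 * No hint: whether the item is compressed depends entirely on the
	 * data being processed, so the run-time branch predictor will do a
	 * better job than a static layout hint.
	 */
	if (it->compressed)
		decompress(it, buf);

	free(buf);
	return 0;
}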
Showing 5 changed files with 10 additions and 10 deletions.
fs/btrfs/extent-tree.c (2 additions, 2 deletions)
@@ -9694,15 +9694,15 @@ void btrfs_end_nocow_write(struct btrfs_root *root)
 
 int btrfs_start_nocow_write(struct btrfs_root *root)
 {
-	if (unlikely(atomic_read(&root->will_be_snapshoted)))
+	if (atomic_read(&root->will_be_snapshoted))
 		return 0;
 
 	percpu_counter_inc(&root->subv_writers->counter);
 	/*
 	 * Make sure counter is updated before we check for snapshot creation.
 	 */
 	smp_mb();
-	if (unlikely(atomic_read(&root->will_be_snapshoted))) {
+	if (atomic_read(&root->will_be_snapshoted)) {
 		btrfs_end_nocow_write(root);
 		return 0;
 	}
fs/btrfs/file.c (2 additions, 2 deletions)
@@ -452,7 +452,7 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
 		if (unlikely(copied == 0))
 			break;
 
-		if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
+		if (copied < PAGE_CACHE_SIZE - offset) {
 			offset += copied;
 		} else {
 			pg++;
@@ -1792,7 +1792,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
 	if (sync)
 		atomic_inc(&BTRFS_I(inode)->sync_writers);
 
-	if (unlikely(file->f_flags & O_DIRECT)) {
+	if (file->f_flags & O_DIRECT) {
 		num_written = __btrfs_direct_write(iocb, from, pos);
 	} else {
 		num_written = __btrfs_buffered_write(file, from, pos);
fs/btrfs/inode.c (4 additions, 4 deletions)
@@ -7802,9 +7802,9 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
 	atomic_inc(&dip->pending_bios);
 
 	while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
-		if (unlikely(map_length < submit_len + bvec->bv_len ||
+		if (map_length < submit_len + bvec->bv_len ||
 		    bio_add_page(bio, bvec->bv_page, bvec->bv_len,
-				 bvec->bv_offset) < bvec->bv_len)) {
+				 bvec->bv_offset) < bvec->bv_len) {
 			/*
 			 * inc the count before we submit the bio so
 			 * we know the end IO handler won't happen before
@@ -8017,8 +8017,8 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
 		ret = btrfs_delalloc_reserve_space(inode, count);
 		if (ret)
 			goto out;
-	} else if (unlikely(test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
-				     &BTRFS_I(inode)->runtime_flags))) {
+	} else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
+			    &BTRFS_I(inode)->runtime_flags)) {
 		inode_dio_done(inode);
 		flags = DIO_LOCKING | DIO_SKIP_HOLES;
 		wakeup = false;
fs/btrfs/ioctl.c (1 addition, 1 deletion)
@@ -3167,7 +3167,7 @@ static void clone_update_extent_map(struct inode *inode,
 					em->start + em->len - 1, 0);
 	}
 
-	if (unlikely(ret))
+	if (ret)
 		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
 			&BTRFS_I(inode)->runtime_flags);
 }
fs/btrfs/transaction.c (1 addition, 1 deletion)
@@ -418,7 +418,7 @@ start_transaction(struct btrfs_root *root, u64 num_items, unsigned int type,
 		/*
 		 * Do the reservation for the relocation root creation
 		 */
-		if (unlikely(need_reserve_reloc_root(root))) {
+		if (need_reserve_reloc_root(root)) {
 			num_bytes += root->nodesize;
 			reloc_reserved = true;
 		}
