[PATCH] ext3 and jbd cleanup: remove whitespace
Remove whitespace from ext3 and jbd, before we clone ext4.

Signed-off-by: Mingming Cao <cmm@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Mingming Cao authored and Linus Torvalds committed Sep 27, 2006
1 parent e7ab8d6 commit ae6ddcc
Showing 17 changed files with 307 additions and 307 deletions.
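
Every hunk shown below follows the same mechanical pattern: a line carrying trailing spaces or tabs is deleted and immediately re-added without them, which is why the additions and deletions balance exactly at 307 each. As a rough illustration of that normalization (hypothetical; the commit itself does not record which tool the author used), a small C filter that strips trailing whitespace from each input line could look like this:

/* Hypothetical illustration only: strip trailing spaces and tabs from each
 * line of stdin, mirroring the kind of cleanup applied in this commit. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[4096];

	while (fgets(line, sizeof(line), stdin)) {
		size_t len = strcspn(line, "\n");	/* length without the newline */
		int had_newline = line[len] == '\n';

		while (len > 0 && (line[len - 1] == ' ' || line[len - 1] == '\t'))
			len--;				/* drop trailing blanks */

		fwrite(line, 1, len, stdout);
		if (had_newline)
			putchar('\n');
	}
	return 0;
}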
16 changes: 8 additions & 8 deletions fs/ext3/balloc.c
@@ -74,7 +74,7 @@ struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
}

/*
* Read the bitmap for a given block_group, reading into the specified
* Read the bitmap for a given block_group, reading into the specified
* slot in the superblock's bitmap cache.
*
* Return buffer_head on success or NULL in case of failure.
@@ -419,8 +419,8 @@ void ext3_free_blocks_sb(handle_t *handle, struct super_block *sb,
}
/* @@@ This prevents newly-allocated data from being
* freed and then reallocated within the same
* transaction.
*
* transaction.
*
* Ideally we would want to allow that to happen, but to
* do so requires making journal_forget() capable of
* revoking the queued write of a data block, which
@@ -433,7 +433,7 @@ void ext3_free_blocks_sb(handle_t *handle, struct super_block *sb,
* safe not to set the allocation bit in the committed
* bitmap, because we know that there is no outstanding
* activity on the buffer any more and so it is safe to
* reallocate it.
* reallocate it.
*/
BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
J_ASSERT_BH(bitmap_bh,
@@ -518,7 +518,7 @@ void ext3_free_blocks(handle_t *handle, struct inode *inode,
* data would allow the old block to be overwritten before the
* transaction committed (because we force data to disk before commit).
* This would lead to corruption if we crashed between overwriting the
* data and committing the delete.
* data and committing the delete.
*
* @@@ We may want to make this allocation behaviour conditional on
* data-writes at some point, and disable it for metadata allocations or
@@ -584,7 +584,7 @@ find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,

if (start > 0) {
/*
* The goal was occupied; search forward for a free
* The goal was occupied; search forward for a free
* block within the next XX blocks.
*
* end_goal is more or less random, but it has to be
@@ -1194,7 +1194,7 @@ int ext3_should_retry_alloc(struct super_block *sb, int *retries)
/*
* ext3_new_block uses a goal block to assist allocation. If the goal is
* free, or there is a free block within 32 blocks of the goal, that block
* is allocated. Otherwise a forward search is made for a free block; within
* is allocated. Otherwise a forward search is made for a free block; within
* each block group the search first looks for an entire free byte in the block
* bitmap, and then for any free bit if that fails.
* This function also updates quota and i_blocks field.
@@ -1303,7 +1303,7 @@ ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode,
smp_rmb();

/*
* Now search the rest of the groups. We assume that
* Now search the rest of the groups. We assume that
* i and gdp correctly point to the last group visited.
*/
for (bgi = 0; bgi < ngroups; bgi++) {
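
The balloc.c comments above describe the allocator's search order: try the goal block, then a free block within the next 32 blocks, then an entirely free byte in the group's block bitmap, and finally any free bit. A condensed sketch of that search order (hypothetical and simplified; the real logic lives in ext3_new_blocks() and find_next_usable_block() and operates on journaled buffer heads) might look like:

/* Hypothetical, simplified sketch of the bitmap search order described in
 * the fs/ext3/balloc.c comments above; not the kernel implementation. */
#define GOAL_WINDOW 32	/* "a free block within 32 blocks of the goal" */

int test_bit_le(const unsigned char *bitmap, long bit)
{
	return (bitmap[bit >> 3] >> (bit & 7)) & 1;
}

long find_free_near_goal(const unsigned char *bitmap, long nbits, long goal)
{
	long i, end = goal + GOAL_WINDOW < nbits ? goal + GOAL_WINDOW : nbits;

	/* 1. The goal itself, or any free block in the next 32. */
	for (i = goal; i < end; i++)
		if (!test_bit_le(bitmap, i))
			return i;

	/* 2. A whole free byte (8 aligned free blocks) later in the bitmap. */
	for (i = (goal >> 3) + 1; i < (nbits >> 3); i++)
		if (bitmap[i] == 0)
			return i << 3;

	/* 3. Otherwise, any free bit after the goal. */
	for (i = goal; i < nbits; i++)
		if (!test_bit_le(bitmap, i))
			return i;

	return -1;	/* group exhausted; the caller moves to the next group */
}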
2 changes: 1 addition & 1 deletion fs/ext3/bitmap.c
@@ -20,7 +20,7 @@ unsigned long ext3_count_free (struct buffer_head * map, unsigned int numchars)
unsigned int i;
unsigned long sum = 0;

if (!map)
if (!map)
return (0);
for (i = 0; i < numchars; i++)
sum += nibblemap[map->b_data[i] & 0xf] +
14 changes: 7 additions & 7 deletions fs/ext3/dir.c
@@ -59,7 +59,7 @@ static unsigned char get_dtype(struct super_block *sb, int filetype)

return (ext3_filetype_table[filetype]);
}


int ext3_check_dir_entry (const char * function, struct inode * dir,
struct ext3_dir_entry_2 * de,
@@ -162,7 +162,7 @@ static int ext3_readdir(struct file * filp,
* to make sure. */
if (filp->f_version != inode->i_version) {
for (i = 0; i < sb->s_blocksize && i < offset; ) {
de = (struct ext3_dir_entry_2 *)
de = (struct ext3_dir_entry_2 *)
(bh->b_data + i);
/* It's too expensive to do a full
* dirent test each time round this
@@ -181,7 +181,7 @@ static int ext3_readdir(struct file * filp,
filp->f_version = inode->i_version;
}

while (!error && filp->f_pos < inode->i_size
while (!error && filp->f_pos < inode->i_size
&& offset < sb->s_blocksize) {
de = (struct ext3_dir_entry_2 *) (bh->b_data + offset);
if (!ext3_check_dir_entry ("ext3_readdir", inode, de,
@@ -229,7 +229,7 @@ static int ext3_readdir(struct file * filp,
/*
* These functions convert from the major/minor hash to an f_pos
* value.
*
*
* Currently we only use major hash numer. This is unfortunate, but
* on 32-bit machines, the same VFS interface is used for lseek and
* llseek, so if we use the 64 bit offset, then the 32-bit versions of
@@ -250,7 +250,7 @@ static int ext3_readdir(struct file * filp,
struct fname {
__u32 hash;
__u32 minor_hash;
struct rb_node rb_hash;
struct rb_node rb_hash;
struct fname *next;
__u32 inode;
__u8 name_len;
@@ -410,7 +410,7 @@ static int call_filldir(struct file * filp, void * dirent,
curr_pos = hash2pos(fname->hash, fname->minor_hash);
while (fname) {
error = filldir(dirent, fname->name,
fname->name_len, curr_pos,
fname->name_len, curr_pos,
fname->inode,
get_dtype(sb, fname->file_type));
if (error) {
@@ -465,7 +465,7 @@ static int ext3_dx_readdir(struct file * filp,
/*
* Fill the rbtree if we have no more entries,
* or the inode has changed since we last read in the
* cached entries.
* cached entries.
*/
if ((!info->curr_node) ||
(filp->f_version != inode->i_version)) {
2 changes: 1 addition & 1 deletion fs/ext3/file.c
@@ -100,7 +100,7 @@ ext3_file_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t

force_commit:
err = ext3_force_commit(inode->i_sb);
if (err)
if (err)
return err;
return ret;
}
6 changes: 3 additions & 3 deletions fs/ext3/fsync.c
@@ -8,14 +8,14 @@
* Universite Pierre et Marie Curie (Paris VI)
* from
* linux/fs/minix/truncate.c Copyright (C) 1991, 1992 Linus Torvalds
*
*
* ext3fs fsync primitive
*
* Big-endian to little-endian byte-swapping/bitmaps by
* David S. Miller (davem@caip.rutgers.edu), 1995
*
*
* Removed unnecessary code duplication for little endian machines
* and excessive __inline__s.
* and excessive __inline__s.
* Andi Kleen, 1997
*
* Major simplications and cleanup - we only need to do the metadata, because
6 changes: 3 additions & 3 deletions fs/ext3/hash.c
@@ -4,7 +4,7 @@
* Copyright (C) 2002 by Theodore Ts'o
*
* This file is released under the GPL v2.
*
*
* This file may be redistributed under the terms of the GNU Public
* License.
*/
@@ -80,11 +80,11 @@ static void str2hashbuf(const char *msg, int len, __u32 *buf, int num)
* Returns the hash of a filename. If len is 0 and name is NULL, then
* this function can be used to test whether or not a hash version is
* supported.
*
*
* The seed is an 4 longword (32 bits) "secret" which can be used to
* uniquify a hash. If the seed is all zero's, then some default seed
* may be used.
*
*
* A particular hash version specifies whether or not the seed is
* represented, and whether or not the returned hash is 32 bits or 64
* bits. 32 bit hashes will return 0 for the minor hash.
48 changes: 24 additions & 24 deletions fs/ext3/ialloc.c
@@ -216,7 +216,7 @@ static int find_group_dir(struct super_block *sb, struct inode *parent)
continue;
if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
continue;
if (!best_desc ||
if (!best_desc ||
(le16_to_cpu(desc->bg_free_blocks_count) >
le16_to_cpu(best_desc->bg_free_blocks_count))) {
best_group = group;
@@ -226,30 +226,30 @@ static int find_group_dir(struct super_block *sb, struct inode *parent)
return best_group;
}

/*
* Orlov's allocator for directories.
*
/*
* Orlov's allocator for directories.
*
* We always try to spread first-level directories.
*
* If there are blockgroups with both free inodes and free blocks counts
* not worse than average we return one with smallest directory count.
* Otherwise we simply return a random group.
*
* For the rest rules look so:
*
* It's OK to put directory into a group unless
* it has too many directories already (max_dirs) or
* it has too few free inodes left (min_inodes) or
* it has too few free blocks left (min_blocks) or
* it's already running too large debt (max_debt).
* Parent's group is prefered, if it doesn't satisfy these
* conditions we search cyclically through the rest. If none
* of the groups look good we just look for a group with more
* free inodes than average (starting at parent's group).
*
* Debt is incremented each time we allocate a directory and decremented
* when we allocate an inode, within 0--255.
*/
* If there are blockgroups with both free inodes and free blocks counts
* not worse than average we return one with smallest directory count.
* Otherwise we simply return a random group.
*
* For the rest rules look so:
*
* It's OK to put directory into a group unless
* it has too many directories already (max_dirs) or
* it has too few free inodes left (min_inodes) or
* it has too few free blocks left (min_blocks) or
* it's already running too large debt (max_debt).
* Parent's group is prefered, if it doesn't satisfy these
* conditions we search cyclically through the rest. If none
* of the groups look good we just look for a group with more
* free inodes than average (starting at parent's group).
*
* Debt is incremented each time we allocate a directory and decremented
* when we allocate an inode, within 0--255.
*/

#define INODE_COST 64
#define BLOCK_COST 256
@@ -454,7 +454,7 @@ struct inode *ext3_new_inode(handle_t *handle, struct inode * dir, int mode)
group = find_group_dir(sb, dir);
else
group = find_group_orlov(sb, dir);
} else
} else
group = find_group_other(sb, dir);

err = -ENOSPC;
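
The long comment in the ialloc.c hunk above spells out Orlov's placement rules for directories in prose. A condensed, hypothetical sketch of that acceptance test and the cyclic search (greatly simplified; the kernel's actual logic, including the debt bookkeeping and the fallback to groups with above-average free inodes, lives in find_group_orlov() in fs/ext3/ialloc.c) could read:

/* Hypothetical sketch of the Orlov acceptance rules quoted above; not the
 * kernel implementation. */
struct group_stats {
	unsigned int dirs, free_inodes, free_blocks, debt;
};

int group_is_acceptable(const struct group_stats *g, unsigned int max_dirs,
			unsigned int min_inodes, unsigned int min_blocks,
			unsigned int max_debt)
{
	if (g->dirs >= max_dirs)
		return 0;	/* too many directories already */
	if (g->free_inodes < min_inodes)
		return 0;	/* too few free inodes left */
	if (g->free_blocks < min_blocks)
		return 0;	/* too few free blocks left */
	if (g->debt > max_debt)
		return 0;	/* already running too large a debt */
	return 1;
}

/* Prefer the parent's group, then search cyclically through the rest. */
int pick_directory_group(const struct group_stats *groups, int ngroups,
			 int parent_group, unsigned int max_dirs,
			 unsigned int min_inodes, unsigned int min_blocks,
			 unsigned int max_debt)
{
	int i;

	for (i = 0; i < ngroups; i++) {
		int g = (parent_group + i) % ngroups;

		if (group_is_acceptable(&groups[g], max_dirs, min_inodes,
					min_blocks, max_debt))
			return g;
	}
	return -1;	/* caller falls back to above-average free inodes */
}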