Skip to content

Commit

Permalink
GFS2: speed up delete/unlink performance for large files
Browse files Browse the repository at this point in the history
This patch improves the performance of delete/unlink
operations in a GFS2 file system where the files are large
by adding a layer of metadata read-ahead for indirect blocks.
Mileage will vary, but on my system, the time to delete an
8.6G file dropped from 22 seconds to about 4.5 seconds.

Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
  • Loading branch information
Bob Peterson authored and Steven Whitehouse committed Oct 21, 2011
1 parent f75bbfb commit bd5437a
Showing 1 changed file with 23 additions and 3 deletions.
26 changes: 23 additions & 3 deletions fs/gfs2/bmap.c
Original file line number Diff line number Diff line change
Expand Up @@ -831,7 +831,7 @@ static int recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh,
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct buffer_head *bh = NULL;
__be64 *top, *bottom;
__be64 *top, *bottom, *t2;
u64 bn;
int error;
int mh_size = sizeof(struct gfs2_meta_header);
Expand Down Expand Up @@ -859,7 +859,27 @@ static int recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh,
if (error)
goto out;

if (height < ip->i_height - 1)
if (height < ip->i_height - 1) {
struct buffer_head *rabh;

for (t2 = top; t2 < bottom; t2++, first = 0) {
if (!*t2)
continue;

bn = be64_to_cpu(*t2);
rabh = gfs2_getbuf(ip->i_gl, bn, CREATE);
if (trylock_buffer(rabh)) {
if (buffer_uptodate(rabh)) {
unlock_buffer(rabh);
brelse(rabh);
continue;
}
rabh->b_end_io = end_buffer_read_sync;
submit_bh(READA | REQ_META, rabh);
continue;
}
brelse(rabh);
}
for (; top < bottom; top++, first = 0) {
if (!*top)
continue;
Expand All @@ -871,7 +891,7 @@ static int recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh,
if (error)
break;
}

}
out:
brelse(bh);
return error;
Expand Down

0 comments on commit bd5437a

Please sign in to comment.