Skip to content

Commit

Permalink
[GFS2] Replace rgrp "recent list" with mru list
Browse files Browse the repository at this point in the history
This patch removes the "recent list" which is used during allocation
and replaces it with the (already existing) mru list used during
deletion. The "recent list" was not a true MRU list, leading to a number
of inefficiencies, including a "next" function which made scanning the
list an O(N^2) operation with respect to the number of list elements.

This should increase allocation performance with large numbers of rgrps.
It's also a useful preparation and cleanup before some further changes
which are planned in this area.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
  • Loading branch information
Steven Whitehouse committed Jul 10, 2008
1 parent 209806a commit 9cabcdb
Show file tree
Hide file tree
Showing 3 changed files with 12 additions and 99 deletions.
2 changes: 0 additions & 2 deletions fs/gfs2/incore.h
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,6 @@ struct gfs2_rgrp_host {
struct gfs2_rgrpd {
struct list_head rd_list; /* Link with superblock */
struct list_head rd_list_mru;
struct list_head rd_recent; /* Recently used rgrps */
struct gfs2_glock *rd_gl; /* Glock for this rgrp */
u64 rd_addr; /* grp block disk address */
u64 rd_data0; /* first data location */
Expand Down Expand Up @@ -529,7 +528,6 @@ struct gfs2_sbd {
struct mutex sd_rindex_mutex;
struct list_head sd_rindex_list;
struct list_head sd_rindex_mru_list;
struct list_head sd_rindex_recent_list;
struct gfs2_rgrpd *sd_rindex_forward;
unsigned int sd_rgrps;

Expand Down
1 change: 0 additions & 1 deletion fs/gfs2/ops_fstype.c
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
mutex_init(&sdp->sd_rindex_mutex);
INIT_LIST_HEAD(&sdp->sd_rindex_list);
INIT_LIST_HEAD(&sdp->sd_rindex_mru_list);
INIT_LIST_HEAD(&sdp->sd_rindex_recent_list);

INIT_LIST_HEAD(&sdp->sd_jindex_list);
spin_lock_init(&sdp->sd_jindex_spin);
Expand Down
108 changes: 12 additions & 96 deletions fs/gfs2/rgrp.c
Original file line number Diff line number Diff line change
Expand Up @@ -371,11 +371,6 @@ static void clear_rgrpdi(struct gfs2_sbd *sdp)

spin_lock(&sdp->sd_rindex_spin);
sdp->sd_rindex_forward = NULL;
head = &sdp->sd_rindex_recent_list;
while (!list_empty(head)) {
rgd = list_entry(head->next, struct gfs2_rgrpd, rd_recent);
list_del(&rgd->rd_recent);
}
spin_unlock(&sdp->sd_rindex_spin);

head = &sdp->sd_rindex_list;
Expand Down Expand Up @@ -944,107 +939,30 @@ static struct inode *try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked)
return NULL;
}

/**
* recent_rgrp_first - get first RG from "recent" list
* @sdp: The GFS2 superblock
* @rglast: address of the rgrp used last
*
* Returns: The first rgrp in the recent list
*/

static struct gfs2_rgrpd *recent_rgrp_first(struct gfs2_sbd *sdp,
u64 rglast)
{
struct gfs2_rgrpd *rgd;

spin_lock(&sdp->sd_rindex_spin);

if (rglast) {
list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) {
if (rgrp_contains_block(rgd, rglast))
goto out;
}
}
rgd = NULL;
if (!list_empty(&sdp->sd_rindex_recent_list))
rgd = list_entry(sdp->sd_rindex_recent_list.next,
struct gfs2_rgrpd, rd_recent);
out:
spin_unlock(&sdp->sd_rindex_spin);
return rgd;
}

/**
* recent_rgrp_next - get next RG from "recent" list
* @cur_rgd: current rgrp
* @remove:
*
* Returns: The next rgrp in the recent list
*/

static struct gfs2_rgrpd *recent_rgrp_next(struct gfs2_rgrpd *cur_rgd,
int remove)
static struct gfs2_rgrpd *recent_rgrp_next(struct gfs2_rgrpd *cur_rgd)
{
struct gfs2_sbd *sdp = cur_rgd->rd_sbd;
struct list_head *head;
struct gfs2_rgrpd *rgd;

spin_lock(&sdp->sd_rindex_spin);

head = &sdp->sd_rindex_recent_list;

list_for_each_entry(rgd, head, rd_recent) {
if (rgd == cur_rgd) {
if (cur_rgd->rd_recent.next != head)
rgd = list_entry(cur_rgd->rd_recent.next,
struct gfs2_rgrpd, rd_recent);
else
rgd = NULL;

if (remove)
list_del(&cur_rgd->rd_recent);

goto out;
}
head = &sdp->sd_rindex_mru_list;
if (unlikely(cur_rgd->rd_list_mru.next == head)) {
spin_unlock(&sdp->sd_rindex_spin);
return NULL;
}

rgd = NULL;
if (!list_empty(head))
rgd = list_entry(head->next, struct gfs2_rgrpd, rd_recent);

out:
rgd = list_entry(cur_rgd->rd_list_mru.next, struct gfs2_rgrpd, rd_list_mru);
spin_unlock(&sdp->sd_rindex_spin);
return rgd;
}

/**
* recent_rgrp_add - add an RG to tail of "recent" list
* @new_rgd: The rgrp to add
*
*/

static void recent_rgrp_add(struct gfs2_rgrpd *new_rgd)
{
struct gfs2_sbd *sdp = new_rgd->rd_sbd;
struct gfs2_rgrpd *rgd;
unsigned int count = 0;
unsigned int max = sdp->sd_rgrps / gfs2_jindex_size(sdp);

spin_lock(&sdp->sd_rindex_spin);

list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) {
if (rgd == new_rgd)
goto out;

if (++count >= max)
goto out;
}
list_add_tail(&new_rgd->rd_recent, &sdp->sd_rindex_recent_list);

out:
spin_unlock(&sdp->sd_rindex_spin);
}

/**
* forward_rgrp_get - get an rgrp to try next from full list
* @sdp: The GFS2 superblock
Expand Down Expand Up @@ -1112,9 +1030,7 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
int loops = 0;
int error, rg_locked;

/* Try recently successful rgrps */

rgd = recent_rgrp_first(sdp, ip->i_goal);
rgd = gfs2_blk2rgrpd(sdp, ip->i_goal);

while (rgd) {
rg_locked = 0;
Expand All @@ -1136,11 +1052,9 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
gfs2_glock_dq_uninit(&al->al_rgd_gh);
if (inode)
return inode;
rgd = recent_rgrp_next(rgd, 1);
break;

/* fall through */
case GLR_TRYFAILED:
rgd = recent_rgrp_next(rgd, 0);
rgd = recent_rgrp_next(rgd);
break;

default:
Expand Down Expand Up @@ -1199,7 +1113,9 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)

out:
if (begin) {
recent_rgrp_add(rgd);
spin_lock(&sdp->sd_rindex_spin);
list_move(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list);
spin_unlock(&sdp->sd_rindex_spin);
rgd = gfs2_rgrpd_get_next(rgd);
if (!rgd)
rgd = gfs2_rgrpd_get_first(sdp);
Expand Down

0 comments on commit 9cabcdb

Please sign in to comment.