GFS2: Fix glock refcount issues
This patch fixes some ref counting issues. Firstly, by moving the
point at which we drop the ref count to after the dlm lock operation
has completed, we ensure that we never call gfs2_glock_hold() on a
lock with a zero ref count.

Secondly, by using atomic_dec_and_lock() in gfs2_glock_put(), we
ensure that a glock with a zero ref count can never appear on the
lru_list. That means we can remove the check for this in our
shrinker, which was racy in any case.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Steven Whitehouse committed Dec 3, 2009
Commit 26bb750 (1 parent: c29cd90)
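
The key primitive in the second change is atomic_dec_and_lock(): it drops the count and, only when the count reaches zero, returns true with the spinlock already held, so the final reference can never be observed (for example on the lru_list) without the lock. Below is a small user-space analogue of that pattern using C11 atomics and a pthread mutex; obj, obj_dec_and_lock() and obj_put() are made-up names for illustration, and this is a sketch of the semantics, not the kernel's implementation.

/* User-space sketch of the atomic_dec_and_lock() pattern; hypothetical
 * names, not the kernel implementation.  Build with: cc -pthread demo.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

struct obj {
        atomic_int ref;
        int on_lru;                     /* protected by lru_lock */
};

/* Drop one reference; if it was the last one, return 1 with lru_lock held
 * so the caller can unlink the object before anyone can see it at zero. */
static int obj_dec_and_lock(struct obj *o)
{
        int old = atomic_load(&o->ref);

        /* Fast path: the count cannot reach zero here, no lock needed. */
        while (old > 1) {
                if (atomic_compare_exchange_weak(&o->ref, &old, old - 1))
                        return 0;
        }

        /* Slow path: take the lock first, then do the final decrement, so
         * the count only ever hits zero while lru_lock is held. */
        pthread_mutex_lock(&lru_lock);
        if (atomic_fetch_sub(&o->ref, 1) == 1)
                return 1;               /* zero reached; lock stays held */
        pthread_mutex_unlock(&lru_lock);
        return 0;
}

static void obj_put(struct obj *o)
{
        if (obj_dec_and_lock(o)) {
                o->on_lru = 0;          /* LRU unlink under lru_lock */
                pthread_mutex_unlock(&lru_lock);
                /* ...free the object here... */
        }
}

int main(void)
{
        struct obj o = { .ref = 2, .on_lru = 1 };

        obj_put(&o);                    /* 2 -> 1: fast path, no locking */
        obj_put(&o);                    /* 1 -> 0: unlinked under the lock */
        printf("ref=%d on_lru=%d\n", atomic_load(&o.ref), o.on_lru);
        return 0;
}

The shape mirrors the reworked gfs2_glock_put(): removal from the LRU happens while the lock taken by the final decrement is still held, which is what makes the zero-ref check on the shrinker side unnecessary.
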
Showing 1 changed file with 8 additions and 13 deletions.
fs/gfs2/glock.c: 8 additions & 13 deletions

@@ -241,15 +241,14 @@ int gfs2_glock_put(struct gfs2_glock *gl)
 	int rv = 0;
 
 	write_lock(gl_lock_addr(gl->gl_hash));
-	if (atomic_dec_and_test(&gl->gl_ref)) {
+	if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
 		hlist_del(&gl->gl_list);
-		write_unlock(gl_lock_addr(gl->gl_hash));
-		spin_lock(&lru_lock);
 		if (!list_empty(&gl->gl_lru)) {
 			list_del_init(&gl->gl_lru);
 			atomic_dec(&lru_count);
 		}
 		spin_unlock(&lru_lock);
+		write_unlock(gl_lock_addr(gl->gl_hash));
 		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
 		glock_free(gl);
 		rv = 1;
@@ -513,7 +512,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
 			GLOCK_BUG_ON(gl, 1);
 		}
 		spin_unlock(&gl->gl_spin);
-		gfs2_glock_put(gl);
 		return;
 	}
 
@@ -524,8 +522,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
 		if (glops->go_xmote_bh) {
 			spin_unlock(&gl->gl_spin);
 			rv = glops->go_xmote_bh(gl, gh);
-			if (rv == -EAGAIN)
-				return;
 			spin_lock(&gl->gl_spin);
 			if (rv) {
 				do_error(gl, rv);
@@ -540,7 +536,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
 	clear_bit(GLF_LOCK, &gl->gl_flags);
 out_locked:
 	spin_unlock(&gl->gl_spin);
-	gfs2_glock_put(gl);
 }
 
 static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
@@ -600,7 +595,6 @@ __acquires(&gl->gl_spin)
 
 	if (!(ret & LM_OUT_ASYNC)) {
 		finish_xmote(gl, ret);
-		gfs2_glock_hold(gl);
 		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 			gfs2_glock_put(gl);
 	} else {
@@ -712,9 +706,12 @@ static void glock_work_func(struct work_struct *work)
 {
 	unsigned long delay = 0;
 	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
+	int drop_ref = 0;
 
-	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags))
+	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
 		finish_xmote(gl, gl->gl_reply);
+		drop_ref = 1;
+	}
 	down_read(&gfs2_umount_flush_sem);
 	spin_lock(&gl->gl_spin);
 	if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
@@ -732,6 +729,8 @@ static void glock_work_func(struct work_struct *work)
 	if (!delay ||
 	    queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
 		gfs2_glock_put(gl);
+	if (drop_ref)
+		gfs2_glock_put(gl);
 }
 
 /**
@@ -1366,10 +1365,6 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
 		list_del_init(&gl->gl_lru);
 		atomic_dec(&lru_count);
 
-		/* Check if glock is about to be freed */
-		if (atomic_read(&gl->gl_ref) == 0)
-			continue;
-
 		/* Test for being demotable */
 		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
 			gfs2_glock_hold(gl);
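
The glock_work_func() changes follow the standard deferred-work ownership rule: whoever queues the work must already hold a reference, and the work function drops whatever references it owns only as its final step, after any requeue decision. Below is a rough user-space sketch of that shape; task, task_queue_work(), task_work_fn() and reply_pending are invented names, and this is not the kernel workqueue API. It only mirrors how drop_ref defers the put that finish_xmote() used to do itself.

/* Sketch of the "hold before queueing, drop last" rule that glock_work_func()
 * now follows via drop_ref.  Invented names; not the kernel workqueue API.
 * Build with: cc -pthread demo.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct task {
        atomic_int ref;
        int reply_pending;              /* analogue of GLF_REPLY_PENDING */
};

static void task_get(struct task *t)
{
        atomic_fetch_add(&t->ref, 1);
}

static void task_put(struct task *t)
{
        if (atomic_fetch_sub(&t->ref, 1) == 1) {
                printf("freeing task\n");
                free(t);
        }
}

static void *task_work_fn(void *arg)
{
        struct task *t = arg;
        int drop_ref = 0;

        if (t->reply_pending) {         /* handle the completed operation */
                t->reply_pending = 0;
                drop_ref = 1;           /* this path owns one reference */
        }

        /* ...any requeueing or further holds would happen here; the object
         * cannot go away because the references dropped below are still
         * outstanding at this point... */

        task_put(t);                    /* ref taken by task_queue_work() */
        if (drop_ref)
                task_put(t);            /* ref owned by the reply path */
        return NULL;
}

static void task_queue_work(struct task *t)
{
        pthread_t th;

        task_get(t);                    /* pin the object for the worker */
        pthread_create(&th, NULL, task_work_fn, t);
        pthread_detach(th);
}

int main(void)
{
        struct task *t = malloc(sizeof(*t));

        /* One reference for the submitter, one that the reply path will
         * hand over to the work function. */
        atomic_init(&t->ref, 2);
        t->reply_pending = 1;

        task_queue_work(t);             /* +1 for the worker */
        task_put(t);                    /* drop the submitter's reference */

        pthread_exit(NULL);             /* let the detached worker finish */
}

The ordering is the whole point: by the time the final task_put() calls run, the work function is done touching the object, so there is no window in which it could take a new hold on an object whose count has already reached zero, which is the race the gfs2_glock_hold() removal in the synchronous completion path closes.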
