Commit 59b8430
---
yaml
---
r: 46904
b: refs/heads/master
c: ddc09c8
h: refs/heads/master
v: v3
Kurt Hackel authored and Mark Fasheh committed Feb 7, 2007
1 parent 118830b commit 59b8430
Showing 4 changed files with 34 additions and 15 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: faf0ec9f13defb57f4269ecb22ed86f2874ee89a
+refs/heads/master: ddc09c8ddac8d0f170ba8caa8128801f358dccff
1 change: 1 addition & 0 deletions trunk/fs/ocfs2/dlm/dlmcommon.h
@@ -223,6 +223,7 @@ static inline void __dlm_set_joining_node(struct dlm_ctxt *dlm,
 #define DLM_LOCK_RES_IN_PROGRESS          0x00000010
 #define DLM_LOCK_RES_MIGRATING            0x00000020
 #define DLM_LOCK_RES_DROPPING_REF         0x00000040
+#define DLM_LOCK_RES_BLOCK_DIRTY          0x00001000

 /* max milliseconds to wait to sync up a network failure with a node death */
 #define DLM_NODE_DEATH_WAIT_MAX           (5 * 1000)
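The new bit lives in the same res->state mask as the flags above, so it can be set and tested together with them while holding res->spinlock. As a quick illustration only (plain userspace C, not kernel code; the hex values are copied from the hunk above), the value was picked so it does not overlap any of the neighbouring bits:

/* illustration only: confirm DLM_LOCK_RES_BLOCK_DIRTY shares no bits
 * with the flags shown in the hunk above */
#include <assert.h>

#define DLM_LOCK_RES_IN_PROGRESS   0x00000010
#define DLM_LOCK_RES_MIGRATING     0x00000020
#define DLM_LOCK_RES_DROPPING_REF  0x00000040
#define DLM_LOCK_RES_BLOCK_DIRTY   0x00001000

int main(void)
{
	assert((DLM_LOCK_RES_BLOCK_DIRTY &
	        (DLM_LOCK_RES_IN_PROGRESS |
	         DLM_LOCK_RES_MIGRATING |
	         DLM_LOCK_RES_DROPPING_REF)) == 0);
	return 0;
}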
16 changes: 15 additions & 1 deletion trunk/fs/ocfs2/dlm/dlmmaster.c
@@ -2707,8 +2707,15 @@ static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
 	__dlm_lockres_reserve_ast(res);
 	spin_unlock(&res->spinlock);

-	/* now flush all the pending asts.. hang out for a bit */
+	/* now flush all the pending asts */
 	dlm_kick_thread(dlm, res);
+	/* before waiting on DIRTY, block processes which may
+	 * try to dirty the lockres before MIGRATING is set */
+	spin_lock(&res->spinlock);
+	BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY);
+	res->state |= DLM_LOCK_RES_BLOCK_DIRTY;
+	spin_unlock(&res->spinlock);
+	/* now wait on any pending asts and the DIRTY state */
 	wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
 	dlm_lockres_release_ast(dlm, res);

@@ -2734,6 +2741,13 @@ static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
 		mlog(0, "trying again...\n");
 		goto again;
 	}
+	/* now that we are sure the MIGRATING state is there, drop
+	 * the unneeded state which blocked threads trying to DIRTY */
+	spin_lock(&res->spinlock);
+	BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
+	BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
+	res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
+	spin_unlock(&res->spinlock);

 	/* did the target go down or die? */
 	spin_lock(&dlm->spinlock);
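Taken together, the two hunks above give dlm_mark_lockres_migrating() a strict ordering: set BLOCK_DIRTY, flush and wait for the resource to go clean, and only drop BLOCK_DIRTY once MIGRATING is known to be set, so nothing can re-dirty the lockres in the window between the flush and MIGRATING. Below is a minimal userspace sketch of that ordering, assuming a pthread mutex and condition variable in place of res->spinlock and dlm->ast_wq; every name is a stand-in, not the kernel API. Build with cc -pthread; the only point is the sequence of steps 1-4.

#include <pthread.h>
#include <stdio.h>

#define RES_DIRTY        0x1u
#define RES_MIGRATING    0x2u
#define RES_BLOCK_DIRTY  0x4u

static unsigned int state;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wq   = PTHREAD_COND_INITIALIZER;

/* stand-in for __dlm_dirty_lockres(): refuse to mark the resource dirty
 * while migration is pending or blocked */
static void try_dirty(void)
{
	pthread_mutex_lock(&lock);
	if (!(state & (RES_MIGRATING | RES_BLOCK_DIRTY)))
		state |= RES_DIRTY;
	pthread_mutex_unlock(&lock);
}

/* stand-in for the dlm thread flushing the pending work */
static void *flush_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	state &= ~RES_DIRTY;            /* asts flushed, lists shuffled */
	pthread_cond_broadcast(&wq);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_mutex_lock(&lock);
	state |= RES_DIRTY;             /* the resource starts out dirty */
	state |= RES_BLOCK_DIRTY;       /* 1. block new dirtiers first */
	pthread_mutex_unlock(&lock);

	pthread_create(&t, NULL, flush_thread, NULL);

	pthread_mutex_lock(&lock);
	while (state & RES_DIRTY)       /* 2. wait_event() equivalent */
		pthread_cond_wait(&wq, &lock);
	state |= RES_MIGRATING;         /* 3. MIGRATING is now safe to set */
	state &= ~RES_BLOCK_DIRTY;      /* 4. drop the temporary blocker */
	pthread_mutex_unlock(&lock);

	try_dirty();                    /* a late dirtier sees MIGRATING and backs off */
	pthread_join(t, NULL);
	printf("final state: 0x%x\n", state);   /* prints 0x2: MIGRATING only */
	return 0;
}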
30 changes: 17 additions & 13 deletions trunk/fs/ocfs2/dlm/dlmthread.c
@@ -95,7 +95,7 @@ int __dlm_lockres_has_locks(struct dlm_lock_resource *res)
 int __dlm_lockres_unused(struct dlm_lock_resource *res)
 {
 	if (!__dlm_lockres_has_locks(res) &&
-	    list_empty(&res->dirty)) {
+	    (list_empty(&res->dirty) && !(res->state & DLM_LOCK_RES_DIRTY))) {
 		/* try not to scan the bitmap unless the first two
 		 * conditions are already true */
 		int bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
@@ -455,12 +455,17 @@ void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 	assert_spin_locked(&res->spinlock);

 	/* don't shuffle secondary queues */
-	if ((res->owner == dlm->node_num) &&
-	    !(res->state & DLM_LOCK_RES_DIRTY)) {
-		/* ref for dirty_list */
-		dlm_lockres_get(res);
-		list_add_tail(&res->dirty, &dlm->dirty_list);
-		res->state |= DLM_LOCK_RES_DIRTY;
+	if ((res->owner == dlm->node_num)) {
+		if (res->state & (DLM_LOCK_RES_MIGRATING |
+				  DLM_LOCK_RES_BLOCK_DIRTY))
+			return;
+
+		if (list_empty(&res->dirty)) {
+			/* ref for dirty_list */
+			dlm_lockres_get(res);
+			list_add_tail(&res->dirty, &dlm->dirty_list);
+			res->state |= DLM_LOCK_RES_DIRTY;
+		}
 	}
 }

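The rewritten check above splits the old condition in two: bail out early whenever the resource is migrating or dirty-blocked, and only then queue it (taking the dirty-list reference) if it is not queued already. A rough restatement in plain C with toy types; the struct fields and helper name are assumptions for illustration, not the kernel structures:

#include <stdbool.h>
#include <stdio.h>

#define RES_DIRTY        0x1u
#define RES_MIGRATING    0x2u
#define RES_BLOCK_DIRTY  0x4u

struct toy_lockres {
	unsigned int state;
	int owner;
	int refs;
	bool on_dirty_list;
};

/* toy stand-in for __dlm_dirty_lockres(); caller holds the resource lock */
static void toy_dirty_lockres(struct toy_lockres *res, int this_node)
{
	if (res->owner != this_node)
		return;                 /* don't shuffle secondary queues */

	if (res->state & (RES_MIGRATING | RES_BLOCK_DIRTY))
		return;                 /* migration owns the resource right now */

	if (!res->on_dirty_list) {      /* avoid double-queueing and double refs */
		res->refs++;            /* ref for the dirty list */
		res->on_dirty_list = true;
		res->state |= RES_DIRTY;
	}
}

int main(void)
{
	struct toy_lockres res     = { .owner = 0, .refs = 1 };
	struct toy_lockres blocked = { .state = RES_BLOCK_DIRTY, .owner = 0, .refs = 1 };

	toy_dirty_lockres(&res, 0);
	toy_dirty_lockres(&blocked, 0);
	printf("queued=%d refs=%d\n", res.on_dirty_list, res.refs);         /* queued=1 refs=2 */
	printf("queued=%d refs=%d\n", blocked.on_dirty_list, blocked.refs); /* queued=0 refs=1 */
	return 0;
}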
@@ -639,7 +644,7 @@ static int dlm_thread(void *data)
 			dlm_lockres_get(res);

 			spin_lock(&res->spinlock);
-			res->state &= ~DLM_LOCK_RES_DIRTY;
+			/* We clear the DLM_LOCK_RES_DIRTY state once we shuffle lists below */
 			list_del_init(&res->dirty);
 			spin_unlock(&res->spinlock);
 			spin_unlock(&dlm->spinlock);
@@ -663,10 +668,11 @@ static int dlm_thread(void *data)
 			/* it is now ok to move lockreses in these states
 			 * to the dirty list, assuming that they will only be
 			 * dirty for a short while. */
+			BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
 			if (res->state & (DLM_LOCK_RES_IN_PROGRESS |
-					  DLM_LOCK_RES_MIGRATING |
 					  DLM_LOCK_RES_RECOVERING)) {
 				/* move it to the tail and keep going */
+				res->state &= ~DLM_LOCK_RES_DIRTY;
 				spin_unlock(&res->spinlock);
 				mlog(0, "delaying list shuffling for in-"
 				     "progress lockres %.*s, state=%d\n",
@@ -687,6 +693,7 @@ static int dlm_thread(void *data)

 			/* called while holding lockres lock */
 			dlm_shuffle_lists(dlm, res);
+			res->state &= ~DLM_LOCK_RES_DIRTY;
 			spin_unlock(&res->spinlock);

 			dlm_lockres_calc_usage(dlm, res);
@@ -697,11 +704,8 @@ static int dlm_thread(void *data)
 			/* if the lock was in-progress, stick
 			 * it on the back of the list */
 			if (delay) {
-				/* ref for dirty_list */
-				dlm_lockres_get(res);
 				spin_lock(&res->spinlock);
-				list_add_tail(&res->dirty, &dlm->dirty_list);
-				res->state |= DLM_LOCK_RES_DIRTY;
+				__dlm_dirty_lockres(dlm, res);
 				spin_unlock(&res->spinlock);
 			}
 			dlm_lockres_put(res);
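Two things fall out of the dlm_thread() hunks above. First, the delay path now re-queues through __dlm_dirty_lockres(), so the MIGRATING/BLOCK_DIRTY guard and the dirty-list reference handling live in one place instead of being open-coded. Second, DLM_LOCK_RES_DIRTY is no longer cleared when the resource is popped off dlm->dirty_list; it is cleared only after dlm_shuffle_lists() finishes, and together with the extra test added to __dlm_lockres_unused() in the first hunk of this file, the flag keeps a mid-shuffle resource from looking unused. A small sketch of that second point, with toy types and assumed names:

#include <stdbool.h>
#include <stdio.h>

#define RES_DIRTY 0x1u

struct toy_lockres {
	unsigned int state;
	bool on_dirty_list;
	bool has_locks;
};

/* mirrors the updated __dlm_lockres_unused() test: a resource is only
 * "unused" if it holds no locks, is off the dirty list, and has dropped
 * the DIRTY flag */
static bool toy_unused(const struct toy_lockres *res)
{
	return !res->has_locks &&
	       !res->on_dirty_list &&
	       !(res->state & RES_DIRTY);
}

int main(void)
{
	struct toy_lockres res = { .state = RES_DIRTY, .on_dirty_list = true };

	/* dlm_thread: pop the resource off the dirty list, keep the flag... */
	res.on_dirty_list = false;
	printf("unused while being shuffled? %d\n", toy_unused(&res));  /* 0 */

	/* ...and clear the flag only after dlm_shuffle_lists() is done */
	res.state &= ~RES_DIRTY;
	printf("unused afterwards? %d\n", toy_unused(&res));            /* 1 */
	return 0;
}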
