From 43d26689fb9e01b69d1018435cf8e24066d8a5fa Mon Sep 17 00:00:00 2001
From: Kurt Hackel
Date: Wed, 17 Jan 2007 14:53:37 -0800
Subject: [PATCH]

--- yaml ---
r: 46907
b: refs/heads/master
c: 1cd04dbe3364be71b93e3aaf4545daa1e261aaa1
h: refs/heads/master
i:
  46905: 341a72da3b0cb03ff9d78f0cca4586da13105b2a
  46903: 118830b70165c24422f4946829bf28566379c079
v: v3
---
 [refs]                         |  2 +-
 trunk/fs/ocfs2/dlm/dlmmaster.c | 36 +++++++++++++++++++---------------
 2 files changed, 21 insertions(+), 17 deletions(-)

diff --git a/[refs] b/[refs]
index 2fa698c104c1..fedcb3b68335 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: e17e75ecb86b8ce9b51b219b5348517561031f80
+refs/heads/master: 1cd04dbe3364be71b93e3aaf4545daa1e261aaa1
diff --git a/trunk/fs/ocfs2/dlm/dlmmaster.c b/trunk/fs/ocfs2/dlm/dlmmaster.c
index a65a87726d6a..b36cce034ea0 100644
--- a/trunk/fs/ocfs2/dlm/dlmmaster.c
+++ b/trunk/fs/ocfs2/dlm/dlmmaster.c
@@ -1507,10 +1507,11 @@ int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data)
 
 	/* take care of the easy cases up front */
 	spin_lock(&res->spinlock);
-	if (res->state & DLM_LOCK_RES_RECOVERING) {
+	if (res->state & (DLM_LOCK_RES_RECOVERING|
+			  DLM_LOCK_RES_MIGRATING)) {
 		spin_unlock(&res->spinlock);
 		mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
-		     "being recovered\n");
+		     "being recovered/migrated\n");
 		response = DLM_MASTER_RESP_ERROR;
 		if (mle)
 			kmem_cache_free(dlm_mle_cache, mle);
@@ -2493,6 +2494,9 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
 	 * the lockres
 	 */
 
+	/* now that remote nodes are spinning on the MIGRATING flag,
+	 * ensure that all assert_master work is flushed. */
+	flush_workqueue(dlm->dlm_worker);
 
 	/* get an extra reference on the mle.
 	 * otherwise the assert_master from the new
@@ -2547,7 +2551,8 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
 			    res->owner == target)
 				break;
 
-			mlog(0, "timed out during migration\n");
+			mlog(0, "%s:%.*s: timed out during migration\n",
+			     dlm->name, res->lockname.len, res->lockname.name);
 			/* avoid hang during shutdown when migrating lockres
 			 * to a node which also goes down */
 			if (dlm_is_node_dead(dlm, target)) {
@@ -2555,20 +2560,19 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
 				     "target %u is no longer up, restarting\n",
 				     dlm->name, res->lockname.len,
 				     res->lockname.name, target);
-				ret = -ERESTARTSYS;
+				ret = -EINVAL;
+				/* migration failed, detach and clean up mle */
+				dlm_mle_detach_hb_events(dlm, mle);
+				dlm_put_mle(mle);
+				dlm_put_mle_inuse(mle);
+				spin_lock(&res->spinlock);
+				res->state &= ~DLM_LOCK_RES_MIGRATING;
+				spin_unlock(&res->spinlock);
+				goto leave;
 			}
-		}
-		if (ret == -ERESTARTSYS) {
-			/* migration failed, detach and clean up mle */
-			dlm_mle_detach_hb_events(dlm, mle);
-			dlm_put_mle(mle);
-			dlm_put_mle_inuse(mle);
-			spin_lock(&res->spinlock);
-			res->state &= ~DLM_LOCK_RES_MIGRATING;
-			spin_unlock(&res->spinlock);
-			goto leave;
-		}
-		/* TODO: if node died: stop, clean up, return error */
+		} else
+			mlog(0, "%s:%.*s: caught signal during migration\n",
+			     dlm->name, res->lockname.len, res->lockname.name);
 	}
 
 	/* all done, set the owner, clear the flag */