Skip to content

Commit

Permalink
Merge branch 'upstream-linus' of git://oss.oracle.com/home/sourcebo/git/ocfs2

Browse files Browse the repository at this point in the history

* 'upstream-linus' of git://oss.oracle.com/home/sourcebo/git/ocfs2:
  [PATCH] CONFIGFS_FS must depend on SYSFS
  [PATCH] Bogus NULL pointer check in fs/configfs/dir.c
  ocfs2: Better I/O error handling in heartbeat
  ocfs2: test and set teardown flag early in user_dlm_destroy_lock()
  ocfs2: Handle the DLM_CANCELGRANT case in user_unlock_ast()
  ocfs2: catch an invalid ast case in dlmfs
  ocfs2: remove an overly aggressive BUG() in dlmfs
  ocfs2: multi node truncate fix
  • Loading branch information
Linus Torvalds committed Apr 10, 2006
2 parents de12a78 + 65714b9 commit e38d557
Show file tree
Hide file tree
Showing 5 changed files with 101 additions and 36 deletions.
2 changes: 1 addition & 1 deletion fs/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -861,7 +861,7 @@ config RAMFS

config CONFIGFS_FS
tristate "Userspace-driven configuration filesystem (EXPERIMENTAL)"
depends on EXPERIMENTAL
depends on SYSFS && EXPERIMENTAL
help
configfs is a ram-based filesystem that provides the converse
of sysfs's functionality. Where sysfs is a filesystem-based
Expand Down
2 changes: 1 addition & 1 deletion fs/configfs/dir.c
Original file line number Diff line number Diff line change
Expand Up @@ -504,7 +504,7 @@ static int populate_groups(struct config_group *group)
int ret = 0;
int i;

if (group && group->default_groups) {
if (group->default_groups) {
/* FYI, we're faking mkdir here
* I'm not sure we need this semaphore, as we're called
* from our parent's mkdir. That holds our parent's
Expand Down
40 changes: 32 additions & 8 deletions fs/ocfs2/cluster/heartbeat.c
Original file line number Diff line number Diff line change
Expand Up @@ -153,6 +153,7 @@ struct o2hb_region {
/* Context for a batch of in-flight heartbeat bios: lets the submitter
 * wait until every bio in the batch has completed and find out whether
 * any of them failed. */
struct o2hb_bio_wait_ctxt {
atomic_t wc_num_reqs;	/* count of bios still outstanding in this batch
			 * (initialized from num_ios in o2hb_bio_wait_init) */
struct completion wc_io_complete;	/* NOTE(review): presumably completed
					 * when wc_num_reqs reaches zero and
					 * waited on in o2hb_wait_on_io —
					 * confirm, those paths are not
					 * visible in this hunk */
int wc_error;	/* last I/O error recorded by o2hb_bio_end_io (each new
		 * error overwrites the previous one); 0 if the whole
		 * batch succeeded. Checked by o2hb_read_slots and
		 * o2hb_do_disk_heartbeat after waiting. */
};

static void o2hb_write_timeout(void *arg)
Expand Down Expand Up @@ -186,6 +187,7 @@ static inline void o2hb_bio_wait_init(struct o2hb_bio_wait_ctxt *wc,
{
atomic_set(&wc->wc_num_reqs, num_ios);
init_completion(&wc->wc_io_complete);
wc->wc_error = 0;
}

/* Used in error paths too */
Expand Down Expand Up @@ -218,8 +220,10 @@ static int o2hb_bio_end_io(struct bio *bio,
{
struct o2hb_bio_wait_ctxt *wc = bio->bi_private;

if (error)
if (error) {
mlog(ML_ERROR, "IO Error %d\n", error);
wc->wc_error = error;
}

if (bio->bi_size)
return 1;
Expand Down Expand Up @@ -390,6 +394,8 @@ static int o2hb_read_slots(struct o2hb_region *reg,

bail_and_wait:
o2hb_wait_on_io(reg, &wc);
if (wc.wc_error && !status)
status = wc.wc_error;

if (bios) {
for(i = 0; i < num_bios; i++)
Expand Down Expand Up @@ -790,20 +796,24 @@ static int o2hb_highest_node(unsigned long *nodes,
return highest;
}

static void o2hb_do_disk_heartbeat(struct o2hb_region *reg)
static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
{
int i, ret, highest_node, change = 0;
unsigned long configured_nodes[BITS_TO_LONGS(O2NM_MAX_NODES)];
struct bio *write_bio;
struct o2hb_bio_wait_ctxt write_wc;

if (o2nm_configured_node_map(configured_nodes, sizeof(configured_nodes)))
return;
ret = o2nm_configured_node_map(configured_nodes,
sizeof(configured_nodes));
if (ret) {
mlog_errno(ret);
return ret;
}

highest_node = o2hb_highest_node(configured_nodes, O2NM_MAX_NODES);
if (highest_node >= O2NM_MAX_NODES) {
mlog(ML_NOTICE, "ocfs2_heartbeat: no configured nodes found!\n");
return;
return -EINVAL;
}

/* No sense in reading the slots of nodes that don't exist
Expand All @@ -813,7 +823,7 @@ static void o2hb_do_disk_heartbeat(struct o2hb_region *reg)
ret = o2hb_read_slots(reg, highest_node + 1);
if (ret < 0) {
mlog_errno(ret);
return;
return ret;
}

/* With an up to date view of the slots, we can check that no
Expand All @@ -831,7 +841,7 @@ static void o2hb_do_disk_heartbeat(struct o2hb_region *reg)
ret = o2hb_issue_node_write(reg, &write_bio, &write_wc);
if (ret < 0) {
mlog_errno(ret);
return;
return ret;
}

i = -1;
Expand All @@ -847,13 +857,24 @@ static void o2hb_do_disk_heartbeat(struct o2hb_region *reg)
*/
o2hb_wait_on_io(reg, &write_wc);
bio_put(write_bio);
if (write_wc.wc_error) {
/* Do not re-arm the write timeout on I/O error - we
* can't be sure that the new block ever made it to
* disk */
mlog(ML_ERROR, "Write error %d on device \"%s\"\n",
write_wc.wc_error, reg->hr_dev_name);
return write_wc.wc_error;
}

o2hb_arm_write_timeout(reg);

/* let the person who launched us know when things are steady */
if (!change && (atomic_read(&reg->hr_steady_iterations) != 0)) {
if (atomic_dec_and_test(&reg->hr_steady_iterations))
wake_up(&o2hb_steady_queue);
}

return 0;
}

/* Subtract b from a, storing the result in a. a *must* have a larger
Expand Down Expand Up @@ -913,7 +934,10 @@ static int o2hb_thread(void *data)
* likely to time itself out. */
do_gettimeofday(&before_hb);

o2hb_do_disk_heartbeat(reg);
i = 0;
do {
ret = o2hb_do_disk_heartbeat(reg);
} while (ret && ++i < 2);

do_gettimeofday(&after_hb);
elapsed_msec = o2hb_elapsed_msecs(&before_hb, &after_hb);
Expand Down
74 changes: 56 additions & 18 deletions fs/ocfs2/dlm/userdlm.c
Original file line number Diff line number Diff line change
Expand Up @@ -139,6 +139,10 @@ static void user_ast(void *opaque)
return;
}

mlog_bug_on_msg(lockres->l_requested == LKM_IVMODE,
"Lockres %s, requested ivmode. flags 0x%x\n",
lockres->l_name, lockres->l_flags);

/* we're downconverting. */
if (lockres->l_requested < lockres->l_level) {
if (lockres->l_requested <=
Expand Down Expand Up @@ -229,23 +233,42 @@ static void user_unlock_ast(void *opaque, enum dlm_status status)

mlog(0, "UNLOCK AST called on lock %s\n", lockres->l_name);

if (status != DLM_NORMAL)
if (status != DLM_NORMAL && status != DLM_CANCELGRANT)
mlog(ML_ERROR, "Dlm returns status %d\n", status);

spin_lock(&lockres->l_lock);
if (lockres->l_flags & USER_LOCK_IN_TEARDOWN)
/* The teardown flag gets set early during the unlock process,
* so test the cancel flag to make sure that this ast isn't
* for a concurrent cancel. */
if (lockres->l_flags & USER_LOCK_IN_TEARDOWN
&& !(lockres->l_flags & USER_LOCK_IN_CANCEL)) {
lockres->l_level = LKM_IVMODE;
else {
} else if (status == DLM_CANCELGRANT) {
mlog(0, "Lock %s, cancel fails, flags 0x%x\n",
lockres->l_name, lockres->l_flags);
/* We tried to cancel a convert request, but it was
* already granted. Don't clear the busy flag - the
* ast should've done this already. */
BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL));
lockres->l_flags &= ~USER_LOCK_IN_CANCEL;
goto out_noclear;
} else {
BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL));
/* Cancel succeeded, we want to re-queue */
mlog(0, "Lock %s, cancel succeeds, flags 0x%x\n",
lockres->l_name, lockres->l_flags);
lockres->l_requested = LKM_IVMODE; /* cancel an
* upconvert
* request. */
lockres->l_flags &= ~USER_LOCK_IN_CANCEL;
/* we want the unblock thread to look at it again
* now. */
__user_dlm_queue_lockres(lockres);
if (lockres->l_flags & USER_LOCK_BLOCKED)
__user_dlm_queue_lockres(lockres);
}

lockres->l_flags &= ~USER_LOCK_BUSY;
out_noclear:
spin_unlock(&lockres->l_lock);

wake_up(&lockres->l_event);
Expand All @@ -268,21 +291,36 @@ static void user_dlm_unblock_lock(void *opaque)

spin_lock(&lockres->l_lock);

BUG_ON(!(lockres->l_flags & USER_LOCK_BLOCKED));
BUG_ON(!(lockres->l_flags & USER_LOCK_QUEUED));
mlog_bug_on_msg(!(lockres->l_flags & USER_LOCK_QUEUED),
"Lockres %s, flags 0x%x\n",
lockres->l_name, lockres->l_flags);

/* notice that we don't clear USER_LOCK_BLOCKED here. That's
* for user_ast to do. */
/* notice that we don't clear USER_LOCK_BLOCKED here. If it's
* set, we want user_ast clear it. */
lockres->l_flags &= ~USER_LOCK_QUEUED;

/* It's valid to get here and no longer be blocked - if we get
* several basts in a row, we might be queued by the first
* one, the unblock thread might run and clear the queued
* flag, and finally we might get another bast which re-queues
* us before our ast for the downconvert is called. */
if (!(lockres->l_flags & USER_LOCK_BLOCKED)) {
mlog(0, "Lockres %s, flags 0x%x: queued but not blocking\n",
lockres->l_name, lockres->l_flags);
spin_unlock(&lockres->l_lock);
goto drop_ref;
}

if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
mlog(0, "lock is in teardown so we do nothing\n");
spin_unlock(&lockres->l_lock);
goto drop_ref;
}

if (lockres->l_flags & USER_LOCK_BUSY) {
mlog(0, "BUSY flag detected...\n");
mlog(0, "Cancel lock %s, flags 0x%x\n",
lockres->l_name, lockres->l_flags);

if (lockres->l_flags & USER_LOCK_IN_CANCEL) {
spin_unlock(&lockres->l_lock);
goto drop_ref;
Expand All @@ -296,14 +334,7 @@ static void user_dlm_unblock_lock(void *opaque)
LKM_CANCEL,
user_unlock_ast,
lockres);
if (status == DLM_CANCELGRANT) {
/* If we got this, then the ast was fired
* before we could cancel. We cleanup our
* state, and restart the function. */
spin_lock(&lockres->l_lock);
lockres->l_flags &= ~USER_LOCK_IN_CANCEL;
spin_unlock(&lockres->l_lock);
} else if (status != DLM_NORMAL)
if (status != DLM_NORMAL)
user_log_dlm_error("dlmunlock", status, lockres);
goto drop_ref;
}
Expand Down Expand Up @@ -581,6 +612,14 @@ int user_dlm_destroy_lock(struct user_lock_res *lockres)
mlog(0, "asked to destroy %s\n", lockres->l_name);

spin_lock(&lockres->l_lock);
if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
mlog(0, "Lock is already torn down\n");
spin_unlock(&lockres->l_lock);
return 0;
}

lockres->l_flags |= USER_LOCK_IN_TEARDOWN;

while (lockres->l_flags & USER_LOCK_BUSY) {
spin_unlock(&lockres->l_lock);

Expand All @@ -606,7 +645,6 @@ int user_dlm_destroy_lock(struct user_lock_res *lockres)

lockres->l_flags &= ~USER_LOCK_ATTACHED;
lockres->l_flags |= USER_LOCK_BUSY;
lockres->l_flags |= USER_LOCK_IN_TEARDOWN;
spin_unlock(&lockres->l_lock);

mlog(0, "unlocking lockres %s\n", lockres->l_name);
Expand Down
19 changes: 11 additions & 8 deletions fs/ocfs2/file.c
Original file line number Diff line number Diff line change
Expand Up @@ -260,6 +260,17 @@ static int ocfs2_truncate_file(struct inode *inode,
if (new_i_size == le64_to_cpu(fe->i_size))
goto bail;

/* This forces other nodes to sync and drop their pages. Do
* this even if we have a truncate without allocation change -
* ocfs2 cluster sizes can be much greater than page size, so
* we have to truncate them anyway. */
status = ocfs2_data_lock(inode, 1);
if (status < 0) {
mlog_errno(status);
goto bail;
}
ocfs2_data_unlock(inode, 1);

if (le32_to_cpu(fe->i_clusters) ==
ocfs2_clusters_for_bytes(osb->sb, new_i_size)) {
mlog(0, "fe->i_clusters = %u, so we do a simple truncate\n",
Expand All @@ -272,14 +283,6 @@ static int ocfs2_truncate_file(struct inode *inode,
goto bail;
}

/* This forces other nodes to sync and drop their pages */
status = ocfs2_data_lock(inode, 1);
if (status < 0) {
mlog_errno(status);
goto bail;
}
ocfs2_data_unlock(inode, 1);

/* alright, we're going to need to do a full blown alloc size
* change. Orphan the inode so that recovery can complete the
* truncate if necessary. This does the task of marking
Expand Down

0 comments on commit e38d557

Please sign in to comment.