xenbus: do not hold transaction_mutex when returning to userspace
  ================================================
  [ BUG: lock held when returning to user space! ]
  ------------------------------------------------
  xenstore-list/3522 is leaving the kernel with locks still held!
  1 lock held by xenstore-list/3522:
   #0:  (&xs_state.transaction_mutex){......}, at: [<c026dc6f>] xenbus_dev_request_and_reply+0x8f/0xa0

The canonical fix for this type of issue appears to be to maintain a
count manually rather than using an rwsem, so do that here.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Ian Campbell authored and Jeremy Fitzhardinge committed Nov 3, 2009
1 parent 74fca6a commit 4c31a78
Showing 1 changed file with 47 additions and 10 deletions.
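
The heart of the change is the four helpers the diff below introduces. Judging from the splat above, lockdep complains because the rwsem's read side is split across system calls: an XS_TRANSACTION_START request through the xenbus device takes the lock and returns to userspace, and only a later XS_TRANSACTION_END (or an error reply) releases it. A bare count is invisible to the lock validator, while the mutex-protected increment still lets the suspend path shut the gate and drain in-flight transactions. Excerpted from the diff, with explanatory comments added:

```c
static void transaction_start(void)
{
	mutex_lock(&xs_state.transaction_mutex);   /* blocks while a suspend holds the mutex */
	atomic_inc(&xs_state.transaction_count);
	mutex_unlock(&xs_state.transaction_mutex); /* no lock held across the transaction */
}

static void transaction_end(void)
{
	if (atomic_dec_and_test(&xs_state.transaction_count))
		wake_up(&xs_state.transaction_wq); /* let a waiting suspend proceed */
}

static void transaction_suspend(void)
{
	mutex_lock(&xs_state.transaction_mutex);   /* stop new transactions starting... */
	wait_event(xs_state.transaction_wq,        /* ...and wait for in-flight ones to drain */
		   atomic_read(&xs_state.transaction_count) == 0);
}

static void transaction_resume(void)
{
	mutex_unlock(&xs_state.transaction_mutex); /* allow transactions again */
}
```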
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -76,6 +76,14 @@ struct xs_handle {
 	/*
 	 * Mutex ordering: transaction_mutex -> watch_mutex -> request_mutex.
 	 * response_mutex is never taken simultaneously with the other three.
+	 *
+	 * transaction_mutex must be held before incrementing
+	 * transaction_count. The mutex is held when a suspend is in
+	 * progress to prevent new transactions starting.
+	 *
+	 * When decrementing transaction_count to zero the wait queue
+	 * should be woken up, the suspend code waits for count to
+	 * reach zero.
 	 */
 
 	/* One request at a time. */
@@ -85,7 +93,9 @@ struct xs_handle {
 	struct mutex response_mutex;
 
 	/* Protect transactions against save/restore. */
-	struct rw_semaphore transaction_mutex;
+	struct mutex transaction_mutex;
+	atomic_t transaction_count;
+	wait_queue_head_t transaction_wq;
 
 	/* Protect watch (de)register against save/restore. */
 	struct rw_semaphore watch_mutex;
@@ -157,14 +167,39 @@ static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len)
 	return body;
 }
 
+static void transaction_start(void)
+{
+	mutex_lock(&xs_state.transaction_mutex);
+	atomic_inc(&xs_state.transaction_count);
+	mutex_unlock(&xs_state.transaction_mutex);
+}
+
+static void transaction_end(void)
+{
+	if (atomic_dec_and_test(&xs_state.transaction_count))
+		wake_up(&xs_state.transaction_wq);
+}
+
+static void transaction_suspend(void)
+{
+	mutex_lock(&xs_state.transaction_mutex);
+	wait_event(xs_state.transaction_wq,
+		   atomic_read(&xs_state.transaction_count) == 0);
+}
+
+static void transaction_resume(void)
+{
+	mutex_unlock(&xs_state.transaction_mutex);
+}
+
 void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
 {
 	void *ret;
 	struct xsd_sockmsg req_msg = *msg;
 	int err;
 
 	if (req_msg.type == XS_TRANSACTION_START)
-		down_read(&xs_state.transaction_mutex);
+		transaction_start();
 
 	mutex_lock(&xs_state.request_mutex);
 
@@ -180,7 +215,7 @@ void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
 	if ((msg->type == XS_TRANSACTION_END) ||
 	    ((req_msg.type == XS_TRANSACTION_START) &&
 	     (msg->type == XS_ERROR)))
-		up_read(&xs_state.transaction_mutex);
+		transaction_end();
 
 	return ret;
 }
@@ -432,11 +467,11 @@ int xenbus_transaction_start(struct xenbus_transaction *t)
 {
 	char *id_str;
 
-	down_read(&xs_state.transaction_mutex);
+	transaction_start();
 
 	id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL);
 	if (IS_ERR(id_str)) {
-		up_read(&xs_state.transaction_mutex);
+		transaction_end();
 		return PTR_ERR(id_str);
 	}
 
@@ -461,7 +496,7 @@ int xenbus_transaction_end(struct xenbus_transaction t, int abort)
 
 	err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL));
 
-	up_read(&xs_state.transaction_mutex);
+	transaction_end();
 
 	return err;
 }
@@ -662,7 +697,7 @@ EXPORT_SYMBOL_GPL(unregister_xenbus_watch);
 
 void xs_suspend(void)
 {
-	down_write(&xs_state.transaction_mutex);
+	transaction_suspend();
 	down_write(&xs_state.watch_mutex);
 	mutex_lock(&xs_state.request_mutex);
 	mutex_lock(&xs_state.response_mutex);
@@ -677,7 +712,7 @@ void xs_resume(void)
 
 	mutex_unlock(&xs_state.response_mutex);
 	mutex_unlock(&xs_state.request_mutex);
-	up_write(&xs_state.transaction_mutex);
+	transaction_resume();
 
 	/* No need for watches_lock: the watch_mutex is sufficient. */
 	list_for_each_entry(watch, &watches, list) {
@@ -693,7 +728,7 @@ void xs_suspend_cancel(void)
 	mutex_unlock(&xs_state.response_mutex);
 	mutex_unlock(&xs_state.request_mutex);
 	up_write(&xs_state.watch_mutex);
-	up_write(&xs_state.transaction_mutex);
+	mutex_unlock(&xs_state.transaction_mutex);
 }
 
 static int xenwatch_thread(void *unused)
@@ -843,8 +878,10 @@ int xs_init(void)
 
 	mutex_init(&xs_state.request_mutex);
 	mutex_init(&xs_state.response_mutex);
-	init_rwsem(&xs_state.transaction_mutex);
+	mutex_init(&xs_state.transaction_mutex);
 	init_rwsem(&xs_state.watch_mutex);
+	atomic_set(&xs_state.transaction_count, 0);
+	init_waitqueue_head(&xs_state.transaction_wq);
 
 	/* Initialize the shared memory rings to talk to xenstored */
 	err = xb_init_comms();
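The same pattern, a gate mutex plus a count of in-flight users plus a wait queue the quiescer sleeps on, works outside the kernel too. Below is a minimal userspace sketch using pthreads; every name in it is hypothetical, and a condition variable with a plain counter stands in for the kernel's atomic_t and wait_queue_head_t:

```c
/*
 * Hypothetical userspace analogue of the count-based fix above.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t gate = PTHREAD_MUTEX_INITIALIZER;       /* ~ transaction_mutex */
static pthread_mutex_t count_lock = PTHREAD_MUTEX_INITIALIZER; /* protects count */
static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;      /* ~ transaction_wq */
static int count;                                              /* ~ transaction_count */

static void transaction_start(void)
{
	pthread_mutex_lock(&gate);       /* blocks here while a suspend is in progress */
	pthread_mutex_lock(&count_lock);
	count++;
	pthread_mutex_unlock(&count_lock);
	pthread_mutex_unlock(&gate);     /* nothing held while the transaction runs */
}

static void transaction_end(void)
{
	pthread_mutex_lock(&count_lock);
	if (--count == 0)
		pthread_cond_signal(&drained);  /* ~ wake_up() */
	pthread_mutex_unlock(&count_lock);
}

static void transaction_suspend(void)
{
	pthread_mutex_lock(&gate);       /* stop new transactions from starting */
	pthread_mutex_lock(&count_lock);
	while (count != 0)               /* ~ wait_event() */
		pthread_cond_wait(&drained, &count_lock);
	pthread_mutex_unlock(&count_lock);
}

static void transaction_resume(void)
{
	pthread_mutex_unlock(&gate);     /* allow transactions again */
}

static void *worker(void *arg)
{
	(void)arg;
	transaction_start();
	usleep(100 * 1000);              /* pretend to do transaction work */
	transaction_end();
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	usleep(10 * 1000);               /* give the worker a head start */
	transaction_suspend();           /* returns once nothing is in flight */
	puts("quiesced: count drained to zero");
	transaction_resume();
	pthread_join(t, NULL);
	return 0;
}
```

The crucial property mirrors the commit: between transaction_start() and transaction_end() a thread holds no lock at all, only a count, so it can legally return to userspace (or, in this sketch, sleep) mid-transaction while suspend-style code still has a reliable way to quiesce.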
