diff --git a/[refs] b/[refs]
index c8d988c186e7..4fca449f7d1d 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: fddfb02ad0d0d3b479c2a26a8ae7e6411b34706b
+refs/heads/master: 205a872bd6f9a9a09ef035ef1e90185a8245cc58
diff --git a/trunk/kernel/cgroup.c b/trunk/kernel/cgroup.c
index c02b05560d10..589433f7a74b 100644
--- a/trunk/kernel/cgroup.c
+++ b/trunk/kernel/cgroup.c
@@ -4277,6 +4277,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
 	DEFINE_WAIT(wait);
 	struct cgroup_event *event, *tmp;
 	struct cgroup_subsys *ss;
+	LIST_HEAD(tmp_list);
 
 	lockdep_assert_held(&d->d_inode->i_mutex);
 	lockdep_assert_held(&cgroup_mutex);
@@ -4331,16 +4332,20 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
 	/*
 	 * Unregister events and notify userspace.
 	 * Notify userspace about cgroup removing only after rmdir of cgroup
-	 * directory to avoid race between userspace and kernelspace
+	 * directory to avoid race between userspace and kernelspace. Use
+	 * a temporary list to avoid a deadlock with cgroup_event_wake(). Since
+	 * cgroup_event_wake() is called with the wait queue head locked,
+	 * remove_wait_queue() cannot be called while holding event_list_lock.
	 */
 	spin_lock(&cgrp->event_list_lock);
-	list_for_each_entry_safe(event, tmp, &cgrp->event_list, list) {
+	list_splice_init(&cgrp->event_list, &tmp_list);
+	spin_unlock(&cgrp->event_list_lock);
+	list_for_each_entry_safe(event, tmp, &tmp_list, list) {
 		list_del(&event->list);
 		remove_wait_queue(event->wqh, &event->wait);
 		eventfd_signal(event->eventfd, 1);
 		schedule_work(&event->remove);
 	}
-	spin_unlock(&cgrp->event_list_lock);
 
 	return 0;
 }
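
Note: the pattern the patch applies -- splice the shared list onto a private
list while holding the lock, drop the lock, then walk the private list -- can
be illustrated outside the kernel. Below is a minimal userspace sketch,
assuming only POSIX threads; struct node, struct event, drain_events() and
event_list_lock are hypothetical stand-ins for the kernel's list_head,
cgroup_event, the drain loop in cgroup_destroy_locked() and
cgrp->event_list_lock. It demonstrates only the lock-scope reduction, not the
kernel APIs.

	/*
	 * Sketch of the splice-and-drain pattern from the patch: move all
	 * entries to a private list under the lock, release the lock, then
	 * process the entries without it.  Names are hypothetical.
	 */
	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Circular doubly-linked list, modeled on the kernel's struct list_head. */
	struct node {
		struct node *prev, *next;
	};

	static void list_init(struct node *head)
	{
		head->prev = head->next = head;
	}

	static void list_add_tail(struct node *n, struct node *head)
	{
		n->prev = head->prev;
		n->next = head;
		head->prev->next = n;
		head->prev = n;
	}

	/* Analogue of list_splice_init(): move everything from src onto dst. */
	static void list_splice_init(struct node *src, struct node *dst)
	{
		if (src->next == src)
			return;			/* src is empty */
		dst->prev->next = src->next;
		src->next->prev = dst->prev;
		src->prev->next = dst;
		dst->prev = src->prev;
		list_init(src);
	}

	struct event {
		struct node list;		/* must stay the first member */
		int id;
	};

	static struct node event_list;
	static pthread_mutex_t event_list_lock = PTHREAD_MUTEX_INITIALIZER;

	/*
	 * Drain the shared list.  The lock is held only for the splice; the
	 * per-event work -- which in the kernel patch includes
	 * remove_wait_queue(), a call that must not run under
	 * event_list_lock -- happens after the lock is dropped.
	 */
	static void drain_events(void)
	{
		struct node tmp_list, *pos, *next;

		list_init(&tmp_list);

		pthread_mutex_lock(&event_list_lock);
		list_splice_init(&event_list, &tmp_list);
		pthread_mutex_unlock(&event_list_lock);

		for (pos = tmp_list.next; pos != &tmp_list; pos = next) {
			struct event *ev = (struct event *)pos;

			next = pos->next;
			printf("draining event %d\n", ev->id);	/* stand-in for the real work */
			free(ev);
		}
	}

	int main(void)
	{
		int i;

		list_init(&event_list);
		for (i = 0; i < 3; i++) {
			struct event *ev = malloc(sizeof(*ev));

			ev->id = i;
			pthread_mutex_lock(&event_list_lock);
			list_add_tail(&ev->list, &event_list);
			pthread_mutex_unlock(&event_list_lock);
		}
		drain_events();
		return 0;
	}

The design point is the same as in the patch: because the splice empties the
shared list atomically, no event can be lost or visited twice, and the
expensive or lock-ordering-sensitive work runs with event_list_lock already
released, so it can never nest inside a path (like cgroup_event_wake(), called
with the wait queue head locked) that takes the locks in the opposite order.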