Commit 8db9997

---
r: 350333
b: refs/heads/master
c: 810cbee
h: refs/heads/master
i:
  350331: acebbe9
v: v3
Li Zefan authored and Tejun Heo committed Feb 18, 2013
1 parent def0433 commit 8db9997
Showing 2 changed files with 26 additions and 17 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 63f43f55c9bbc14f76b582644019b8a07dc8219a
+refs/heads/master: 810cbee4fad570ff167132d4ecf247d99c48f71d
41 changes: 25 additions & 16 deletions trunk/kernel/cgroup.c
@@ -3786,8 +3786,13 @@ static void cgroup_event_remove(struct work_struct *work)
 			remove);
 	struct cgroup *cgrp = event->cgrp;
 
+	remove_wait_queue(event->wqh, &event->wait);
+
 	event->cft->unregister_event(cgrp, event->cft, event->eventfd);
 
+	/* Notify userspace the event is going away. */
+	eventfd_signal(event->eventfd, 1);
+
 	eventfd_ctx_put(event->eventfd);
 	kfree(event);
 	dput(cgrp->dentry);
@@ -3807,15 +3812,25 @@ static int cgroup_event_wake(wait_queue_t *wait, unsigned mode,
 	unsigned long flags = (unsigned long)key;
 
 	if (flags & POLLHUP) {
-		__remove_wait_queue(event->wqh, &event->wait);
-		spin_lock(&cgrp->event_list_lock);
-		list_del_init(&event->list);
-		spin_unlock(&cgrp->event_list_lock);
 		/*
-		 * We are in atomic context, but cgroup_event_remove() may
-		 * sleep, so we have to call it in workqueue.
+		 * If the event has been detached at cgroup removal, we
+		 * can simply return knowing the other side will cleanup
+		 * for us.
+		 *
+		 * We can't race against event freeing since the other
+		 * side will require wqh->lock via remove_wait_queue(),
+		 * which we hold.
 		 */
-		schedule_work(&event->remove);
+		spin_lock(&cgrp->event_list_lock);
+		if (!list_empty(&event->list)) {
+			list_del_init(&event->list);
+			/*
+			 * We are in atomic context, but cgroup_event_remove()
+			 * may sleep, so we have to call it in workqueue.
+			 */
+			schedule_work(&event->remove);
+		}
+		spin_unlock(&cgrp->event_list_lock);
 	}
 
 	return 0;
@@ -4375,20 +4390,14 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
 	/*
 	 * Unregister events and notify userspace.
 	 * Notify userspace about cgroup removing only after rmdir of cgroup
-	 * directory to avoid race between userspace and kernelspace. Use
-	 * a temporary list to avoid a deadlock with cgroup_event_wake(). Since
-	 * cgroup_event_wake() is called with the wait queue head locked,
-	 * remove_wait_queue() cannot be called while holding event_list_lock.
+	 * directory to avoid race between userspace and kernelspace.
 	 */
 	spin_lock(&cgrp->event_list_lock);
-	list_splice_init(&cgrp->event_list, &tmp_list);
-	spin_unlock(&cgrp->event_list_lock);
-	list_for_each_entry_safe(event, tmp, &tmp_list, list) {
+	list_for_each_entry_safe(event, tmp, &cgrp->event_list, list) {
 		list_del_init(&event->list);
-		remove_wait_queue(event->wqh, &event->wait);
-		eventfd_signal(event->eventfd, 1);
 		schedule_work(&event->remove);
 	}
+	spin_unlock(&cgrp->event_list_lock);
 
 	return 0;
 }
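
For context, the interface being touched here is the cgroup v1 eventfd notification mechanism (cgroup.event_control). Below is a minimal userspace sketch, not part of this commit, showing how such an event is registered and how the eventfd_signal() calls in the diff wake a blocked read() on the eventfd, including the one-shot signal sent from cgroup_event_remove() when the cgroup goes away. The mount point /sys/fs/cgroup/memory, the group name "mygrp", and the choice of memory.oom_control as the watched control file are illustrative assumptions.

/*
 * Minimal sketch of the cgroup v1 event notification API, assuming the
 * memory controller is mounted at /sys/fs/cgroup/memory and a group
 * named "mygrp" already exists.
 */
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/eventfd.h>

int main(void)
{
	/* eventfd the kernel will signal via eventfd_signal() */
	int efd = eventfd(0, 0);

	/* control file whose events we want to watch */
	int cfd = open("/sys/fs/cgroup/memory/mygrp/memory.oom_control",
		       O_RDONLY);

	/* register the pair by writing "<event_fd> <control_fd>" */
	int ecfd = open("/sys/fs/cgroup/memory/mygrp/cgroup.event_control",
			O_WRONLY);
	char buf[64];
	int len = snprintf(buf, sizeof(buf), "%d %d", efd, cfd);

	if (efd < 0 || cfd < 0 || ecfd < 0 || write(ecfd, buf, len) != len) {
		perror("cgroup event registration");
		return 1;
	}

	/*
	 * Blocks until the eventfd is signalled: either the watched
	 * condition fired, or the cgroup was removed and the kernel sent
	 * the "going away" notification, so the reader does not block
	 * forever on a dead cgroup.
	 */
	uint64_t count;
	if (read(efd, &count, sizeof(count)) == sizeof(count))
		printf("event signalled, count=%llu\n",
		       (unsigned long long)count);
	return 0;
}

Built with a plain gcc invocation and run against an existing cgroup v1 memory hierarchy, the program should be woken exactly once if "mygrp" is rmdir'ed while it blocks in read().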
