diff --git a/[refs] b/[refs]
index f570ca49175a..695a66d6e2d6 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 144d80bed91a6557269c935bf03e0f56089e5be0
+refs/heads/master: 2e680dd61e80592385338bfbeb86833d1c60546c
diff --git a/trunk/drivers/edac/amd64_edac.c b/trunk/drivers/edac/amd64_edac.c
index cc8e7c78a23c..5a297a26211d 100644
--- a/trunk/drivers/edac/amd64_edac.c
+++ b/trunk/drivers/edac/amd64_edac.c
@@ -170,11 +170,8 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
 	 * memory controller and apply to register. Search for the first
 	 * bandwidth entry that is greater or equal than the setting requested
 	 * and program that. If at last entry, turn off DRAM scrubbing.
-	 *
-	 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
-	 * by falling back to the last element in scrubrates[].
 	 */
-	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
+	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
 		/*
 		 * skip scrub rates which aren't recommended
 		 * (see F10 BKDG, F3x58)
@@ -184,6 +181,12 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
 
 		if (scrubrates[i].bandwidth <= new_bw)
 			break;
+
+		/*
+		 * if no suitable bandwidth found, turn off DRAM scrubbing
+		 * entirely by falling back to the last element in the
+		 * scrubrates array.
+		 */
 	}
 
 	scrubval = scrubrates[i].scrubval;
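
Reviewer note on the __amd64_set_scrub_rate() hunks above: the loop relies on falling through to the last scrubrates[] entry, which encodes "DRAM scrubbing off", whenever no recommended entry is at or below the requested bandwidth. The stand-alone C sketch below illustrates that select-or-fall-back pattern only; the table values, the 0x00 "off" encoding and the pick_scrubval() helper are invented for illustration, and the bound stops one short of the sentinel so the fallback index can never leave the table.

#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct scrubrate {
	unsigned int bandwidth;	/* scrub bandwidth this entry covers (made-up units) */
	unsigned int scrubval;	/* value that would be programmed into the register */
};

/* Sorted from highest to lowest bandwidth; the last entry means "off". */
static const struct scrubrate scrubrates[] = {
	{ 1600000, 0x01 },
	{  800000, 0x02 },
	{  400000, 0x03 },
	{  200000, 0x04 },
	{       0, 0x00 },	/* fallback: DRAM scrubbing off */
};

static unsigned int pick_scrubval(unsigned int new_bw, unsigned int min_scrubval)
{
	size_t i;

	/* stop one short of the sentinel so i can fall back to it safely */
	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
		/* skip rates that are not recommended for this platform */
		if (scrubrates[i].scrubval < min_scrubval)
			continue;

		/* take the first entry at or below the requested bandwidth */
		if (scrubrates[i].bandwidth <= new_bw)
			break;
	}

	/* no match: i now indexes the last entry, which disables scrubbing */
	return scrubrates[i].scrubval;
}

int main(void)
{
	printf("500000 -> 0x%02x\n", pick_scrubval(500000, 0x01));	/* picks the 400000 entry */
	printf("    50 -> 0x%02x\n", pick_scrubval(50, 0x01));		/* falls back to "off" */
	return 0;
}
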
diff --git a/trunk/kernel/cgroup.c b/trunk/kernel/cgroup.c
index f24f724620dd..13774b3b39aa 100644
--- a/trunk/kernel/cgroup.c
+++ b/trunk/kernel/cgroup.c
@@ -1962,8 +1962,9 @@ static void cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
 	 * trading it for newcg is protected by cgroup_mutex, we're safe to drop
 	 * it here; it will be freed under RCU.
 	 */
-	set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
 	put_css_set(oldcg);
+
+	set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
 }
 
 /**
@@ -4814,20 +4815,31 @@ static const struct file_operations proc_cgroupstats_operations = {
  *
  * A pointer to the shared css_set was automatically copied in
  * fork.c by dup_task_struct(). However, we ignore that copy, since
- * it was not made under the protection of RCU or cgroup_mutex, so
- * might no longer be a valid cgroup pointer. cgroup_attach_task() might
- * have already changed current->cgroups, allowing the previously
- * referenced cgroup group to be removed and freed.
+ * it was not made under the protection of RCU, cgroup_mutex or
+ * threadgroup_change_begin(), so it might no longer be a valid
+ * cgroup pointer. cgroup_attach_task() might have already changed
+ * current->cgroups, allowing the previously referenced cgroup
+ * group to be removed and freed.
+ *
+ * Outside the pointer validity we also need to process the css_set
+ * inheritance between threadgoup_change_begin() and
+ * threadgoup_change_end(), this way there is no leak in any process
+ * wide migration performed by cgroup_attach_proc() that could otherwise
+ * miss a thread because it is too early or too late in the fork stage.
  *
  * At the point that cgroup_fork() is called, 'current' is the parent
  * task, and the passed argument 'child' points to the child task.
  */
 void cgroup_fork(struct task_struct *child)
 {
-	task_lock(current);
+	/*
+	 * We don't need to task_lock() current because current->cgroups
+	 * can't be changed concurrently here. The parent obviously hasn't
+	 * exited and called cgroup_exit(), and we are synchronized against
+	 * cgroup migration through threadgroup_change_begin().
+	 */
 	child->cgroups = current->cgroups;
 	get_css_set(child->cgroups);
-	task_unlock(current);
 	INIT_LIST_HEAD(&child->cg_list);
 }
 
@@ -4883,10 +4895,19 @@ void cgroup_post_fork(struct task_struct *child)
 	 */
 	if (use_task_css_set_links) {
 		write_lock(&css_set_lock);
-		task_lock(child);
-		if (list_empty(&child->cg_list))
+		if (list_empty(&child->cg_list)) {
+			/*
+			 * It's safe to use child->cgroups without task_lock()
+			 * here because we are protected through
+			 * threadgroup_change_begin() against concurrent
+			 * css_set change in cgroup_task_migrate(). Also
+			 * the task can't exit at that point until
+			 * wake_up_new_task() is called, so we are protected
+			 * against cgroup_exit() setting child->cgroup to
+			 * init_css_set.
+			 */
 			list_add(&child->cg_list, &child->cgroups->tasks);
-		task_unlock(child);
+		}
 		write_unlock(&css_set_lock);
 	}
 }
diff --git a/trunk/kernel/workqueue.c b/trunk/kernel/workqueue.c
index 042d221d33cc..d951daa0ca9a 100644
--- a/trunk/kernel/workqueue.c
+++ b/trunk/kernel/workqueue.c
@@ -2982,7 +2982,7 @@ bool cancel_delayed_work(struct delayed_work *dwork)
 
 	set_work_cpu_and_clear_pending(&dwork->work, work_cpu(&dwork->work));
 	local_irq_restore(flags);
-	return ret;
+	return true;
 }
 EXPORT_SYMBOL(cancel_delayed_work);
 
diff --git a/trunk/security/apparmor/policy.c b/trunk/security/apparmor/policy.c
index cf5fd220309b..813200384d97 100644
--- a/trunk/security/apparmor/policy.c
+++ b/trunk/security/apparmor/policy.c
@@ -724,6 +724,8 @@ struct aa_profile *aa_new_null_profile(struct aa_profile *parent, int hat)
  */
 static void free_profile(struct aa_profile *profile)
 {
+	struct aa_profile *p;
+
 	AA_DEBUG("%s(%p)\n", __func__, profile);
 
 	if (!profile)
@@ -751,7 +753,27 @@ static void free_profile(struct aa_profile *profile)
 	aa_put_dfa(profile->xmatch);
 	aa_put_dfa(profile->policy.dfa);
 
-	aa_put_profile(profile->replacedby);
+	/* put the profile reference for replacedby, but not via
+	 * put_profile(kref_put).
+	 * replacedby can form a long chain that can result in cascading
+	 * frees that blows the stack because kref_put makes a nested fn
+	 * call (it looks like recursion, with free_profile calling
+	 * free_profile) for each profile in the chain lp#1056078.
+	 */
+	for (p = profile->replacedby; p; ) {
+		if (atomic_dec_and_test(&p->base.count.refcount)) {
+			/* no more refs on p, grab its replacedby */
+			struct aa_profile *next = p->replacedby;
+			/* break the chain */
+			p->replacedby = NULL;
+			/* now free p, chain is broken */
+			free_profile(p);
+
+			/* follow up with next profile in the chain */
+			p = next;
+		} else
+			break;
+	}
 
 	kzfree(profile);
 }
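
Reviewer note on the free_profile() hunk above: the point of the loop is to unwind a potentially long replacedby chain iteratively, breaking each link before freeing the node, so the inner free_profile() call never sees another chained profile and the stack stays flat (lp#1056078). Below is a small userspace sketch of that pattern; the names are invented and a plain int stands in for the kernel's kref, so treat it as an illustration of the technique, not the kernel code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct profile {
	int refcount;			/* stand-in for the kernel's kref */
	struct profile *replacedby;	/* next profile in the replacement chain */
	char name[16];
};

static struct profile *new_profile(const char *name, struct profile *replacedby)
{
	struct profile *p = calloc(1, sizeof(*p));

	if (!p)
		abort();
	p->refcount = 1;
	p->replacedby = replacedby;
	strncpy(p->name, name, sizeof(p->name) - 1);
	return p;
}

/* Free @profile and walk its replacedby chain iteratively, not recursively. */
static void free_profile_chain(struct profile *profile)
{
	struct profile *p, *next;

	for (p = profile->replacedby; p; p = next) {
		if (--p->refcount > 0)
			break;			/* someone else still holds p */

		next = p->replacedby;		/* remember the rest of the chain */
		p->replacedby = NULL;		/* break the link before freeing */
		printf("freeing %s\n", p->name);
		free(p);			/* no chained node left, so no recursion */
	}

	printf("freeing %s\n", profile->name);
	free(profile);
}

int main(void)
{
	/* head -> a -> b, each link holding one reference on its successor */
	struct profile *b = new_profile("b", NULL);
	struct profile *a = new_profile("a", b);
	struct profile *head = new_profile("head", a);

	free_profile_chain(head);	/* frees head, a and b; one stack frame throughout */
	return 0;
}
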