Commit 33ff5d8

---
yaml
---
r: 334805
b: refs/heads/master
c: 206aa6a
h: refs/heads/master
i:
  334803: 060ae2e
v: v3
Linus Torvalds committed Oct 24, 2012
1 parent 7573e95 commit 33ff5d8
Showing 4 changed files with 16 additions and 40 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 2e680dd61e80592385338bfbeb86833d1c60546c
+refs/heads/master: 206aa6a6c57a24d927e89071fbbf690208052caf
11 changes: 4 additions & 7 deletions trunk/drivers/edac/amd64_edac.c
@@ -170,8 +170,11 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
 	 * memory controller and apply to register. Search for the first
 	 * bandwidth entry that is greater or equal than the setting requested
 	 * and program that. If at last entry, turn off DRAM scrubbing.
+	 *
+	 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
+	 * by falling back to the last element in scrubrates[].
 	 */
-	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
+	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
 		/*
 		 * skip scrub rates which aren't recommended
 		 * (see F10 BKDG, F3x58)
@@ -181,12 +184,6 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
 
 		if (scrubrates[i].bandwidth <= new_bw)
 			break;
-
-		/*
-		 * if no suitable bandwidth found, turn off DRAM scrubbing
-		 * entirely by falling back to the last element in the
-		 * scrubrates array.
-		 */
 	}
 
 	scrubval = scrubrates[i].scrubval;
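The new loop bound implements a search-with-fallback: scan every table entry except the last, and if nothing matches, the index is left pointing at the terminal "scrubbing off" element. Below is a minimal userspace sketch of that pattern; the table values and the pick_scrubval() helper are invented for illustration and are not the driver's.

#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Illustrative stand-in for the driver's scrubrates[] table: sorted by
 * decreasing bandwidth, terminated by a "scrubbing off" entry. The
 * values here are made up for the example. */
static const struct {
	unsigned int scrubval;   /* register encoding */
	unsigned long bandwidth; /* bytes/sec, 0 == off */
} scrubrates[] = {
	{ 0x01, 1600000000UL },
	{ 0x02,  800000000UL },
	{ 0x03,  400000000UL },
	{ 0x00,          0UL },  /* DRAM scrubbing off */
};

static unsigned int pick_scrubval(unsigned long new_bw)
{
	size_t i;

	/* Scan all but the last entry; if no bandwidth is <= new_bw, the
	 * loop falls through with i on the terminal "off" element. */
	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++)
		if (scrubrates[i].bandwidth <= new_bw)
			break;

	return scrubrates[i].scrubval;
}

int main(void)
{
	printf("0x%02x\n", pick_scrubval(500000000UL)); /* matches -> 0x03 */
	printf("0x%02x\n", pick_scrubval(1UL));         /* no match -> 0x00, off */
	return 0;
}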
41 changes: 10 additions & 31 deletions trunk/kernel/cgroup.c
@@ -1962,9 +1962,8 @@ static void cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
 	 * trading it for newcg is protected by cgroup_mutex, we're safe to drop
 	 * it here; it will be freed under RCU.
 	 */
-	put_css_set(oldcg);
-
 	set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
+	put_css_set(oldcg);
 }
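The reordering matters because dropping the last css_set reference is what triggers the release check: if CGRP_RELEASABLE is set only after put_css_set(), the check can run before the bit is visible and notify_on_release never fires. A toy single-threaded model of that ordering, with invented names rather than the kernel's:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy refcounted group: dropping the last reference runs the release
 * check, so the "releasable" flag must already be set by then. */
struct toy_group {
	atomic_int  refcount;
	atomic_bool releasable;
};

static void check_for_release(struct toy_group *g)
{
	if (atomic_load(&g->releasable))
		printf("release notification fires\n");
	else
		printf("release silently missed\n");
}

static void toy_put(struct toy_group *g)
{
	/* Runs the check only when the refcount drops to zero. */
	if (atomic_fetch_sub(&g->refcount, 1) == 1)
		check_for_release(g);
}

int main(void)
{
	struct toy_group g = { 1, false };

	/* Fixed ordering: mark releasable first, then drop the reference. */
	atomic_store(&g.releasable, true);
	toy_put(&g);
	return 0;
}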

@@ -4815,31 +4814,20 @@ static const struct file_operations proc_cgroupstats_operations = {
 /**
  *
  * A pointer to the shared css_set was automatically copied in
  * fork.c by dup_task_struct(). However, we ignore that copy, since
- * it was not made under the protection of RCU, cgroup_mutex or
- * threadgroup_change_begin(), so it might no longer be a valid
- * cgroup pointer. cgroup_attach_task() might have already changed
- * current->cgroups, allowing the previously referenced cgroup
- * group to be removed and freed.
- *
- * Outside the pointer validity we also need to process the css_set
- * inheritance between threadgoup_change_begin() and
- * threadgoup_change_end(), this way there is no leak in any process
- * wide migration performed by cgroup_attach_proc() that could otherwise
- * miss a thread because it is too early or too late in the fork stage.
+ * it was not made under the protection of RCU or cgroup_mutex, so
+ * might no longer be a valid cgroup pointer. cgroup_attach_task() might
+ * have already changed current->cgroups, allowing the previously
+ * referenced cgroup group to be removed and freed.
  *
  * At the point that cgroup_fork() is called, 'current' is the parent
  * task, and the passed argument 'child' points to the child task.
  */
 void cgroup_fork(struct task_struct *child)
 {
-	/*
-	 * We don't need to task_lock() current because current->cgroups
-	 * can't be changed concurrently here. The parent obviously hasn't
-	 * exited and called cgroup_exit(), and we are synchronized against
-	 * cgroup migration through threadgroup_change_begin().
-	 */
+	task_lock(current);
 	child->cgroups = current->cgroups;
 	get_css_set(child->cgroups);
+	task_unlock(current);
 	INIT_LIST_HEAD(&child->cg_list);
 }

@@ -4895,19 +4883,10 @@ void cgroup_post_fork(struct task_struct *child)
 	 */
 	if (use_task_css_set_links) {
 		write_lock(&css_set_lock);
-		if (list_empty(&child->cg_list)) {
-			/*
-			 * It's safe to use child->cgroups without task_lock()
-			 * here because we are protected through
-			 * threadgroup_change_begin() against concurrent
-			 * css_set change in cgroup_task_migrate(). Also
-			 * the task can't exit at that point until
-			 * wake_up_new_task() is called, so we are protected
-			 * against cgroup_exit() setting child->cgroup to
-			 * init_css_set.
-			 */
+		task_lock(child);
+		if (list_empty(&child->cg_list))
 			list_add(&child->cg_list, &child->cgroups->tasks);
-		}
+		task_unlock(child);
 		write_unlock(&css_set_lock);
 	}
 }
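Both fork-path hunks restore the task_lock() pairing that the reverted patches had dropped: the parent's css_set pointer is copied and pinned under the lock so a concurrent migration cannot retarget it mid-copy. A toy pthread sketch of the copy-under-lock pattern, with invented names rather than the kernel's:

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

/* Toy model: a shared-state pointer plus refcount, copied parent-to-child
 * under the parent's lock. None of these names are the kernel's. */
struct toy_css {
	atomic_int refcount;
};

struct toy_task {
	pthread_mutex_t lock;
	struct toy_css *cgroups;
};

static void toy_get_css(struct toy_css *cg)
{
	atomic_fetch_add(&cg->refcount, 1);
}

static void toy_fork_copy(struct toy_task *parent, struct toy_task *child)
{
	/* Hold the parent's lock so ->cgroups cannot change between the
	 * pointer copy and the reference grab. */
	pthread_mutex_lock(&parent->lock);
	child->cgroups = parent->cgroups;
	toy_get_css(child->cgroups);
	pthread_mutex_unlock(&parent->lock);
}

int main(void)
{
	struct toy_css css = { 1 };
	struct toy_task parent = { PTHREAD_MUTEX_INITIALIZER, &css };
	struct toy_task child  = { PTHREAD_MUTEX_INITIALIZER, NULL };

	toy_fork_copy(&parent, &child);
	return 0;
}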
2 changes: 1 addition & 1 deletion trunk/kernel/workqueue.c
@@ -2982,7 +2982,7 @@ bool cancel_delayed_work(struct delayed_work *dwork)
 
 	set_work_cpu_and_clear_pending(&dwork->work, work_cpu(&dwork->work));
 	local_irq_restore(flags);
-	return true;
+	return ret;
 }
EXPORT_SYMBOL(cancel_delayed_work);

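The one-liner makes cancel_delayed_work() report whether the work item was actually pending instead of unconditionally claiming success, so cancelling an idle item now yields %false. A userspace toy of that contract; the struct and function names are invented, not the workqueue API:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the fix: a cancel routine should report whether there was
 * anything to cancel, not hard-code success. */
struct toy_work {
	bool pending;
};

static bool toy_cancel(struct toy_work *w)
{
	bool ret = w->pending; /* was the item actually pending? */

	w->pending = false;    /* clear the pending state either way */
	return ret;            /* the fix: return ret, not a literal true */
}

int main(void)
{
	struct toy_work w = { .pending = false };

	/* Cancelling an idle item should report false. */
	printf("%d\n", toy_cancel(&w)); /* prints 0 */
	return 0;
}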
