sched/autogroup: Fix crash on reboot when autogroup is disabled
Due to these two commits:

  8323f26 sched: Fix race in task_group()
  800d4d3 sched, autogroup: Stop going ahead if autogroup is disabled

... autogroup scheduling's dynamic knobs are wrecked.

With both patches applied, all you have to do to crash a box is
disable autogroup during boot up, then reboot.. boom, NULL pointer
dereference due to 800d4d3 not allowing autogroup to move things,
and 8323f26 making that the only way to switch runqueues.

Remove most of the (dysfunctional) knobs and turn the remaining
sched_autogroup_enabled knob readonly.

If the user fiddles with cgroups hereafter, once tasks
are moved, autogroup won't mess with them again unless
they call setsid().

No knobs, no glitz, nada, just a cute little thing folks can
turn on if they don't want to muck about with cgroups and/or
systemd.

Signed-off-by: Mike Galbraith <efault@gmx.de>
Cc: Xiaotian Feng <xtfeng@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Xiaotian Feng <dannyfeng@tencent.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: <stable@vger.kernel.org> # v3.6
Link: http://lkml.kernel.org/r/1351451963.4999.8.camel@maggy.simpson.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Mike Galbraith authored and Ingo Molnar committed Oct 30, 2012
1 parent 8ed92e5 commit 5258f38
Showing 4 changed files with 14 additions and 147 deletions.
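
For orientation before the hunks themselves: the sketch below is a minimal, self-contained userspace model of the decision flow the patch leaves behind, not kernel code. The structs and the two functions are simplified stand-ins for their kernel namesakes, and the real task_wants_autogroup() performs additional checks omitted here. The point it illustrates: with the enabled check moved into task_wants_autogroup(), a disabled autogroup never substitutes its task_group for the cgroup-assigned one, so there is nothing left for a later move to undo.

/*
 * Stand-alone model of the post-patch decision flow (simplified userspace
 * stand-ins for the kernel types; build with: cc -o model model.c).
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct task_group { const char *name; };
struct autogroup  { struct task_group *tg; };
struct task       { struct autogroup *autogroup; };

static int sysctl_sched_autogroup_enabled;      /* 0: booted with "noautogroup" */
static struct task_group root_task_group = { "root" };

/* mirrors task_wants_autogroup(): the enabled check now lives here */
static bool task_wants_autogroup(struct task *p, struct task_group *tg)
{
        if (!sysctl_sched_autogroup_enabled)
                return false;
        if (tg != &root_task_group)
                return false;
        return p->autogroup != NULL;
}

/* mirrors autogroup_task_group() after the ACCESS_ONCE() check was dropped */
static struct task_group *autogroup_task_group(struct task *p,
                                               struct task_group *tg)
{
        if (task_wants_autogroup(p, tg))
                return p->autogroup->tg;
        return tg;
}

int main(void)
{
        struct task_group ag_tg = { "autogroup-1" };
        struct autogroup  ag    = { &ag_tg };
        struct task       p     = { &ag };

        /* disabled: the scheduler keeps using the group it was given */
        printf("disabled -> %s\n",
               autogroup_task_group(&p, &root_task_group)->name);

        sysctl_sched_autogroup_enabled = 1;
        printf("enabled  -> %s\n",
               autogroup_task_group(&p, &root_task_group)->name);
        return 0;
}

Compiled with any C compiler, it prints "root" for the disabled case and "autogroup-1" once the flag is set.
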
78 changes: 0 additions & 78 deletions fs/proc/base.c
@@ -1271,81 +1271,6 @@ static const struct file_operations proc_pid_sched_operations = {
 
 #endif
 
-#ifdef CONFIG_SCHED_AUTOGROUP
-/*
- * Print out autogroup related information:
- */
-static int sched_autogroup_show(struct seq_file *m, void *v)
-{
-        struct inode *inode = m->private;
-        struct task_struct *p;
-
-        p = get_proc_task(inode);
-        if (!p)
-                return -ESRCH;
-        proc_sched_autogroup_show_task(p, m);
-
-        put_task_struct(p);
-
-        return 0;
-}
-
-static ssize_t
-sched_autogroup_write(struct file *file, const char __user *buf,
-            size_t count, loff_t *offset)
-{
-        struct inode *inode = file->f_path.dentry->d_inode;
-        struct task_struct *p;
-        char buffer[PROC_NUMBUF];
-        int nice;
-        int err;
-
-        memset(buffer, 0, sizeof(buffer));
-        if (count > sizeof(buffer) - 1)
-                count = sizeof(buffer) - 1;
-        if (copy_from_user(buffer, buf, count))
-                return -EFAULT;
-
-        err = kstrtoint(strstrip(buffer), 0, &nice);
-        if (err < 0)
-                return err;
-
-        p = get_proc_task(inode);
-        if (!p)
-                return -ESRCH;
-
-        err = proc_sched_autogroup_set_nice(p, nice);
-        if (err)
-                count = err;
-
-        put_task_struct(p);
-
-        return count;
-}
-
-static int sched_autogroup_open(struct inode *inode, struct file *filp)
-{
-        int ret;
-
-        ret = single_open(filp, sched_autogroup_show, NULL);
-        if (!ret) {
-                struct seq_file *m = filp->private_data;
-
-                m->private = inode;
-        }
-        return ret;
-}
-
-static const struct file_operations proc_pid_sched_autogroup_operations = {
-        .open           = sched_autogroup_open,
-        .read           = seq_read,
-        .write          = sched_autogroup_write,
-        .llseek         = seq_lseek,
-        .release        = single_release,
-};
-
-#endif /* CONFIG_SCHED_AUTOGROUP */
-
 static ssize_t comm_write(struct file *file, const char __user *buf,
                                 size_t count, loff_t *offset)
 {
@@ -3035,9 +2960,6 @@ static const struct pid_entry tgid_base_stuff[] = {
         INF("limits",     S_IRUGO, proc_pid_limits),
 #ifdef CONFIG_SCHED_DEBUG
         REG("sched",      S_IRUGO|S_IWUSR, proc_pid_sched_operations),
 #endif
-#ifdef CONFIG_SCHED_AUTOGROUP
-        REG("autogroup",  S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
-#endif
         REG("comm",       S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
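
With the hunks above applied, the per-task /proc/<pid>/autogroup file is gone entirely. A small userspace probe (an illustration only; the exact errno is whatever procfs reports for a missing entry, normally ENOENT) makes the difference between a kernel with this change and one without it easy to see:

/* Probe for the per-task autogroup knob that this patch removes.
 * On kernels carrying the patch the open() is expected to fail. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/proc/self/autogroup", O_RDONLY);

        if (fd < 0) {
                printf("no /proc/self/autogroup (%s) - per-task knob not present\n",
                       strerror(errno));
                return 0;
        }
        printf("/proc/self/autogroup exists - per-task knob still there\n");
        close(fd);
        return 0;
}
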
68 changes: 11 additions & 57 deletions kernel/sched/auto_group.c
@@ -110,6 +110,9 @@ static inline struct autogroup *autogroup_create(void)
 
 bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
 {
+        if (!sysctl_sched_autogroup_enabled)
+                return false;
+
         if (tg != &root_task_group)
                 return false;
 
@@ -143,24 +146,23 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
 
         p->signal->autogroup = autogroup_kref_get(ag);
 
-        if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled))
-                goto out;
-
         t = p;
         do {
                 sched_move_task(t);
         } while_each_thread(p, t);
 
-out:
         unlock_task_sighand(p, &flags);
         autogroup_kref_put(prev);
 }
 
 /* Allocates GFP_KERNEL, cannot be called under any spinlock */
 void sched_autogroup_create_attach(struct task_struct *p)
 {
-        struct autogroup *ag = autogroup_create();
+        struct autogroup *ag;
 
+        if (!sysctl_sched_autogroup_enabled)
+                return;
+        ag = autogroup_create();
         autogroup_move_group(p, ag);
         /* drop extra reference added by autogroup_create() */
         autogroup_kref_put(ag);
@@ -176,11 +178,15 @@ EXPORT_SYMBOL(sched_autogroup_detach);
 
 void sched_autogroup_fork(struct signal_struct *sig)
 {
+        if (!sysctl_sched_autogroup_enabled)
+                return;
         sig->autogroup = autogroup_task_get(current);
 }
 
 void sched_autogroup_exit(struct signal_struct *sig)
 {
+        if (!sysctl_sched_autogroup_enabled)
+                return;
         autogroup_kref_put(sig->autogroup);
 }
 
@@ -193,58 +199,6 @@ static int __init setup_autogroup(char *str)
 
 __setup("noautogroup", setup_autogroup);
 
-#ifdef CONFIG_PROC_FS
-
-int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
-{
-        static unsigned long next = INITIAL_JIFFIES;
-        struct autogroup *ag;
-        int err;
-
-        if (nice < -20 || nice > 19)
-                return -EINVAL;
-
-        err = security_task_setnice(current, nice);
-        if (err)
-                return err;
-
-        if (nice < 0 && !can_nice(current, nice))
-                return -EPERM;
-
-        /* this is a heavy operation taking global locks.. */
-        if (!capable(CAP_SYS_ADMIN) && time_before(jiffies, next))
-                return -EAGAIN;
-
-        next = HZ / 10 + jiffies;
-        ag = autogroup_task_get(p);
-
-        down_write(&ag->lock);
-        err = sched_group_set_shares(ag->tg, prio_to_weight[nice + 20]);
-        if (!err)
-                ag->nice = nice;
-        up_write(&ag->lock);
-
-        autogroup_kref_put(ag);
-
-        return err;
-}
-
-void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m)
-{
-        struct autogroup *ag = autogroup_task_get(p);
-
-        if (!task_group_is_autogroup(ag->tg))
-                goto out;
-
-        down_read(&ag->lock);
-        seq_printf(m, "/autogroup-%ld nice %d\n", ag->id, ag->nice);
-        up_read(&ag->lock);
-
-out:
-        autogroup_kref_put(ag);
-}
-#endif /* CONFIG_PROC_FS */
-
 #ifdef CONFIG_SCHED_DEBUG
 int autogroup_path(struct task_group *tg, char *buf, int buflen)
 {
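
As the commit message says, setsid() is now the only event that attaches a task to a fresh autogroup, and only while the sysctl is enabled. A minimal userspace sketch of that path follows; observing the new group is indirect — for example via the group paths in /proc/sched_debug on a CONFIG_SCHED_DEBUG kernel — since the /proc/<pid>/autogroup file no longer exists after this patch.

/* Fork a child that calls setsid(); with autogroup enabled the child's new
 * session is attached to its own autogroup via sched_autogroup_create_attach(). */
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t pid = fork();

        if (pid < 0) {
                perror("fork");
                return 1;
        }
        if (pid == 0) {
                if (setsid() < 0) {     /* new session => new autogroup if enabled */
                        perror("setsid");
                        _exit(1);
                }
                printf("child %d now leads session %d\n", getpid(), getsid(0));
                sleep(30);              /* long enough to inspect /proc/sched_debug */
                _exit(0);
        }
        waitpid(pid, NULL, 0);
        return 0;
}
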
9 changes: 1 addition & 8 deletions kernel/sched/auto_group.h
@@ -4,11 +4,6 @@
 #include <linux/rwsem.h>
 
 struct autogroup {
-        /*
-         * reference doesn't mean how many thread attach to this
-         * autogroup now. It just stands for the number of task
-         * could use this autogroup.
-         */
         struct kref             kref;
         struct task_group       *tg;
         struct rw_semaphore     lock;
@@ -29,9 +24,7 @@ extern bool task_wants_autogroup(struct task_struct *p, struct task_group *tg);
 static inline struct task_group *
 autogroup_task_group(struct task_struct *p, struct task_group *tg)
 {
-        int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);
-
-        if (enabled && task_wants_autogroup(p, tg))
+        if (task_wants_autogroup(p, tg))
                 return p->signal->autogroup->tg;
 
         return tg;
6 changes: 2 additions & 4 deletions kernel/sysctl.c
@@ -363,10 +363,8 @@ static struct ctl_table kern_table[] = {
                 .procname       = "sched_autogroup_enabled",
                 .data           = &sysctl_sched_autogroup_enabled,
                 .maxlen         = sizeof(unsigned int),
-                .mode           = 0644,
-                .proc_handler   = proc_dointvec_minmax,
-                .extra1         = &zero,
-                .extra2         = &one,
+                .mode           = 0444,
+                .proc_handler   = proc_dointvec,
         },
 #endif
 #ifdef CONFIG_CFS_BANDWIDTH
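
After the sysctl.c hunk above, kernel.sched_autogroup_enabled can still be read but no longer written, so anything that toggled it at runtime now has to rely on the noautogroup boot parameter instead. A small check along these lines can confirm that (the exact failure mode of the write open is kernel-dependent; a permission error is the expected outcome on a kernel with this change):

/* Read kernel.sched_autogroup_enabled, then show that writing it is refused
 * now that the ctl_table entry is mode 0444. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

#define KNOB "/proc/sys/kernel/sched_autogroup_enabled"

int main(void)
{
        char buf[16];
        ssize_t n;
        int fd = open(KNOB, O_RDONLY);

        if (fd < 0) {
                printf("%s missing (%s) - autogroup not built in?\n",
                       KNOB, strerror(errno));
                return 0;
        }
        n = read(fd, buf, sizeof(buf) - 1);
        if (n > 0) {
                buf[n] = '\0';
                printf("current value: %s", buf);  /* value includes its newline */
        }
        close(fd);

        fd = open(KNOB, O_WRONLY);      /* expected to fail: knob is read-only */
        if (fd < 0) {
                printf("write open refused as expected: %s\n", strerror(errno));
                return 0;
        }
        printf("write open unexpectedly succeeded - kernel without this patch?\n");
        close(fd);
        return 0;
}
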
