Merge tag 'cputime-adjustment-cleanups' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks into sched/core

Pull cputime cleanups from Frederic Weisbecker:

 * Improve naming and code location

 * Consolidate adjustment code

 * Comment the adjustment code

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar committed Dec 8, 2012
2 parents 222e82b + fa09205 commit e783377
Showing 13 changed files with 119 additions and 205 deletions.
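The core rename in this series keeps the signatures intact: task_times() becomes task_cputime_adjusted() and thread_group_times() becomes thread_group_cputime_adjusted(). A minimal caller-side sketch follows; the helper and the pr_info() reporting are hypothetical, only the two accessors and their prototypes come from the diff below.

/* Illustrative caller only; not part of this commit. */
#include <linux/kernel.h>
#include <linux/sched.h>

static void report_adjusted_cputime(struct task_struct *p)
{
        cputime_t utime, stime, tgutime, tgstime;

        /* formerly task_times(p, &utime, &stime) */
        task_cputime_adjusted(p, &utime, &stime);

        /* formerly thread_group_times(p, &tgutime, &tgstime) */
        thread_group_cputime_adjusted(p, &tgutime, &tgstime);

        pr_info("task %lu/%lu, group %lu/%lu\n",
                (unsigned long)utime, (unsigned long)stime,
                (unsigned long)tgutime, (unsigned long)tgstime);
}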
4 changes: 2 additions & 2 deletions fs/proc/array.c
@@ -438,7 +438,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,

min_flt += sig->min_flt;
maj_flt += sig->maj_flt;
- thread_group_times(task, &utime, &stime);
+ thread_group_cputime_adjusted(task, &utime, &stime);
gtime += sig->gtime;
}

@@ -454,7 +454,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
if (!whole) {
min_flt = task->min_flt;
maj_flt = task->maj_flt;
- task_times(task, &utime, &stime);
+ task_cputime_adjusted(task, &utime, &stime);
gtime = task->gtime;
}

78 changes: 0 additions & 78 deletions fs/proc/base.c
@@ -1272,81 +1272,6 @@ static const struct file_operations proc_pid_sched_operations = {

#endif

#ifdef CONFIG_SCHED_AUTOGROUP
/*
* Print out autogroup related information:
*/
static int sched_autogroup_show(struct seq_file *m, void *v)
{
struct inode *inode = m->private;
struct task_struct *p;

p = get_proc_task(inode);
if (!p)
return -ESRCH;
proc_sched_autogroup_show_task(p, m);

put_task_struct(p);

return 0;
}

static ssize_t
sched_autogroup_write(struct file *file, const char __user *buf,
size_t count, loff_t *offset)
{
struct inode *inode = file->f_path.dentry->d_inode;
struct task_struct *p;
char buffer[PROC_NUMBUF];
int nice;
int err;

memset(buffer, 0, sizeof(buffer));
if (count > sizeof(buffer) - 1)
count = sizeof(buffer) - 1;
if (copy_from_user(buffer, buf, count))
return -EFAULT;

err = kstrtoint(strstrip(buffer), 0, &nice);
if (err < 0)
return err;

p = get_proc_task(inode);
if (!p)
return -ESRCH;

err = proc_sched_autogroup_set_nice(p, nice);
if (err)
count = err;

put_task_struct(p);

return count;
}

static int sched_autogroup_open(struct inode *inode, struct file *filp)
{
int ret;

ret = single_open(filp, sched_autogroup_show, NULL);
if (!ret) {
struct seq_file *m = filp->private_data;

m->private = inode;
}
return ret;
}

static const struct file_operations proc_pid_sched_autogroup_operations = {
.open = sched_autogroup_open,
.read = seq_read,
.write = sched_autogroup_write,
.llseek = seq_lseek,
.release = single_release,
};

#endif /* CONFIG_SCHED_AUTOGROUP */

static ssize_t comm_write(struct file *file, const char __user *buf,
size_t count, loff_t *offset)
{
@@ -2657,9 +2582,6 @@ static const struct pid_entry tgid_base_stuff[] = {
INF("limits", S_IRUGO, proc_pid_limits),
#ifdef CONFIG_SCHED_DEBUG
REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
#endif
- #ifdef CONFIG_SCHED_AUTOGROUP
- REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
- #endif
REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
27 changes: 21 additions & 6 deletions include/linux/sched.h
@@ -433,14 +433,29 @@ struct cpu_itimer {
u32 incr_error;
};

+ /**
+ * struct cputime - snapshot of system and user cputime
+ * @utime: time spent in user mode
+ * @stime: time spent in system mode
+ *
+ * Gathers a generic snapshot of user and system time.
+ */
+ struct cputime {
+ cputime_t utime;
+ cputime_t stime;
+ };

/**
* struct task_cputime - collected CPU time counts
* @utime: time spent in user mode, in &cputime_t units
* @stime: time spent in kernel mode, in &cputime_t units
* @sum_exec_runtime: total time spent on the CPU, in nanoseconds
*
- * This structure groups together three kinds of CPU time that are
- * tracked for threads and thread groups. Most things considering
+ * This is an extension of struct cputime that includes the total runtime
+ * spent by the task from the scheduler point of view.
+ *
+ * As a result, this structure groups together three kinds of CPU time
+ * that are tracked for threads and thread groups. Most things considering
* CPU time want to group these counts together and treat all three
* of them in parallel.
*/
@@ -581,7 +596,7 @@ struct signal_struct {
cputime_t gtime;
cputime_t cgtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
- cputime_t prev_utime, prev_stime;
+ struct cputime prev_cputime;
#endif
unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
@@ -1340,7 +1355,7 @@ struct task_struct {
cputime_t utime, stime, utimescaled, stimescaled;
cputime_t gtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
- cputime_t prev_utime, prev_stime;
+ struct cputime prev_cputime;
#endif
unsigned long nvcsw, nivcsw; /* context switch counts */
struct timespec start_time; /* monotonic time */
@@ -1751,8 +1766,8 @@ static inline void put_task_struct(struct task_struct *t)
__put_task_struct(t);
}

- extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
- extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
+ extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
+ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);

/*
* Per process flags
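The new struct cputime groups a user/system time snapshot so that task_struct and signal_struct can carry the previously accounted values as a single prev_cputime field instead of the old prev_utime/prev_stime pair. A rough sketch of the grouped access pattern; the helper is hypothetical, and the real initialization is the open-coded assignment in the kernel/fork.c hunk further down.

#include <linux/sched.h>        /* struct cputime, cputime_t */

/* Hypothetical helper mirroring the zeroing done at fork time. */
static inline void prev_cputime_clear(struct cputime *prev)
{
        prev->utime = 0;
        prev->stime = 0;
}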
4 changes: 2 additions & 2 deletions kernel/exit.c
@@ -1186,11 +1186,11 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
* as other threads in the parent group can be right
* here reaping other children at the same time.
*
- * We use thread_group_times() to get times for the thread
+ * We use thread_group_cputime_adjusted() to get times for the thread
* group, which consolidates times for all threads in the
* group including the group leader.
*/
- thread_group_times(p, &tgutime, &tgstime);
+ thread_group_cputime_adjusted(p, &tgutime, &tgstime);
spin_lock_irq(&p->real_parent->sighand->siglock);
psig = p->real_parent->signal;
sig = p->signal;
2 changes: 1 addition & 1 deletion kernel/fork.c
@@ -1222,7 +1222,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->utime = p->stime = p->gtime = 0;
p->utimescaled = p->stimescaled = 0;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
- p->prev_utime = p->prev_stime = 0;
+ p->prev_cputime.utime = p->prev_cputime.stime = 0;
#endif
#if defined(SPLIT_RSS_COUNTING)
memset(&p->rss_stat, 0, sizeof(p->rss_stat));
24 changes: 0 additions & 24 deletions kernel/posix-cpu-timers.c
@@ -217,30 +217,6 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
return 0;
}

void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
struct signal_struct *sig = tsk->signal;
struct task_struct *t;

times->utime = sig->utime;
times->stime = sig->stime;
times->sum_exec_runtime = sig->sum_sched_runtime;

rcu_read_lock();
/* make sure we can trust tsk->thread_group list */
if (!likely(pid_alive(tsk)))
goto out;

t = tsk;
do {
times->utime += t->utime;
times->stime += t->stime;
times->sum_exec_runtime += task_sched_runtime(t);
} while_each_thread(tsk, t);
out:
rcu_read_unlock();
}

static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
{
if (b->utime > a->utime)
64 changes: 11 additions & 53 deletions kernel/sched/auto_group.c
@@ -110,6 +110,9 @@ static inline struct autogroup *autogroup_create(void)

bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
{
+ if (!sysctl_sched_autogroup_enabled)
+ return false;

if (tg != &root_task_group)
return false;

@@ -155,8 +158,11 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
/* Allocates GFP_KERNEL, cannot be called under any spinlock */
void sched_autogroup_create_attach(struct task_struct *p)
{
- struct autogroup *ag = autogroup_create();
+ struct autogroup *ag;

+ if (!sysctl_sched_autogroup_enabled)
+ return;
+ ag = autogroup_create();
autogroup_move_group(p, ag);
/* drop extra reference added by autogroup_create() */
autogroup_kref_put(ag);
@@ -172,11 +178,15 @@ EXPORT_SYMBOL(sched_autogroup_detach);

void sched_autogroup_fork(struct signal_struct *sig)
{
+ if (!sysctl_sched_autogroup_enabled)
+ return;
sig->autogroup = autogroup_task_get(current);
}

void sched_autogroup_exit(struct signal_struct *sig)
{
+ if (!sysctl_sched_autogroup_enabled)
+ return;
autogroup_kref_put(sig->autogroup);
}

@@ -189,58 +199,6 @@ static int __init setup_autogroup(char *str)

__setup("noautogroup", setup_autogroup);

#ifdef CONFIG_PROC_FS

int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
{
static unsigned long next = INITIAL_JIFFIES;
struct autogroup *ag;
int err;

if (nice < -20 || nice > 19)
return -EINVAL;

err = security_task_setnice(current, nice);
if (err)
return err;

if (nice < 0 && !can_nice(current, nice))
return -EPERM;

/* this is a heavy operation taking global locks.. */
if (!capable(CAP_SYS_ADMIN) && time_before(jiffies, next))
return -EAGAIN;

next = HZ / 10 + jiffies;
ag = autogroup_task_get(p);

down_write(&ag->lock);
err = sched_group_set_shares(ag->tg, prio_to_weight[nice + 20]);
if (!err)
ag->nice = nice;
up_write(&ag->lock);

autogroup_kref_put(ag);

return err;
}

void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m)
{
struct autogroup *ag = autogroup_task_get(p);

if (!task_group_is_autogroup(ag->tg))
goto out;

down_read(&ag->lock);
seq_printf(m, "/autogroup-%ld nice %d\n", ag->id, ag->nice);
up_read(&ag->lock);

out:
autogroup_kref_put(ag);
}
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SCHED_DEBUG
int autogroup_path(struct task_group *tg, char *buf, int buflen)
{
4 changes: 1 addition & 3 deletions kernel/sched/auto_group.h
@@ -24,9 +24,7 @@ extern bool task_wants_autogroup(struct task_struct *p, struct task_group *tg);
static inline struct task_group *
autogroup_task_group(struct task_struct *p, struct task_group *tg)
{
- int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);
-
- if (enabled && task_wants_autogroup(p, tg))
+ if (task_wants_autogroup(p, tg))
return p->signal->autogroup->tg;

return tg;