yaml
---
r: 366075
b: refs/heads/master
c: 56dd947
h: refs/heads/master
i:
  366073: 7a6ea4c
  366071: 93e5400
v: v3
Frederic Weisbecker committed Mar 7, 2013
1 parent a1b3217 commit 1ef7f95
Showing 10 changed files with 225 additions and 195 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 27b4b9319a3c2e8654d45df99ce584c7c2cfe100
refs/heads/master: 56dd9470d7c8734f055da2a6bac553caf4a468eb
21 changes: 0 additions & 21 deletions trunk/arch/x86/include/asm/context_tracking.h
@@ -1,31 +1,10 @@
#ifndef _ASM_X86_CONTEXT_TRACKING_H
#define _ASM_X86_CONTEXT_TRACKING_H

#ifndef __ASSEMBLY__
#include <linux/context_tracking.h>
#include <asm/ptrace.h>

static inline void exception_enter(struct pt_regs *regs)
{
	user_exit();
}

static inline void exception_exit(struct pt_regs *regs)
{
#ifdef CONFIG_CONTEXT_TRACKING
	if (user_mode(regs))
		user_enter();
#endif
}

#else /* __ASSEMBLY__ */

#ifdef CONFIG_CONTEXT_TRACKING
# define SCHEDULE_USER call schedule_user
#else
# define SCHEDULE_USER call schedule
#endif

#endif /* !__ASSEMBLY__ */

#endif
2 changes: 1 addition & 1 deletion trunk/arch/x86/kernel/kvm.c
@@ -20,6 +20,7 @@
 * Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
@@ -43,7 +44,6 @@
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>
#include <asm/context_tracking.h>

static int kvmapf = 1;

3 changes: 1 addition & 2 deletions trunk/arch/x86/kernel/traps.c
@@ -12,6 +12,7 @@

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/context_tracking.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
@@ -55,8 +56,6 @@
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/mce.h>
#include <asm/context_tracking.h>

#include <asm/mach_traps.h>

#ifdef CONFIG_X86_64
2 changes: 1 addition & 1 deletion trunk/arch/x86/mm/fault.c
@@ -13,12 +13,12 @@
#include <linux/perf_event.h> /* perf_sw_event */
#include <linux/hugetlb.h> /* hstate_index_to_shift */
#include <linux/prefetch.h> /* prefetchw */
#include <linux/context_tracking.h> /* exception_enter(), ... */

#include <asm/traps.h> /* dotraplinkage, ... */
#include <asm/pgalloc.h> /* pgd_*(), ... */
#include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
#include <asm/fixmap.h> /* VSYSCALL_START */
#include <asm/context_tracking.h> /* exception_enter(), ... */

/*
 * Page fault error code bits:
17 changes: 16 additions & 1 deletion trunk/include/linux/context_tracking.h
@@ -1,10 +1,11 @@
#ifndef _LINUX_CONTEXT_TRACKING_H
#define _LINUX_CONTEXT_TRACKING_H

#ifdef CONFIG_CONTEXT_TRACKING
#include <linux/sched.h>
#include <linux/percpu.h>
#include <asm/ptrace.h>

#ifdef CONFIG_CONTEXT_TRACKING
struct context_tracking {
	/*
	 * When active is false, probes are unset in order
@@ -33,12 +34,26 @@ static inline bool context_tracking_active(void)

extern void user_enter(void);
extern void user_exit(void);

static inline void exception_enter(struct pt_regs *regs)
{
	user_exit();
}

static inline void exception_exit(struct pt_regs *regs)
{
	if (user_mode(regs))
		user_enter();
}

extern void context_tracking_task_switch(struct task_struct *prev,
					 struct task_struct *next);
#else
static inline bool context_tracking_in_user(void) { return false; }
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline void exception_enter(struct pt_regs *regs) { }
static inline void exception_exit(struct pt_regs *regs) { }
static inline void context_tracking_task_switch(struct task_struct *prev,
						struct task_struct *next) { }
#endif /* !CONFIG_CONTEXT_TRACKING */
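
The point of hoisting these helpers into the generic header is that any architecture's exception handlers can call them directly. Below is a minimal sketch of the intended call pattern, with a hypothetical handler name (the real callers are the x86 trap and page-fault handlers touched by this commit, such as do_page_fault()):

#include <linux/context_tracking.h>

/*
 * Hypothetical handler, illustration only: exception_enter() tells context
 * tracking that the CPU may have left user mode, exception_exit() restores
 * user-mode accounting if the exception came from userspace.
 * (dotraplinkage is the x86 trap-handler linkage annotation.)
 */
dotraplinkage void do_example_trap(struct pt_regs *regs, long error_code)
{
	exception_enter(regs);
	/* ... handle the exception ... */
	exception_exit(regs);
}
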
194 changes: 192 additions & 2 deletions trunk/include/linux/sched.h
@@ -127,6 +127,18 @@ extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#else
static inline void
proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
}
static inline void proc_sched_set_task(struct task_struct *p)
{
}
static inline void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
}
#endif

/*
@@ -755,6 +767,31 @@ enum cpu_idle_type {
CPU_MAX_IDLE_TYPES
};

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
 * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
 * increased costs.
 */
#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
# define SCHED_LOAD_RESOLUTION 10
# define scale_load(w) ((w) << SCHED_LOAD_RESOLUTION)
# define scale_load_down(w) ((w) >> SCHED_LOAD_RESOLUTION)
#else
# define SCHED_LOAD_RESOLUTION 0
# define scale_load(w) (w)
# define scale_load_down(w) (w)
#endif

#define SCHED_LOAD_SHIFT (10 + SCHED_LOAD_RESOLUTION)
#define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT)
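
To make the scaling concrete, here is a small illustration (not part of the header) of how the scale_load()/scale_load_down() pair behaves; the weight 1024 is the default nice-0 load weight and is used only as an example value:

	unsigned long w  = 1024;		/* example: default nice-0 load weight */
	unsigned long lw = scale_load(w);	/* 1024 << 10 == 1048576 when SCHED_LOAD_RESOLUTION is 10 */
	WARN_ON(scale_load_down(lw) != w);	/* shifting back down recovers the plain weight */
	/* with the resolution forced to 0 above, both macros are the identity */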

/*
 * Increase resolution of cpu_power calculations
 */
@@ -780,6 +817,62 @@ enum cpu_idle_type {

extern int __weak arch_sd_sibiling_asym_packing(void);

struct sched_group_power {
	atomic_t ref;
	/*
	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
	 * single CPU.
	 */
	unsigned int power, power_orig;
	unsigned long next_update;
	/*
	 * Number of busy cpus in this group.
	 */
	atomic_t nr_busy_cpus;

	unsigned long cpumask[0]; /* iteration mask */
};

struct sched_group {
	struct sched_group *next; /* Must be a circular list */
	atomic_t ref;

	unsigned int group_weight;
	struct sched_group_power *sgp;

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long cpumask[0];
};

static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

/*
 * cpumask masking which cpus in the group are allowed to iterate up the domain
 * tree.
 */
static inline struct cpumask *sched_group_mask(struct sched_group *sg)
{
	return to_cpumask(sg->sgp->cpumask);
}

/**
 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
 * @group: The group whose first cpu is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
	return cpumask_first(sched_group_cpus(group));
}
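
A short sketch (not from this commit) of why cpumask[0] is a zero-length trailing member and how the accessors above are meant to be used: the group and its CPU mask are allocated as one block, with cpumask_size() extra bytes appended. The allocation and mask values here are purely illustrative; the real setup lives in the scheduler core.

	struct sched_group *sg;

	sg = kzalloc(sizeof(struct sched_group) + cpumask_size(), GFP_KERNEL);
	if (sg) {
		int cpu;

		cpumask_copy(sched_group_cpus(sg), cpu_online_mask);	/* fill the trailing mask */
		for_each_cpu(cpu, sched_group_cpus(sg))
			pr_info("sched_group contains CPU %d\n", cpu);
	}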

struct sched_domain_attr {
	int relax_domain_level;
};
@@ -790,8 +883,6 @@ struct sched_domain_attr {

extern int sched_domain_level_max;

struct sched_group;

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain *parent; /* top domain must be null terminated */
@@ -880,6 +971,18 @@ extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);

/* Test a flag in parent sched domain */
static inline int test_sd_parent(struct sched_domain *sd, int flag)
{
	if (sd->parent && (sd->parent->flags & flag))
		return 1;

	return 0;
}
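
As a usage illustration (hypothetical caller; SD_SHARE_PKG_RESOURCES is just one example of the existing SD_* topology flags), a load-balancing path can ask whether the parent domain has a given property:

	if (test_sd_parent(sd, SD_SHARE_PKG_RESOURCES)) {
		/* the parent domain shares package resources, e.g. a last-level cache */
	}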

unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);

bool cpus_share_cache(int this_cpu, int that_cpu);

#else /* CONFIG_SMP */
@@ -914,6 +1017,72 @@ struct mempolicy;
struct pipe_inode_info;
struct uts_namespace;

struct rq;
struct sched_domain;

/*
 * wake flags
 */
#define WF_SYNC 0x01 /* waker goes to sleep after wakup */
#define WF_FORK 0x02 /* child wakeup after fork */
#define WF_MIGRATED 0x04 /* internal use, task got migrated */

#define ENQUEUE_WAKEUP 1
#define ENQUEUE_HEAD 2
#ifdef CONFIG_SMP
#define ENQUEUE_WAKING 4 /* sched_class::task_waking was called */
#else
#define ENQUEUE_WAKING 0
#endif

#define DEQUEUE_SLEEP 1

struct sched_class {
	const struct sched_class *next;

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*yield_task) (struct rq *rq);
	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);

	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);

	struct task_struct * (*pick_next_task) (struct rq *rq);
	void (*put_prev_task) (struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
	int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
	void (*migrate_task_rq)(struct task_struct *p, int next_cpu);

	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
	void (*post_schedule) (struct rq *this_rq);
	void (*task_waking) (struct task_struct *task);
	void (*task_woken) (struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
#endif

	void (*set_curr_task) (struct rq *rq);
	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
	void (*task_fork) (struct task_struct *p);

	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			      int oldprio);

	unsigned int (*get_rr_interval) (struct rq *rq,
					 struct task_struct *task);

#ifdef CONFIG_FAIR_GROUP_SCHED
	void (*task_move_group) (struct task_struct *p, int on_rq);
#endif
};
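
The classes form a priority-ordered singly linked list through ->next. A simplified sketch of how the core scheduler walks that list to choose the next task (illustration only; the real loop lives in kernel/sched/core.c, relies on the for_each_class() helper from the scheduler's internal headers, and adds a fair-class fast path):

static struct task_struct *pick_next_task_sketch(struct rq *rq)
{
	const struct sched_class *class;
	struct task_struct *p;

	for_each_class(class) {		/* highest-priority class first */
		p = class->pick_next_task(rq);
		if (p)
			return p;
	}
	BUG();	/* unreachable: the idle class always returns a task */
}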

struct load_weight {
	unsigned long weight, inv_weight;
};
@@ -2512,7 +2681,28 @@ extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

#ifdef CONFIG_CGROUP_SCHED

extern struct task_group root_task_group;

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_offline_group(struct task_group *tg);
extern void sched_move_task(struct task_struct *tsk);
#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
extern unsigned long sched_group_shares(struct task_group *tg);
#endif
#ifdef CONFIG_RT_GROUP_SCHED
extern int sched_group_set_rt_runtime(struct task_group *tg,
				      long rt_runtime_us);
extern long sched_group_rt_runtime(struct task_group *tg);
extern int sched_group_set_rt_period(struct task_group *tg,
				     long rt_period_us);
extern long sched_group_rt_period(struct task_group *tg);
extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
#endif
#endif /* CONFIG_CGROUP_SCHED */
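
A rough lifecycle sketch using only the functions declared above (illustration, not the actual cgroup cpu-controller or autogroup code; error handling is omitted and the shares value is arbitrary):

	struct task_group *tg;

	tg = sched_create_group(&root_task_group);		/* allocate as a child of the root group */
	if (!IS_ERR(tg)) {
		sched_online_group(tg, &root_task_group);	/* hook it into the scheduler hierarchy */
#ifdef CONFIG_FAIR_GROUP_SCHED
		sched_group_set_shares(tg, scale_load(2048));	/* twice the default nice-0 weight */
#endif
		/* tasks are attached via the cgroup layer, which ends up calling sched_move_task() */
		sched_offline_group(tg);
		sched_destroy_group(tg);
	}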

extern int task_can_switch_user(struct user_struct *up,