Skip to content

Commit

Permalink
sched: Move struct sched_group to kernel/sched/sched.h
Browse files Browse the repository at this point in the history
Move struct sched_group_power, struct sched_group and their related
inline helper functions to kernel/sched/sched.h, since they are used
only internally by the scheduler.

Signed-off-by: Li Zefan <lizefan@huawei.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/5135A77F.2010705@huawei.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
  • Loading branch information
Li Zefan authored and Ingo Molnar committed Mar 6, 2013
1 parent cc1f4b1 commit 5e6521e
Show file tree
Hide file tree
Showing 2 changed files with 58 additions and 56 deletions.
58 changes: 2 additions & 56 deletions include/linux/sched.h
Original file line number Diff line number Diff line change
Expand Up @@ -780,62 +780,6 @@ enum cpu_idle_type {

extern int __weak arch_sd_sibiling_asym_packing(void);

struct sched_group_power {
atomic_t ref;
/*
* CPU power of this group, SCHED_LOAD_SCALE being max power for a
* single CPU.
*/
unsigned int power, power_orig;
unsigned long next_update;
/*
* Number of busy cpus in this group.
*/
atomic_t nr_busy_cpus;

unsigned long cpumask[0]; /* iteration mask */
};

struct sched_group {
struct sched_group *next; /* Must be a circular list */
atomic_t ref;

unsigned int group_weight;
struct sched_group_power *sgp;

/*
* The CPUs this group covers.
*
* NOTE: this field is variable length. (Allocated dynamically
* by attaching extra space to the end of the structure,
* depending on how many CPUs the kernel has booted up with)
*/
unsigned long cpumask[0];
};

static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
return to_cpumask(sg->cpumask);
}

/*
* cpumask masking which cpus in the group are allowed to iterate up the domain
* tree.
*/
static inline struct cpumask *sched_group_mask(struct sched_group *sg)
{
return to_cpumask(sg->sgp->cpumask);
}

/**
* group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
* @group: The group whose first cpu is to be returned.
*/
static inline unsigned int group_first_cpu(struct sched_group *group)
{
return cpumask_first(sched_group_cpus(group));
}

struct sched_domain_attr {
int relax_domain_level;
};
Expand All @@ -846,6 +790,8 @@ struct sched_domain_attr {

extern int sched_domain_level_max;

struct sched_group;

struct sched_domain {
/* These fields must be setup */
struct sched_domain *parent; /* top domain must be null terminated */
Expand Down
56 changes: 56 additions & 0 deletions kernel/sched/sched.h
Original file line number Diff line number Diff line change
Expand Up @@ -572,6 +572,62 @@ static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
DECLARE_PER_CPU(struct sched_domain *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_id);

struct sched_group_power {
atomic_t ref;
/*
* CPU power of this group, SCHED_LOAD_SCALE being max power for a
* single CPU.
*/
unsigned int power, power_orig;
unsigned long next_update;
/*
* Number of busy cpus in this group.
*/
atomic_t nr_busy_cpus;

unsigned long cpumask[0]; /* iteration mask */
};

struct sched_group {
struct sched_group *next; /* Must be a circular list */
atomic_t ref;

unsigned int group_weight;
struct sched_group_power *sgp;

/*
* The CPUs this group covers.
*
* NOTE: this field is variable length. (Allocated dynamically
* by attaching extra space to the end of the structure,
* depending on how many CPUs the kernel has booted up with)
*/
unsigned long cpumask[0];
};

static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
return to_cpumask(sg->cpumask);
}

/*
* cpumask masking which cpus in the group are allowed to iterate up the domain
* tree.
*/
static inline struct cpumask *sched_group_mask(struct sched_group *sg)
{
return to_cpumask(sg->sgp->cpumask);
}

/**
* group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
* @group: The group whose first cpu is to be returned.
*/
static inline unsigned int group_first_cpu(struct sched_group *group)
{
return cpumask_first(sched_group_cpus(group));
}

extern int group_balance_cpu(struct sched_group *sg);

#endif /* CONFIG_SMP */
Expand Down

0 comments on commit 5e6521e

Please sign in to comment.