Commit

---
r: 69094
b: refs/heads/master
c: a995744
h: refs/heads/master
v: v3
Alexey Dobriyan authored and Ingo Molnar committed Oct 15, 2007
1 parent a9684c2 commit d6ec03d
Showing 4 changed files with 25 additions and 25 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 155bb293ae8387526e6e07d42b1691104e55d9a2
+refs/heads/master: a9957449b08ab561a33e1e038df06843b8d8dd9f
44 changes: 22 additions & 22 deletions trunk/kernel/sched.c
@@ -608,7 +608,7 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	return rq;
 }
 
-static inline void __task_rq_unlock(struct rq *rq)
+static void __task_rq_unlock(struct rq *rq)
 	__releases(rq->lock)
 {
 	spin_unlock(&rq->lock);
@@ -623,7 +623,7 @@ static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
 /*
  * this_rq_lock - lock this runqueue and disable interrupts.
  */
-static inline struct rq *this_rq_lock(void)
+static struct rq *this_rq_lock(void)
 	__acquires(rq->lock)
 {
 	struct rq *rq;
@@ -985,20 +985,6 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 	inc_nr_running(p, rq);
 }
 
-/*
- * activate_idle_task - move idle task to the _front_ of runqueue.
- */
-static inline void activate_idle_task(struct task_struct *p, struct rq *rq)
-{
-	update_rq_clock(rq);
-
-	if (p->state == TASK_UNINTERRUPTIBLE)
-		rq->nr_uninterruptible--;
-
-	enqueue_task(rq, p, 0);
-	inc_nr_running(p, rq);
-}
-
 /*
  * deactivate_task - remove a task from the runqueue.
  */
@@ -1206,7 +1192,7 @@ void kick_process(struct task_struct *p)
  * We want to under-estimate the load of migration sources, to
  * balance conservatively.
  */
-static inline unsigned long source_load(int cpu, int type)
+static unsigned long source_load(int cpu, int type)
 {
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long total = weighted_cpuload(cpu);
@@ -1221,7 +1207,7 @@ static inline unsigned long source_load(int cpu, int type)
  * Return a high guess at the load of a migration-target cpu weighted
  * according to the scheduling class and "nice" value.
  */
-static inline unsigned long target_load(int cpu, int type)
+static unsigned long target_load(int cpu, int type)
 {
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long total = weighted_cpuload(cpu);
@@ -1813,7 +1799,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
  * with the lock held can cause deadlocks; see schedule() for
  * details.)
  */
-static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
+static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	__releases(rq->lock)
 {
 	struct mm_struct *mm = rq->prev_mm;
@@ -3020,7 +3006,7 @@ static DEFINE_SPINLOCK(balancing);
  *
  * Balancing parameters are set up in arch_init_sched_domains.
  */
-static inline void rebalance_domains(int cpu, enum cpu_idle_type idle)
+static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 {
 	int balance = 1;
 	struct rq *rq = cpu_rq(cpu);
@@ -4140,7 +4126,7 @@ struct task_struct *idle_task(int cpu)
  * find_process_by_pid - find a process with a matching PID value.
  * @pid: the pid in question.
  */
-static inline struct task_struct *find_process_by_pid(pid_t pid)
+static struct task_struct *find_process_by_pid(pid_t pid)
 {
 	return pid ? find_task_by_pid(pid) : current;
 }
@@ -5156,6 +5142,20 @@ static void migrate_live_tasks(int src_cpu)
 	write_unlock_irq(&tasklist_lock);
 }
 
+/*
+ * activate_idle_task - move idle task to the _front_ of runqueue.
+ */
+static void activate_idle_task(struct task_struct *p, struct rq *rq)
+{
+	update_rq_clock(rq);
+
+	if (p->state == TASK_UNINTERRUPTIBLE)
+		rq->nr_uninterruptible--;
+
+	enqueue_task(rq, p, 0);
+	inc_nr_running(p, rq);
+}
+
 /*
  * Schedules idle task to be the next runnable task on current CPU.
  * It does so by boosting its priority to highest possible and adding it to
@@ -6494,7 +6494,7 @@ int in_sched_functions(unsigned long addr)
 		&& addr < (unsigned long)__sched_text_end);
 }
 
-static inline void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
+static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
 {
 	cfs_rq->tasks_timeline = RB_ROOT;
 #ifdef CONFIG_FAIR_GROUP_SCHED
2 changes: 1 addition & 1 deletion trunk/kernel/sched_fair.c
@@ -892,7 +892,7 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
  * achieve that by always pre-iterating before returning
  * the current task:
  */
-static inline struct task_struct *
+static struct task_struct *
 __load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
 {
 	struct task_struct *p;
2 changes: 1 addition & 1 deletion trunk/kernel/sched_rt.c
@@ -7,7 +7,7 @@
  * Update the current task's runtime statistics. Skip current tasks that
  * are not in our scheduling class.
  */
-static inline void update_curr_rt(struct rq *rq)
+static void update_curr_rt(struct rq *rq)
 {
 	struct task_struct *curr = rq->curr;
 	u64 delta_exec;
