
Commit

---
yaml
---
r: 323931
b: refs/heads/master
c: f3e9478
h: refs/heads/master
i:
  323929: 6a55965
  323927: 97810dc
v: v3
Peter Zijlstra authored and Ingo Molnar committed Sep 13, 2012
1 parent 35a26e5 commit 04f6cc5
Showing 7 changed files with 2 additions and 70 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 5ed4f1d96deee82ee92cd1ac1e0108c27e80e9b0
+refs/heads/master: f3e947867478af9a12b9956bcd000ac7613a8a95
10 changes: 0 additions & 10 deletions trunk/Documentation/scheduler/sched-arch.txt
@@ -17,16 +17,6 @@ you must `#define __ARCH_WANT_UNLOCKED_CTXSW` in a header file
 Unlocked context switches introduce only a very minor performance
 penalty to the core scheduler implementation in the CONFIG_SMP case.
 
-2. Interrupt status
-By default, the switch_to arch function is called with interrupts
-disabled. Interrupts may be enabled over the call if it is likely to
-introduce a significant interrupt latency by adding the line
-`#define __ARCH_WANT_INTERRUPTS_ON_CTXSW` in the same place as for
-unlocked context switches. This define also implies
-`__ARCH_WANT_UNLOCKED_CTXSW`. See arch/arm/include/asm/system.h for an
-example.
-
-
 CPU idle
 ========
 Your cpu_idle routines need to obey the following rules:
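The documentation removed in the hunk above is a how-to for architectures: defining __ARCH_WANT_INTERRUPTS_ON_CTXSW in an arch header let switch_to() run with interrupts enabled, and that define also implied __ARCH_WANT_UNLOCKED_CTXSW (the implication is exactly what the include/linux/sched.h hunk below deletes). A minimal illustrative sketch of that pre-commit opt-in, not part of this commit and reduced to the two defines involved:

    /* Sketch only: the pre-commit opt-in described by the removed documentation.
     * An architecture header (the removed text cites arch/arm/include/asm/system.h)
     * carried the define; include/linux/sched.h (see the hunk below) then made it
     * imply unlocked context switches as well. */
    #define __ARCH_WANT_INTERRUPTS_ON_CTXSW

    #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
    # define __ARCH_WANT_UNLOCKED_CTXSW
    #endif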
5 changes: 0 additions & 5 deletions trunk/include/linux/sched.h
@@ -678,11 +678,6 @@ struct signal_struct {
                                         * (notably. ptrace) */
 };
 
-/* Context switch must be unlocked if interrupts are to be enabled */
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-# define __ARCH_WANT_UNLOCKED_CTXSW
-#endif
-
 /*
  * Bits in flags field of signal_struct.
  */
4 changes: 0 additions & 4 deletions trunk/kernel/fork.c
@@ -1280,11 +1280,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
         p->irq_events = 0;
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-        p->hardirqs_enabled = 1;
-#else
         p->hardirqs_enabled = 0;
-#endif
         p->hardirq_enable_ip = 0;
         p->hardirq_enable_event = 0;
         p->hardirq_disable_ip = _THIS_IP_;
40 changes: 1 addition & 39 deletions trunk/kernel/sched/core.c
@@ -1361,25 +1361,6 @@ static void ttwu_queue_remote(struct task_struct *p, int cpu)
         smp_send_reschedule(cpu);
 }
 
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-static int ttwu_activate_remote(struct task_struct *p, int wake_flags)
-{
-        struct rq *rq;
-        int ret = 0;
-
-        rq = __task_rq_lock(p);
-        if (p->on_cpu) {
-                ttwu_activate(rq, p, ENQUEUE_WAKEUP);
-                ttwu_do_wakeup(rq, p, wake_flags);
-                ret = 1;
-        }
-        __task_rq_unlock(rq);
-
-        return ret;
-
-}
-#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
-
 bool cpus_share_cache(int this_cpu, int that_cpu)
 {
         return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
@@ -1440,21 +1421,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
          * If the owning (remote) cpu is still in the middle of schedule() with
          * this task as prev, wait until its done referencing the task.
          */
-        while (p->on_cpu) {
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-                /*
-                 * In case the architecture enables interrupts in
-                 * context_switch(), we cannot busy wait, since that
-                 * would lead to deadlocks when an interrupt hits and
-                 * tries to wake up @prev. So bail and do a complete
-                 * remote wakeup.
-                 */
-                if (ttwu_activate_remote(p, wake_flags))
-                        goto stat;
-#else
+        while (p->on_cpu)
                 cpu_relax();
-#endif
-        }
         /*
          * Pairs with the smp_wmb() in finish_lock_switch().
          */
@@ -1798,13 +1766,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
         prev_state = prev->state;
         account_switch_vtime(prev);
         finish_arch_switch(prev);
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-        local_irq_disable();
-#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
         perf_event_task_sched_in(prev, current);
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-        local_irq_enable();
-#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
         finish_lock_switch(rq, prev);
         finish_arch_post_lock_switch();
 
5 changes: 0 additions & 5 deletions trunk/kernel/sched/rt.c
@@ -1632,11 +1632,6 @@ static int push_rt_task(struct rq *rq)
         if (!next_task)
                 return 0;
 
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-        if (unlikely(task_running(rq, next_task)))
-                return 0;
-#endif
-
 retry:
         if (unlikely(next_task == rq->curr)) {
                 WARN_ON(1);
6 changes: 0 additions & 6 deletions trunk/kernel/sched/sched.h
@@ -737,11 +737,7 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
          */
         next->on_cpu = 1;
 #endif
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-        raw_spin_unlock_irq(&rq->lock);
-#else
         raw_spin_unlock(&rq->lock);
-#endif
 }
 
 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
@@ -755,9 +751,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
         smp_wmb();
         prev->on_cpu = 0;
 #endif
-#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
         local_irq_enable();
-#endif
 }
 #endif /* __ARCH_WANT_UNLOCKED_CTXSW */
 
