Commit ff24905
---
r: 3295
b: refs/heads/master
c: 4866cde
h: refs/heads/master
i:
  3293: d2f133f
  3291: 4530dad
  3287: 1ebf7e0
  3279: 042dcaf
  3263: 96bd72e
v: v3
Nick Piggin authored and Linus Torvalds committed Jun 25, 2005
1 parent bc54ab8 commit ff24905
Showing 10 changed files with 132 additions and 98 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 48c08d3f8ff94fa118187e4d8d4a5707bb85e59d
+refs/heads/master: 4866cde064afbb6c2a488c265e696879de616daa
30 changes: 4 additions & 26 deletions trunk/include/asm-arm/system.h
@@ -145,34 +145,12 @@ extern unsigned int user_debug;
 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
 #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

-#ifdef CONFIG_SMP
 /*
- * Define our own context switch locking. This allows us to enable
- * interrupts over the context switch, otherwise we end up with high
- * interrupt latency. The real problem area is switch_mm() which may
- * do a full cache flush.
+ * switch_mm() may do a full cache flush over the context switch,
+ * so enable interrupts over the context switch to avoid high
+ * latency.
  */
-#define prepare_arch_switch(rq,next) \
-do { \
-	spin_lock(&(next)->switch_lock); \
-	spin_unlock_irq(&(rq)->lock); \
-} while (0)
-
-#define finish_arch_switch(rq,prev) \
-	spin_unlock(&(prev)->switch_lock)
-
-#define task_running(rq,p) \
-	((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
-#else
-/*
- * Our UP-case is more simple, but we assume knowledge of how
- * spin_unlock_irq() and friends are implemented. This avoids
- * us needlessly decrementing and incrementing the preempt count.
- */
-#define prepare_arch_switch(rq,next)	local_irq_enable()
-#define finish_arch_switch(rq,prev)	spin_unlock(&(rq)->lock)
-#define task_running(rq,p)		((rq)->curr == (p))
-#endif
+#define __ARCH_WANT_INTERRUPTS_ON_CTXSW

 /*
  * switch_to(prev, next) should switch from task `prev' to `next'
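
ARM no longer supplies its own switch locking; it just sets __ARCH_WANT_INTERRUPTS_ON_CTXSW and lets the scheduler core act on it. The kernel/sched.c side of this commit is not expanded in this listing, but presumably the core now drops the runqueue lock with interrupts re-enabled before the switch, roughly as in this sketch (helper name prepare_lock_switch is assumed, following the commit's style, not verbatim source):

static inline void prepare_lock_switch(runqueue_t *rq, task_t *next)
{
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	/* Interrupts come back on here, so switch_mm()'s potentially
	 * slow cache flush no longer runs with irqs masked. */
	spin_unlock_irq(&rq->lock);
#else
	spin_unlock(&rq->lock);	/* irqs stay off until the switch ends */
#endif
}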
10 changes: 1 addition & 9 deletions trunk/include/asm-ia64/system.h
@@ -183,8 +183,6 @@ do { \

 #ifdef __KERNEL__

-#define prepare_to_switch() do { } while(0)
-
 #ifdef CONFIG_IA32_SUPPORT
 # define IS_IA32_PROCESS(regs) (ia64_psr(regs)->is != 0)
 #else
@@ -274,13 +272,7 @@ extern void ia64_load_extra (struct task_struct *task);
  * of that CPU which will not be released, because there we wait for the
  * tasklist_lock to become available.
  */
-#define prepare_arch_switch(rq, next) \
-do { \
-	spin_lock(&(next)->switch_lock); \
-	spin_unlock(&(rq)->lock); \
-} while (0)
-#define finish_arch_switch(rq, prev) spin_unlock_irq(&(prev)->switch_lock)
-#define task_running(rq, p) ((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
+#define __ARCH_WANT_UNLOCKED_CTXSW

 #define ia64_platform_is(x) (strcmp(x, platform_name) == 0)

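
With the per-task switch_lock gone, the scheduler needs another way to tell that a task is still mid-switch on its old CPU; the oncpu field added to task_struct further down serves that role. A sketch of the likely replacement for the deleted task_running() test (inferred from this diff, not the verbatim kernel/sched.c hunk):

static inline int task_running(runqueue_t *rq, task_t *p)
{
#ifdef CONFIG_SMP
	/* Set while p is being switched in or running; stands in
	 * for the old spin_is_locked(&p->switch_lock) test. */
	return p->oncpu;
#else
	return rq->curr == p;
#endif
}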
10 changes: 2 additions & 8 deletions trunk/include/asm-mips/system.h
@@ -422,16 +422,10 @@ extern void __die_if_kernel(const char *, struct pt_regs *, const char *file,
 extern int stop_a_enabled;

 /*
- * Taken from include/asm-ia64/system.h; prevents deadlock on SMP
+ * See include/asm-ia64/system.h; prevents deadlock on SMP
  * systems.
  */
-#define prepare_arch_switch(rq, next) \
-do { \
-	spin_lock(&(next)->switch_lock); \
-	spin_unlock(&(rq)->lock); \
-} while (0)
-#define finish_arch_switch(rq, prev) spin_unlock_irq(&(prev)->switch_lock)
-#define task_running(rq, p) ((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
+#define __ARCH_WANT_UNLOCKED_CTXSW

 #define arch_align_stack(x) (x)

17 changes: 3 additions & 14 deletions trunk/include/asm-s390/system.h
@@ -104,29 +104,18 @@ static inline void restore_access_regs(unsigned int *acrs)
 	prev = __switch_to(prev,next); \
 } while (0)

-#define prepare_arch_switch(rq, next)	do { } while(0)
-#define task_running(rq, p)		((rq)->curr == (p))
-
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 extern void account_user_vtime(struct task_struct *);
 extern void account_system_vtime(struct task_struct *);
-
-#define finish_arch_switch(rq, prev) do { \
-	set_fs(current->thread.mm_segment); \
-	spin_unlock(&(rq)->lock); \
-	account_system_vtime(prev); \
-	local_irq_enable(); \
-} while (0)
-
 #else
 #define account_system_vtime(prev) do { } while (0)
 #endif

-#define finish_arch_switch(rq, prev) do { \
+#define finish_arch_switch(prev) do { \
 	set_fs(current->thread.mm_segment); \
-	spin_unlock_irq(&(rq)->lock); \
 	account_system_vtime(prev); \
 } while (0)

 #endif

 #define nop() __asm__ __volatile__ ("nop")

 #define xchg(ptr,x) \
4 changes: 1 addition & 3 deletions trunk/include/asm-sparc/system.h
@@ -101,16 +101,14 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
  * SWITCH_ENTER and SWITH_DO_LAZY_FPU do not work yet (e.g. SMP does not work)
  * XXX WTF is the above comment? Found in late teen 2.4.x.
  */
-#define prepare_arch_switch(rq, next) do { \
+#define prepare_arch_switch(next) do { \
 	__asm__ __volatile__( \
 	".globl\tflush_patch_switch\nflush_patch_switch:\n\t" \
 	"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
 	"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
 	"save %sp, -0x40, %sp\n\t" \
 	"restore; restore; restore; restore; restore; restore; restore"); \
 } while(0)
-#define finish_arch_switch(rq, next)	spin_unlock_irq(&(rq)->lock)
-#define task_running(rq, p)		((rq)->curr == (p))

 /* Much care has gone into this code, do not touch it.
  *
14 changes: 4 additions & 10 deletions trunk/include/asm-sparc64/system.h
@@ -139,19 +139,13 @@ extern void __flushw_user(void);
 #define flush_user_windows flushw_user
 #define flush_register_windows flushw_all

-#define prepare_arch_switch(rq, next) \
-do { spin_lock(&(next)->switch_lock); \
-	spin_unlock(&(rq)->lock); \
+/* Don't hold the runqueue lock over context switch */
+#define __ARCH_WANT_UNLOCKED_CTXSW
+#define prepare_arch_switch(next) \
+do { \
 	flushw_all(); \
 } while (0)

-#define finish_arch_switch(rq, prev) \
-do { spin_unlock_irq(&(prev)->switch_lock); \
-} while (0)
-
-#define task_running(rq, p) \
-	((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
-
 /* See what happens when you design the chip correctly?
  *
  * We tell gcc we clobber all non-fixed-usage registers except
1 change: 0 additions & 1 deletion trunk/include/linux/init_task.h
@@ -108,7 +108,6 @@ extern struct group_info init_groups;
 	.blocked = {{0}}, \
 	.alloc_lock = SPIN_LOCK_UNLOCKED, \
 	.proc_lock = SPIN_LOCK_UNLOCKED, \
-	.switch_lock = SPIN_LOCK_UNLOCKED, \
 	.journal_info = NULL, \
 	.cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
 }
10 changes: 8 additions & 2 deletions trunk/include/linux/sched.h
@@ -368,6 +368,11 @@ struct signal_struct {
 #endif
 };

+/* Context switch must be unlocked if interrupts are to be enabled */
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+# define __ARCH_WANT_UNLOCKED_CTXSW
+#endif
+
 /*
  * Bits in flags field of signal_struct.
  */
@@ -594,6 +599,9 @@ struct task_struct {

 	int lock_depth;	/* BKL lock depth */

+#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+	int oncpu;
+#endif
 	int prio, static_prio;
 	struct list_head run_list;
 	prio_array_t *array;
@@ -716,8 +724,6 @@
 	spinlock_t alloc_lock;
 /* Protection of proc_dentry: nesting proc_lock, dcache_lock, write_lock_irq(&tasklist_lock); */
 	spinlock_t proc_lock;
-/* context-switch lock */
-	spinlock_t switch_lock;

 /* journalling filesystem info */
 	void *journal_info;
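
The oncpu protocol only works if the flag is cleared strictly after the outgoing task's switch has completed: once oncpu drops to 0, another CPU may pull the task onto its runqueue, so the stores that hand over the stack and registers must be ordered first. A sketch of the expected hand-off, composing with the unlock sketched after the asm-arm hunk (assumed helper name; the kernel/sched.c hunk itself is not expanded in this listing):

static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
{
#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
	/* Order the context hand-over before clearing the flag:
	 * after prev->oncpu == 0, prev may run on another CPU. */
	smp_wmb();
	prev->oncpu = 0;
#endif
#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	local_irq_enable();	/* irqs were kept off across the switch */
#endif
}

The matching prepare side would set next->oncpu = 1 before the runqueue lock is dropped, so there is no window in which the incoming task appears to be off-CPU.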