Commit 29a851a

---
yaml
---
r: 125494
b: refs/heads/master
c: 9cfb9b3
h: refs/heads/master
v: v3
Martin Schwidefsky committed Dec 31, 2008
1 parent 5ee3a31 commit 29a851a
Showing 10 changed files with 249 additions and 266 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 6f43092441bda528dd38f2dc6c1e2522c5079fb7
refs/heads/master: 9cfb9b3c3a7361c793c031e9c3583b177ac5debd
4 changes: 2 additions & 2 deletions trunk/arch/s390/include/asm/cpu.h
@@ -21,12 +21,12 @@ struct s390_idle_data {

DECLARE_PER_CPU(struct s390_idle_data, s390_idle);

void s390_idle_leave(void);
void vtime_start_cpu(void);

static inline void s390_idle_check(void)
{
if ((&__get_cpu_var(s390_idle))->idle_enter != 0ULL)
s390_idle_leave();
vtime_start_cpu();
}

#endif /* _ASM_S390_CPU_H_ */
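
For orientation: after this change, s390_idle_check() delegates the idle-exit work to vtime_start_cpu() whenever the per-cpu idle_enter timestamp is non-zero. A minimal usage sketch follows (the caller name is hypothetical; it mirrors the do_extint() hunk at the bottom of this diff, where the check runs before irq_enter()):

/* Sketch only -- not part of the patch. */
#include <asm/cpu.h>

static void example_interrupt_entry(void)
{
	/* If the CPU was in enabled wait when the interrupt arrived,
	 * idle_enter != 0 and vtime_start_cpu() ends the idle period. */
	s390_idle_check();
	/* ... regular interrupt processing continues here ... */
}
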
16 changes: 7 additions & 9 deletions trunk/arch/s390/include/asm/timer.h
@@ -23,20 +23,18 @@ struct vtimer_list {
__u64 expires;
__u64 interval;

spinlock_t lock;
unsigned long magic;

void (*function)(unsigned long);
unsigned long data;
};

/* the offset value will wrap after ca. 71 years */
/* the vtimer value will wrap after ca. 71 years */
struct vtimer_queue {
struct list_head list;
spinlock_t lock;
__u64 to_expire; /* current event expire time */
__u64 offset; /* list offset to zero */
__u64 idle; /* temp var for idle */
__u64 timer; /* last programmed timer */
__u64 elapsed; /* elapsed time of timer expire values */
__u64 idle; /* temp var for idle */
int do_spt; /* =1: reprogram cpu timer in idle */
};

extern void init_virt_timer(struct vtimer_list *timer);
@@ -48,8 +46,8 @@ extern int del_virt_timer(struct vtimer_list *timer);
extern void init_cpu_vtimer(void);
extern void vtime_init(void);

extern void vtime_start_cpu_timer(void);
extern void vtime_stop_cpu_timer(void);
extern void vtime_stop_cpu(void);
extern void vtime_start_leave(void);

#endif /* __KERNEL__ */
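
As context for the slimmed-down struct vtimer_list above (the per-timer lock and magic fields are gone; serialization presumably relies on the per-cpu vtimer_queue lock), here is a hedged sketch of setting up a one-shot virtual timer. It assumes the add_virt_timer() helper declared elsewhere in this header; the expires value and all names prefixed my_ are purely illustrative.

#include <asm/timer.h>

static void my_vtimer_fn(unsigned long data)
{
	/* called once this CPU has consumed 'expires' worth of CPU time */
}

static struct vtimer_list my_timer;

static void my_vtimer_setup(void)
{
	init_virt_timer(&my_timer);
	my_timer.function = my_vtimer_fn;
	my_timer.data = 0;
	my_timer.expires = 1ULL << 32;	/* CPU-timer units, illustrative value */
	add_virt_timer(&my_timer);
}
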

5 changes: 3 additions & 2 deletions trunk/arch/s390/kernel/entry.S
@@ -583,8 +583,8 @@ kernel_per:

.globl io_int_handler
io_int_handler:
stpt __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
SAVE_ALL_BASE __LC_SAVE_AREA+16
SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
@@ -723,8 +723,8 @@ io_notify_resume:

.globl ext_int_handler
ext_int_handler:
stpt __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
SAVE_ALL_BASE __LC_SAVE_AREA+16
SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
@@ -750,6 +750,7 @@ __critical_end:

.globl mcck_int_handler
mcck_int_handler:
stck __LC_INT_CLOCK
spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer
lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs
SAVE_ALL_BASE __LC_SAVE_AREA+32
5 changes: 3 additions & 2 deletions trunk/arch/s390/kernel/entry64.S
@@ -559,8 +559,8 @@ kernel_per:
*/
.globl io_int_handler
io_int_handler:
stpt __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
SAVE_ALL_BASE __LC_SAVE_AREA+32
SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+32
CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+32
@@ -721,8 +721,8 @@ io_notify_resume:
*/
.globl ext_int_handler
ext_int_handler:
stpt __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
SAVE_ALL_BASE __LC_SAVE_AREA+32
SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32
CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32
@@ -746,6 +746,7 @@ __critical_end:
*/
.globl mcck_int_handler
mcck_int_handler:
stck __LC_INT_CLOCK
la %r1,4095 # revalidate r1
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
64 changes: 3 additions & 61 deletions trunk/arch/s390/kernel/process.c
@@ -46,7 +46,6 @@
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include "entry.h"

asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
@@ -76,35 +75,12 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
return sf->gprs[8];
}

DEFINE_PER_CPU(struct s390_idle_data, s390_idle) = {
.lock = __SPIN_LOCK_UNLOCKED(s390_idle.lock)
};

void s390_idle_leave(void)
{
struct s390_idle_data *idle;
unsigned long long idle_time;

idle = &__get_cpu_var(s390_idle);
idle_time = S390_lowcore.int_clock - idle->idle_enter;
spin_lock(&idle->lock);
idle->idle_time += idle_time;
idle->idle_enter = 0ULL;
idle->idle_count++;
spin_unlock(&idle->lock);
vtime_start_cpu_timer();
}

extern void s390_handle_mcck(void);
/*
* The idle loop on a S390...
*/
static void default_idle(void)
{
struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
unsigned long addr;
psw_t psw;

/* CPU is going idle. */
local_irq_disable();
if (need_resched()) {
@@ -120,50 +96,16 @@ static void default_idle(void)
local_mcck_disable();
if (test_thread_flag(TIF_MCCK_PENDING)) {
local_mcck_enable();
s390_idle_leave();
local_irq_enable();
s390_handle_mcck();
return;
}
trace_hardirqs_on();
/* Don't trace preempt off for idle. */
stop_critical_timings();
vtime_stop_cpu_timer();

/*
* The inline assembly is equivalent to
* idle->idle_enter = get_clock();
* __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
* PSW_MASK_IO | PSW_MASK_EXT);
* The difference is that the inline assembly makes sure that
* the stck instruction is right before the lpsw instruction.
* This is done to increase the precision.
*/

/* Wait for external, I/O or machine check interrupt. */
psw.mask = psw_kernel_bits|PSW_MASK_WAIT|PSW_MASK_IO|PSW_MASK_EXT;
#ifndef __s390x__
asm volatile(
" basr %0,0\n"
"0: ahi %0,1f-0b\n"
" st %0,4(%2)\n"
" stck 0(%3)\n"
" lpsw 0(%2)\n"
"1:"
: "=&d" (addr), "=m" (idle->idle_enter)
: "a" (&psw), "a" (&idle->idle_enter), "m" (psw)
: "memory", "cc");
#else /* __s390x__ */
asm volatile(
" larl %0,1f\n"
" stg %0,8(%2)\n"
" stck 0(%3)\n"
" lpswe 0(%2)\n"
"1:"
: "=&d" (addr), "=m" (idle->idle_enter)
: "a" (&psw), "a" (&idle->idle_enter), "m" (psw)
: "memory", "cc");
#endif /* __s390x__ */
/* Stop virtual timer and halt the cpu. */
vtime_stop_cpu();
/* Reenable preemption tracer. */
start_critical_timings();
}
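
For reference, the removed default_idle()/s390_idle_leave() pair boiled down to the accounting below; with this commit the same bookkeeping is expected to move into vtime_stop_cpu() and vtime_start_cpu() (vtime.c is not shown in this excerpt). This is a condensed C sketch of the deleted logic, not the new implementation:

static void old_idle_accounting_sketch(void)
{
	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
	unsigned long long idle_time;

	/* Entering idle: record the TOD clock right before loading the
	 * enabled-wait PSW (the old inline assembly kept stck and lpsw
	 * adjacent to keep the timestamp precise). */
	idle->idle_enter = get_clock();

	/* ... CPU waits for an external, I/O or machine check interrupt ... */

	/* Leaving idle: charge the elapsed wait time. */
	idle_time = S390_lowcore.int_clock - idle->idle_enter;
	spin_lock(&idle->lock);
	idle->idle_time += idle_time;
	idle->idle_enter = 0ULL;
	idle->idle_count++;
	spin_unlock(&idle->lock);
}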

2 changes: 1 addition & 1 deletion trunk/arch/s390/kernel/s390_ext.c
@@ -119,8 +119,8 @@ void do_extint(struct pt_regs *regs, unsigned short code)
struct pt_regs *old_regs;

old_regs = set_irq_regs(regs);
irq_enter();
s390_idle_check();
irq_enter();
if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
/* Serve timer interrupts first. */
clock_comparator_work();
(Diffs for the remaining 3 changed files were not loaded in this view.)
