Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 66179
b: refs/heads/master
c: 91a2fcc
h: refs/heads/master
i:
  66177: aa7679a
  66175: 8d1061e
v: v3
  • Loading branch information
Ralf Baechle committed Oct 11, 2007
1 parent 26a3dd9 commit 337cebb
Show file tree
Hide file tree
Showing 12 changed files with 89 additions and 293 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 90b02340dcc6ce00bf22c48f4865915f5989e5e4
refs/heads/master: 91a2fcc88634663e9e13dcdfad0e4a860e64aeee
4 changes: 1 addition & 3 deletions trunk/arch/mips/au1000/common/irq.c
Original file line number Diff line number Diff line change
Expand Up @@ -65,8 +65,6 @@
#define EXT_INTC1_REQ1 5 /* IP 5 */
#define MIPS_TIMER_IP 7 /* IP 7 */

extern void mips_timer_interrupt(void);

void (*board_init_irq)(void);

static DEFINE_SPINLOCK(irq_lock);
Expand Down Expand Up @@ -635,7 +633,7 @@ asmlinkage void plat_irq_dispatch(void)
unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;

if (pending & CAUSEF_IP7)
mips_timer_interrupt();
ll_timer_interrupt(63);
else if (pending & CAUSEF_IP2)
intc0_req0_irqdispatch();
else if (pending & CAUSEF_IP3)
Expand Down
40 changes: 0 additions & 40 deletions trunk/arch/mips/au1000/common/time.c
Original file line number Diff line number Diff line change
Expand Up @@ -64,48 +64,8 @@ static unsigned long last_pc0, last_match20;

static DEFINE_SPINLOCK(time_lock);

/*
 * Acknowledge a pending R4k-style CP0 timer interrupt by loading the next
 * match value into the Compare register; on MIPS, writing c0_compare also
 * clears the pending timer interrupt.  Passing 0 (see the error path in
 * mips_timer_interrupt) just clears the interrupt without scheduling a
 * meaningful next tick.
 */
static inline void ack_r4ktimer(unsigned long newval)
{
write_c0_compare(newval);
}

/*
* There are a lot of conceptually broken versions of the MIPS timer interrupt
* handler floating around. This one is rather different, but the algorithm
* is provably more robust.
*/
unsigned long wtimer;

/*
 * Low-level CP0 timer interrupt handler for the Au1000 boards.
 *
 * Runs the periodic tick: advances jiffies via do_timer() and, on UP,
 * process/time accounting via update_process_times().  The loop re-arms
 * c0_compare and repeats until the newly programmed match value is safely
 * in the future (the unsigned subtraction below stays < 0x7fffffff only
 * while c0_count has not yet passed r4k_cur), so missed ticks are caught
 * up rather than lost.
 *
 * irq 63 is the fixed statistics slot used for the CP0 timer on this
 * platform (matches ll_timer_interrupt(63) in the dispatcher).
 */
void mips_timer_interrupt(void)
{
int irq = 63;

irq_enter();
kstat_this_cpu.irqs[irq]++;

if (r4k_offset == 0)
goto null;

do {
/*
 * NOTE(review): irqs[irq] was already incremented above, so each
 * interrupt is counted at least twice here — looks like a bug,
 * though it only skews /proc/interrupts statistics.
 */
kstat_this_cpu.irqs[irq]++;
do_timer(1);
#ifndef CONFIG_SMP
update_process_times(user_mode(get_irq_regs()));
#endif
r4k_cur += r4k_offset;
ack_r4ktimer(r4k_cur);

/* Loop while c0_count has already passed the new match value. */
} while (((unsigned long)read_c0_count()
- r4k_cur) < 0x7fffffff);

irq_exit();
return;

null:
/* Timer not calibrated (r4k_offset == 0): just clear the interrupt. */
ack_r4ktimer(0);
irq_exit();
}

#ifdef CONFIG_PM
irqreturn_t counter0_irq(int irq, void *dev_id)
{
Expand Down
2 changes: 1 addition & 1 deletion trunk/arch/mips/kernel/smtc.c
Original file line number Diff line number Diff line change
Expand Up @@ -867,7 +867,7 @@ void ipi_decode(struct smtc_ipi *pipi)
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
clock_hang_reported[dest_copy] = 0;
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
local_timer_interrupt(0, NULL);
local_timer_interrupt(0);
irq_exit();
break;
case LINUX_SMP_IPI:
Expand Down
93 changes: 69 additions & 24 deletions trunk/arch/mips/kernel/time.c
Original file line number Diff line number Diff line change
Expand Up @@ -144,7 +144,7 @@ void local_timer_interrupt(int irq, void *dev_id)
* High-level timer interrupt service routines. This function
* is set as irqaction->handler and is invoked through do_IRQ.
*/
irqreturn_t timer_interrupt(int irq, void *dev_id)
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
write_seqlock(&xtime_lock);

Expand Down Expand Up @@ -174,9 +174,10 @@ int null_perf_irq(void)
return 0;
}

EXPORT_SYMBOL(null_perf_irq);

int (*perf_irq)(void) = null_perf_irq;

EXPORT_SYMBOL(null_perf_irq);
EXPORT_SYMBOL(perf_irq);

/*
Expand Down Expand Up @@ -208,35 +209,79 @@ static inline int handle_perf_irq (int r2)
!r2;
}

asmlinkage void ll_timer_interrupt(int irq)
void ll_timer_interrupt(int irq, void *dev_id)
{
int r2 = cpu_has_mips_r2;
int cpu = smp_processor_id();

irq_enter();
kstat_this_cpu.irqs[irq]++;
#ifdef CONFIG_MIPS_MT_SMTC
/*
* In an SMTC system, one Count/Compare set exists per VPE.
* Which TC within a VPE gets the interrupt is essentially
* random - we only know that it shouldn't be one with
* IXMT set. Whichever TC gets the interrupt needs to
* send special interprocessor interrupts to the other
* TCs to make sure that they schedule, etc.
*
* That code is specific to the SMTC kernel, not to
 * a particular platform, so it's invoked from
* the general MIPS timer_interrupt routine.
*/

/*
* We could be here due to timer interrupt,
* perf counter overflow, or both.
*/
(void) handle_perf_irq(1);

if (read_c0_cause() & (1 << 30)) {
/*
* There are things we only want to do once per tick
* in an "MP" system. One TC of each VPE will take
* the actual timer interrupt. The others will get
* timer broadcast IPIs. We use whoever it is that takes
* the tick on VPE 0 to run the full timer_interrupt().
*/
if (cpu_data[cpu].vpe_id == 0) {
timer_interrupt(irq, NULL);
} else {
write_c0_compare(read_c0_count() +
(mips_hpt_frequency/HZ));
local_timer_interrupt(irq, dev_id);
}
smtc_timer_broadcast(cpu_data[cpu].vpe_id);
}
#else /* CONFIG_MIPS_MT_SMTC */
int r2 = cpu_has_mips_r2;

if (handle_perf_irq(r2))
goto out;
return;

if (r2 && ((read_c0_cause() & (1 << 30)) == 0))
goto out;

timer_interrupt(irq, NULL);

out:
irq_exit();
}

asmlinkage void ll_local_timer_interrupt(int irq)
{
irq_enter();
if (smp_processor_id() != 0)
kstat_this_cpu.irqs[irq]++;

/* we keep interrupt disabled all the time */
local_timer_interrupt(irq, NULL);
return;

irq_exit();
if (cpu == 0) {
/*
* CPU 0 handles the global timer interrupt job and process
* accounting resets count/compare registers to trigger next
* timer int.
*/
timer_interrupt(irq, NULL);
} else {
/* Everyone else needs to reset the timer int here as
ll_local_timer_interrupt doesn't */
/*
* FIXME: need to cope with counter underflow.
* More support needs to be added to kernel/time for
* counter/timer interrupts on multiple CPU's
*/
write_c0_compare(read_c0_count() + (mips_hpt_frequency/HZ));

/*
* Other CPUs should do profiling and process accounting
*/
local_timer_interrupt(irq, dev_id);
}
#endif /* CONFIG_MIPS_MT_SMTC */
}

/*
Expand Down
112 changes: 5 additions & 107 deletions trunk/arch/mips/mips-boards/generic/time.c
Original file line number Diff line number Diff line change
Expand Up @@ -67,108 +67,6 @@ static void mips_perf_dispatch(void)
do_IRQ(cp0_perfcount_irq);
}

/*
* Redeclare until I get around mopping the timer code insanity on MIPS.
*/
extern int null_perf_irq(void);

extern int (*perf_irq)(void);

/*
* Possibly handle a performance counter interrupt.
* Return true if the timer interrupt should not be checked
*/
/*
 * Possibly handle a performance counter interrupt.
 * Return true if the timer interrupt should not be checked.
 *
 * The performance counter overflow interrupt may be shared with the
 * timer interrupt (cp0_perfcount_irq < 0).  If it is shared, a counter
 * has actually overflowed (perf_irq() == IRQ_HANDLED), and we cannot
 * reliably tell whether a timer interrupt also fired (!r2), then the
 * timer check must be skipped.
 */
static inline int handle_perf_irq (int r2)
{
if (cp0_perfcount_irq >= 0)
return 0;
if (perf_irq() != IRQ_HANDLED)
return 0;
return !r2;
}

/*
 * Board-level CP0 timer interrupt handler (MIPS boards, generic).
 *
 * On SMTC kernels one Count/Compare pair exists per VPE, so the handler
 * dispatches the real tick work to exactly one TC per tick and broadcasts
 * IPIs to the rest.  On non-SMTC kernels, CPU 0 runs the global tick
 * (timer_interrupt) while other CPUs re-arm their own Compare register
 * and do only local accounting (local_timer_interrupt).
 *
 * Always returns IRQ_HANDLED, even when only the shared perf counter
 * interrupt was serviced.
 */
irqreturn_t mips_timer_interrupt(int irq, void *dev_id)
{
int cpu = smp_processor_id();

#ifdef CONFIG_MIPS_MT_SMTC
/*
 * In an SMTC system, one Count/Compare set exists per VPE.
 * Which TC within a VPE gets the interrupt is essentially
 * random - we only know that it shouldn't be one with
 * IXMT set. Whichever TC gets the interrupt needs to
 * send special interprocessor interrupts to the other
 * TCs to make sure that they schedule, etc.
 *
 * That code is specific to the SMTC kernel, not to
 * a particular platform, so it's invoked from
 * the general MIPS timer_interrupt routine.
 */

/*
 * We could be here due to timer interrupt,
 * perf counter overflow, or both.
 */
(void) handle_perf_irq(1);

/* Cause bit 30 (TI) set: a real timer interrupt is pending. */
if (read_c0_cause() & (1 << 30)) {
/*
 * There are things we only want to do once per tick
 * in an "MP" system. One TC of each VPE will take
 * the actual timer interrupt. The others will get
 * timer broadcast IPIs. We use whoever it is that takes
 * the tick on VPE 0 to run the full timer_interrupt().
 */
if (cpu_data[cpu].vpe_id == 0) {
timer_interrupt(irq, NULL);
} else {
/* Re-arm this VPE's timer and do local accounting only. */
write_c0_compare(read_c0_count() +
(mips_hpt_frequency/HZ));
local_timer_interrupt(irq, dev_id);
}
smtc_timer_broadcast();
}
#else /* CONFIG_MIPS_MT_SMTC */
int r2 = cpu_has_mips_r2;

/* A shared perf-counter interrupt was handled; skip the timer check. */
if (handle_perf_irq(r2))
goto out;

/* On R2 cores Cause.TI tells us whether the timer actually fired. */
if (r2 && ((read_c0_cause() & (1 << 30)) == 0))
goto out;

if (cpu == 0) {
/*
 * CPU 0 handles the global timer interrupt job and process
 * accounting resets count/compare registers to trigger next
 * timer int.
 */
timer_interrupt(irq, NULL);
} else {
/* Everyone else needs to reset the timer int here as
ll_local_timer_interrupt doesn't */
/*
 * FIXME: need to cope with counter underflow.
 * More support needs to be added to kernel/time for
 * counter/timer interrupts on multiple CPU's
 */
write_c0_compare(read_c0_count() + (mips_hpt_frequency/HZ));

/*
 * Other CPUs should do profiling and process accounting
 */
local_timer_interrupt(irq, dev_id);
}
out:
#endif /* CONFIG_MIPS_MT_SMTC */
return IRQ_HANDLED;
}

/*
* Estimate CPU frequency. Sets mips_hpt_frequency as a side-effect
*/
Expand Down Expand Up @@ -246,7 +144,7 @@ void __init plat_time_init(void)
mips_scroll_message();
}

irqreturn_t mips_perf_interrupt(int irq, void *dev_id)
static irqreturn_t mips_perf_interrupt(int irq, void *dev_id)
{
return perf_irq();
}
Expand All @@ -257,8 +155,10 @@ static struct irqaction perf_irqaction = {
.name = "performance",
};

void __init plat_perf_setup(struct irqaction *irq)
void __init plat_perf_setup(void)
{
struct irqaction *irq = &perf_irqaction;

cp0_perfcount_irq = -1;

#ifdef MSC01E_INT_BASE
Expand Down Expand Up @@ -297,8 +197,6 @@ void __init plat_timer_setup(struct irqaction *irq)
mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
}

/* we are using the cpu counter for timer interrupts */
irq->handler = mips_timer_interrupt; /* we use our own handler */
#ifdef CONFIG_MIPS_MT_SMTC
setup_irq_smtc(mips_cpu_timer_irq, irq, 0x100 << cp0_compare_irq);
#else
Expand All @@ -308,5 +206,5 @@ void __init plat_timer_setup(struct irqaction *irq)
set_irq_handler(mips_cpu_timer_irq, handle_percpu_irq);
#endif

plat_perf_setup(&perf_irqaction);
plat_perf_setup();
}
Loading

0 comments on commit 337cebb

Please sign in to comment.