Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 78032
b: refs/heads/master
c: 87353d8
h: refs/heads/master
v: v3
  • Loading branch information
Ralf Baechle committed Jan 29, 2008
1 parent d9ca7e6 commit 74984f3
Show file tree
Hide file tree
Showing 28 changed files with 682 additions and 486 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 19388fb092d89e179575bd0b44f51b57e175edf5
refs/heads/master: 87353d8ac39c52784da605ecbe965ecdfad609ad
5 changes: 5 additions & 0 deletions trunk/arch/mips/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -1441,6 +1441,7 @@ config MIPS_MT_SMP
select SMP
select SYS_SUPPORTS_SCHED_SMT if SMP
select SYS_SUPPORTS_SMP
select SMP_UP
help
This is a kernel model which is also known as VSMP or lately
has been marketed as SMVP.
Expand All @@ -1457,6 +1458,7 @@ config MIPS_MT_SMTC
select NR_CPUS_DEFAULT_8
select SMP
select SYS_SUPPORTS_SMP
select SMP_UP
help
This is a kernel model which is known as SMTC or lately has been
marketed as SMVP.
Expand Down Expand Up @@ -1735,6 +1737,9 @@ config SMP

If you don't know what to do here, say N.

config SMP_UP
bool

config SYS_SUPPORTS_SMP
bool

Expand Down
8 changes: 8 additions & 0 deletions trunk/arch/mips/fw/arc/init.c
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@

#include <asm/bootinfo.h>
#include <asm/sgialib.h>
#include <asm/smp-ops.h>

#undef DEBUG_PROM_INIT

Expand Down Expand Up @@ -48,4 +49,11 @@ void __init prom_init(void)
ArcRead(0, &c, 1, &cnt);
ArcEnterInteractiveMode();
#endif
#ifdef CONFIG_SGI_IP27
{
extern struct plat_smp_ops ip27_smp_ops;

register_smp_ops(&ip27_smp_ops);
}
#endif
}
1 change: 0 additions & 1 deletion trunk/arch/mips/kernel/mips-mt.c
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/mipsmtregs.h>
#include <asm/r4kcache.h>
#include <asm/cacheflush.h>
Expand Down
3 changes: 1 addition & 2 deletions trunk/arch/mips/kernel/setup.c
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@
#include <asm/cpu.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>
#include <asm/system.h>

struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
Expand Down Expand Up @@ -575,9 +576,7 @@ void __init setup_arch(char **cmdline_p)
arch_mem_init(cmdline_p);

resource_init();
#ifdef CONFIG_SMP
plat_smp_setup();
#endif
}

static int __init fpu_disable(char *s)
Expand Down
193 changes: 106 additions & 87 deletions trunk/arch/mips/kernel/smp-mt.c
Original file line number Diff line number Diff line change
Expand Up @@ -215,72 +215,67 @@ static void __init smp_tc_init(unsigned int tc, unsigned int mvpconf0)
write_tc_c0_tchalt(TCHALT_H);
}

/*
* Common setup before any secondaries are started
* Make sure all CPUs are in a sensible state before we boot any of the
* secondaries
*/
void __init plat_smp_setup(void)
static void vsmp_send_ipi_single(int cpu, unsigned int action)
{
unsigned int mvpconf0, ntc, tc, ncpu = 0;
unsigned int nvpe;
int i;
unsigned long flags;
int vpflags;

#ifdef CONFIG_MIPS_MT_FPAFF
/* If we have an FPU, enroll ourselves in the FPU-full mask */
if (cpu_has_fpu)
cpu_set(0, mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
if (!cpu_has_mipsmt)
return;
local_irq_save(flags);

/* disable MT so we can configure */
dvpe();
dmt();
vpflags = dvpe(); /* can't access the other CPU's registers whilst MVPE enabled */

/* Put MVPE's into 'configuration state' */
set_c0_mvpcontrol(MVPCONTROL_VPC);
switch (action) {
case SMP_CALL_FUNCTION:
i = C_SW1;
break;

mvpconf0 = read_c0_mvpconf0();
ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT;
case SMP_RESCHEDULE_YOURSELF:
default:
i = C_SW0;
break;
}

nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
smp_num_siblings = nvpe;
/* 1:1 mapping of vpe and tc... */
settc(cpu);
write_vpe_c0_cause(read_vpe_c0_cause() | i);
evpe(vpflags);

/* we'll always have more TC's than VPE's, so loop setting everything
to a sensible state */
for (tc = 0; tc <= ntc; tc++) {
settc(tc);
local_irq_restore(flags);
}

smp_tc_init(tc, mvpconf0);
ncpu = smp_vpe_init(tc, mvpconf0, ncpu);
}
static void vsmp_send_ipi_mask(cpumask_t mask, unsigned int action)
{
unsigned int i;

/* Release config state */
clear_c0_mvpcontrol(MVPCONTROL_VPC);
for_each_cpu_mask(i, mask)
vsmp_send_ipi_single(i, action);
}

/* We'll wait until starting the secondaries before starting MVPE */
static void __cpuinit vsmp_init_secondary(void)
{
/* Enable per-cpu interrupts */

printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu);
/* This is Malta specific: IPI, performance and timer interrupts */
write_c0_status((read_c0_status() & ~ST0_IM ) |
(STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7));
}

void __init plat_prepare_cpus(unsigned int max_cpus)
static void __cpuinit vsmp_smp_finish(void)
{
mips_mt_set_cpuoptions();

/* set up ipi interrupts */
if (cpu_has_vint) {
set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
}
write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));

cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ;
#ifdef CONFIG_MIPS_MT_FPAFF
/* If we have an FPU, enroll ourselves in the FPU-full mask */
if (cpu_has_fpu)
cpu_set(smp_processor_id(), mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

setup_irq(cpu_ipi_resched_irq, &irq_resched);
setup_irq(cpu_ipi_call_irq, &irq_call);
local_irq_enable();
}

set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
static void vsmp_cpus_done(void)
{
}

/*
Expand All @@ -291,7 +286,7 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
* (unsigned long)idle->thread_info the gp
* assumes a 1:1 mapping of TC => VPE
*/
void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle)
static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle)
{
struct thread_info *gp = task_thread_info(idle);
dvpe();
Expand Down Expand Up @@ -325,57 +320,81 @@ void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle)
evpe(EVPE_ENABLE);
}

void __cpuinit prom_init_secondary(void)
{
/* Enable per-cpu interrupts */

/* This is Malta specific: IPI, performance and timer interrupts */
write_c0_status((read_c0_status() & ~ST0_IM ) |
(STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7));
}

void __cpuinit prom_smp_finish(void)
/*
* Common setup before any secondaries are started
* Make sure all CPUs are in a sensible state before we boot any of the
* secondaries
*/
static void __init vsmp_smp_setup(void)
{
write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
unsigned int mvpconf0, ntc, tc, ncpu = 0;
unsigned int nvpe;

#ifdef CONFIG_MIPS_MT_FPAFF
/* If we have an FPU, enroll ourselves in the FPU-full mask */
if (cpu_has_fpu)
cpu_set(smp_processor_id(), mt_fpu_cpumask);
cpu_set(0, mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
if (!cpu_has_mipsmt)
return;

local_irq_enable();
}
/* disable MT so we can configure */
dvpe();
dmt();

void prom_cpus_done(void)
{
}
/* Put MVPE's into 'configuration state' */
set_c0_mvpcontrol(MVPCONTROL_VPC);

void core_send_ipi(int cpu, unsigned int action)
{
int i;
unsigned long flags;
int vpflags;
mvpconf0 = read_c0_mvpconf0();
ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT;

local_irq_save(flags);
nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
smp_num_siblings = nvpe;

vpflags = dvpe(); /* can't access the other CPU's registers whilst MVPE enabled */
/* we'll always have more TC's than VPE's, so loop setting everything
to a sensible state */
for (tc = 0; tc <= ntc; tc++) {
settc(tc);

switch (action) {
case SMP_CALL_FUNCTION:
i = C_SW1;
break;
smp_tc_init(tc, mvpconf0);
ncpu = smp_vpe_init(tc, mvpconf0, ncpu);
}

case SMP_RESCHEDULE_YOURSELF:
default:
i = C_SW0;
break;
/* Release config state */
clear_c0_mvpcontrol(MVPCONTROL_VPC);

/* We'll wait until starting the secondaries before starting MVPE */

printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu);
}

static void __init vsmp_prepare_cpus(unsigned int max_cpus)
{
mips_mt_set_cpuoptions();

/* set up ipi interrupts */
if (cpu_has_vint) {
set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
}

/* 1:1 mapping of vpe and tc... */
settc(cpu);
write_vpe_c0_cause(read_vpe_c0_cause() | i);
evpe(vpflags);
cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ;

local_irq_restore(flags);
setup_irq(cpu_ipi_resched_irq, &irq_resched);
setup_irq(cpu_ipi_call_irq, &irq_call);

set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
}

/*
 * SMP operations vector for the MIPS MT VSMP kernel model.  The generic
 * MIPS SMP code calls these hooks through the mp_ops pointer once this
 * struct has been registered via register_smp_ops().
 */
struct plat_smp_ops vsmp_smp_ops = {
	.send_ipi_single	= vsmp_send_ipi_single,
	.send_ipi_mask		= vsmp_send_ipi_mask,
	.init_secondary		= vsmp_init_secondary,
	.smp_finish		= vsmp_smp_finish,
	.cpus_done		= vsmp_cpus_done,
	.boot_secondary		= vsmp_boot_secondary,
	.smp_setup		= vsmp_smp_setup,
	.prepare_cpus		= vsmp_prepare_cpus,
};
23 changes: 16 additions & 7 deletions trunk/arch/mips/kernel/smp.c
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,6 @@
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/time.h>

#ifdef CONFIG_MIPS_MT_SMTC
Expand Down Expand Up @@ -84,6 +83,16 @@ static inline void set_cpu_sibling_map(int cpu)
cpu_set(cpu, cpu_sibling_map[cpu]);
}

struct plat_smp_ops *mp_ops;

/*
 * Install the platform's SMP operations vector.  Called once from
 * platform setup code (e.g. prom_init()) before any secondary CPUs
 * are started; the generic SMP code dispatches through mp_ops.
 *
 * Fix: the warning is meant to fire when an ops vector has ALREADY
 * been registered (mp_ops non-NULL), not whenever the caller passes a
 * non-NULL argument — the original `if (ops)` warned on every call.
 */
__cpuinit void register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previous set SMP ops\n");

	mp_ops = ops;
}

/*
* First C code run on the secondary CPUs after being started up by
* the master.
Expand All @@ -100,7 +109,7 @@ asmlinkage __cpuinit void start_secondary(void)
cpu_report();
per_cpu_trap_init();
mips_clockevent_init();
prom_init_secondary();
mp_ops->init_secondary();

/*
* XXX parity protection should be folded in here when it's converted
Expand All @@ -112,7 +121,7 @@ asmlinkage __cpuinit void start_secondary(void)
cpu = smp_processor_id();
cpu_data[cpu].udelay_val = loops_per_jiffy;

prom_smp_finish();
mp_ops->smp_finish();
set_cpu_sibling_map(cpu);

cpu_set(cpu, cpu_callin_map);
Expand Down Expand Up @@ -184,7 +193,7 @@ int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
smp_mb();

/* Send a message to all other CPUs and wait for them to respond */
core_send_ipi_mask(mask, SMP_CALL_FUNCTION);
mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);

/* Wait for response */
/* FIXME: lock-up detection, backtrace on lock-up */
Expand Down Expand Up @@ -278,15 +287,15 @@ void smp_send_stop(void)

void __init smp_cpus_done(unsigned int max_cpus)
{
prom_cpus_done();
mp_ops->cpus_done();
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
init_new_context(current, &init_mm);
current_thread_info()->cpu = 0;
plat_prepare_cpus(max_cpus);
mp_ops->prepare_cpus(max_cpus);
set_cpu_sibling_map(0);
#ifndef CONFIG_HOTPLUG_CPU
cpu_present_map = cpu_possible_map;
Expand Down Expand Up @@ -325,7 +334,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
if (IS_ERR(idle))
panic(KERN_ERR "Fork failed for CPU %d", cpu);

prom_boot_secondary(cpu, idle);
mp_ops->boot_secondary(cpu, idle);

/*
* Trust is futile. We should really have timeouts ...
Expand Down
Loading

0 comments on commit 74984f3

Please sign in to comment.