linux/arch/parisc/kernel/smp.c @ 4c8c3c7 (515 lines, 414 loc, 11.3 KB)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
** SMP Support
**
** Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
** Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
** Copyright (C) 2001,2004 Grant Grundler <grundler@parisc-linux.org>
**
** Lots of stuff stolen from arch/alpha/kernel/smp.c
** ...and then parisc stole from arch/ia64/kernel/smp.c. Thanks David! :^)
**
** Thanks to John Curry and Ullas Ponnadi. I learned a lot from their work.
** -grant (1/12/2001)
**
*/

#include <linux/types.h>
#include <linux/spinlock.h>

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched/mm.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/kgdb.h>
#include <linux/sched/hotplug.h>

#include <linux/atomic.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/tlbflush.h>

#include <asm/io.h>
#include <asm/irq.h>		/* for CPU_IRQ_REGION and friends */
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>

#undef DEBUG_SMP
#ifdef DEBUG_SMP
static int smp_debug_lvl = 0;
#define smp_debug(lvl, printargs...)		\
		if (lvl >= smp_debug_lvl)	\
			printk(printargs);
#else
#define smp_debug(lvl, ...)	do { } while(0)
#endif /* DEBUG_SMP */

volatile struct task_struct *smp_init_current_idle_task;

/* track which CPU is booting */
static volatile int cpu_now_booting;

static DEFINE_PER_CPU(spinlock_t, ipi_lock);

enum ipi_message_type {
	IPI_NOP = 0,
	IPI_RESCHEDULE = 1,
	IPI_CALL_FUNC,
	IPI_CPU_START,
	IPI_CPU_STOP,
	IPI_CPU_TEST,
#ifdef CONFIG_KGDB
	IPI_ENTER_KGDB,
#endif
};


/********** SMP inter processor interrupt and communication routines */

#undef PER_CPU_IRQ_REGION
#ifdef PER_CPU_IRQ_REGION
/* XXX REVISIT Ignore for now.
**    *May* need this "hook" to register IPI handler
**    once we have perCPU ExtIntr switch tables.
*/
static void
ipi_init(int cpuid)
{
#error verify IRQ_OFFSET(IPI_IRQ) is ipi_interrupt() in new IRQ region

	if (cpu_online(cpuid)) {
		switch_to_idle_task(current);
	}

	return;
}
#endif


/*
** Yoink this CPU from the runnable list...
**
*/
static void
halt_processor(void)
{
	/* REVISIT : redirect I/O Interrupts to another CPU? */
	/* REVISIT : does PM *know* this CPU isn't available? */
	set_cpu_online(smp_processor_id(), false);
	local_irq_disable();
	__pdc_cpu_rendezvous();
	for (;;)
		;
}
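/*
** Per-CPU IPI plumbing: ipi_send() (further below) posts a message by
** setting the target's pending_ipi bit under that CPU's ipi_lock and
** writing the IPI vector to the target's HPA, which raises its
** external interrupt. ipi_interrupt() snapshots and clears pending_ipi
** under the same lock, then services each message, lowest bit first
** (ffz(~ops) finds the lowest set bit).
*/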
irqreturn_t __irq_entry
ipi_interrupt(int irq, void *dev_id)
{
	int this_cpu = smp_processor_id();
	struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu);
	unsigned long ops;
	unsigned long flags;

	for (;;) {
		spinlock_t *lock = &per_cpu(ipi_lock, this_cpu);

		spin_lock_irqsave(lock, flags);
		ops = p->pending_ipi;
		p->pending_ipi = 0;
		spin_unlock_irqrestore(lock, flags);

		mb(); /* Order bit clearing and data access. */

		if (!ops)
			break;

		while (ops) {
			unsigned long which = ffz(~ops);

			ops &= ~(1 << which);

			switch (which) {
			case IPI_NOP:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_NOP\n", this_cpu);
				break;

			case IPI_RESCHEDULE:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
				inc_irq_stat(irq_resched_count);
				scheduler_ipi();
				break;

			case IPI_CALL_FUNC:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);
				inc_irq_stat(irq_call_count);
				generic_smp_call_function_interrupt();
				break;

			case IPI_CPU_START:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);
				break;

			case IPI_CPU_STOP:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_STOP\n", this_cpu);
				halt_processor();
				break;

			case IPI_CPU_TEST:
				smp_debug(100, KERN_DEBUG "CPU%d is alive!\n", this_cpu);
				break;
#ifdef CONFIG_KGDB
			case IPI_ENTER_KGDB:
				smp_debug(100, KERN_DEBUG "CPU%d ENTER_KGDB\n", this_cpu);
				kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
				break;
#endif
			default:
				printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
					this_cpu, which);
				return IRQ_NONE;
			} /* Switch */

			/* before doing more, let in any pending interrupts */
			if (ops) {
				local_irq_enable();
				local_irq_disable();
			}
		} /* while (ops) */
	}
	return IRQ_HANDLED;
}


static inline void
ipi_send(int cpu, enum ipi_message_type op)
{
	struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu);
	spinlock_t *lock = &per_cpu(ipi_lock, cpu);
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	p->pending_ipi |= 1 << op;
	gsc_writel(IPI_IRQ - CPU_IRQ_BASE, p->hpa);
	spin_unlock_irqrestore(lock, flags);
}

static void
send_IPI_mask(const struct cpumask *mask, enum ipi_message_type op)
{
	int cpu;

	for_each_cpu(cpu, mask)
		ipi_send(cpu, op);
}

static inline void
send_IPI_single(int dest_cpu, enum ipi_message_type op)
{
	BUG_ON(dest_cpu == NO_PROC_ID);

	ipi_send(dest_cpu, op);
}

static inline void
send_IPI_allbutself(enum ipi_message_type op)
{
	int i;

	preempt_disable();
	for_each_online_cpu(i) {
		if (i != smp_processor_id())
			send_IPI_single(i, op);
	}
	preempt_enable();
}

#ifdef CONFIG_KGDB
void kgdb_roundup_cpus(void)
{
	send_IPI_allbutself(IPI_ENTER_KGDB);
}
#endif

inline void
smp_send_stop(void)	{ send_IPI_allbutself(IPI_CPU_STOP); }

void
arch_smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }

void
smp_send_all_nop(void)
{
	send_IPI_allbutself(IPI_NOP);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_IPI_mask(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_IPI_single(cpu, IPI_CALL_FUNC);
}

/*
 * Called by secondaries to update state and initialize CPU registers.
 */
static void
smp_cpu_init(int cpunum)
{
	extern void init_IRQ(void);	/* arch/parisc/kernel/irq.c */
	extern void start_cpu_itimer(void); /* arch/parisc/kernel/time.c */

	/* Set modes and Enable floating point coprocessor */
	init_per_cpu(cpunum);

	disable_sr_hashing();

	mb();

	/* Well, support 2.4 linux scheme as well. */
	if (cpu_online(cpunum)) {
		extern void machine_halt(void); /* arch/parisc.../process.c */

		printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
		machine_halt();
	}

	notify_cpu_starting(cpunum);
	set_cpu_online(cpunum, true);

	/* Initialise the idle task for this CPU */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	init_IRQ();   /* make sure no IRQs are enabled or pending */
	start_cpu_itimer();
}
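/*
** Bring-up handshake: smp_boot_one_cpu() (below) writes to the slave's
** HPA to kick it out of the PDC rendezvous loop and then polls
** cpu_online() for up to a second. The slave enters smp_callin(),
** whose smp_cpu_init() sets the online bit the monarch is waiting for.
*/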
/*
 * Slaves start using C here. Indirectly called from smp_slave_stext.
 * Do what start_kernel() and main() do for boot strap processor (aka monarch)
 */
void smp_callin(unsigned long pdce_proc)
{
	int slave_id = cpu_now_booting;

#ifdef CONFIG_64BIT
	WARN_ON(((unsigned long)(PAGE0->mem_pdc_hi) << 32
			| PAGE0->mem_pdc) != pdce_proc);
#endif

	smp_cpu_init(slave_id);

	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local(NULL);

	local_irq_enable();  /* Interrupts have been off until now */

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

	/* NOTREACHED */
	panic("smp_callin() AAAAaaaaahhhh....\n");
}

/*
 * Bring one cpu online.
 */
static int smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
	const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
	long timeout;

#ifdef CONFIG_HOTPLUG_CPU
	int i;

	/* reset irq statistics for this CPU */
	memset(&per_cpu(irq_stat, cpuid), 0, sizeof(irq_cpustat_t));
	for (i = 0; i < NR_IRQS; i++) {
		struct irq_desc *desc = irq_to_desc(i);

		if (desc && desc->kstat_irqs)
			*per_cpu_ptr(desc->kstat_irqs, cpuid) = 0;
	}
#endif

	/* wait until last booting CPU has started. */
	while (cpu_now_booting)
		;

	/* Let _start know what logical CPU we're booting
	** (offset into init_tasks[],cpu_data[])
	*/
	cpu_now_booting = cpuid;

	/*
	** boot strap code needs to know the task address since
	** it also contains the process stack.
	*/
	smp_init_current_idle_task = idle;
	mb();

	printk(KERN_INFO "Releasing cpu %d now, hpa=%lx\n", cpuid, p->hpa);

	/*
	** This gets PDC to release the CPU from a very tight loop.
	**
	** From the PA-RISC 2.0 Firmware Architecture Reference Specification:
	** "The MEM_RENDEZ vector specifies the location of OS_RENDEZ which
	** is executed after receiving the rendezvous signal (an interrupt to
	** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the
	** contents of memory are valid."
	*/
	gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, p->hpa);
	mb();

	/*
	 * OK, wait a bit for that CPU to finish staggering about.
	 * Slave will set a bit when it reaches smp_cpu_init().
	 * Once the "monarch CPU" sees the bit change, it can move on.
	 */
	for (timeout = 0; timeout < 10000; timeout++) {
		if (cpu_online(cpuid)) {
			/* Which implies Slave has started up */
			cpu_now_booting = 0;
			goto alive;
		}
		udelay(100);
		barrier();
	}
	printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
	return -1;

alive:
	/* Remember the Slave data */
	smp_debug(100, KERN_DEBUG "SMP: CPU:%d came alive after %ld _us\n",
		cpuid, timeout * 100);
	return 0;
}

void __init smp_prepare_boot_cpu(void)
{
	int bootstrap_processor = per_cpu(cpu_data, 0).cpuid;

	/* Setup BSP mappings */
	printk(KERN_INFO "SMP: bootstrap CPU ID is %d\n", bootstrap_processor);

	set_cpu_online(bootstrap_processor, true);
	set_cpu_present(bootstrap_processor, true);
}


/*
** inventory.c:do_inventory() hasn't yet been run and thus we
** don't 'discover' the additional CPUs until later.
*/
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int cpu;

	for_each_possible_cpu(cpu)
		spin_lock_init(&per_cpu(ipi_lock, cpu));

	init_cpu_present(cpumask_of(0));
}


void __init smp_cpus_done(unsigned int cpu_max)
{
}


int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	if (cpu_online(cpu))
		return 0;

	if (num_online_cpus() < setup_max_cpus && smp_boot_one_cpu(cpu, tidle))
		return -EIO;

	return cpu_online(cpu) ? 0 : -EIO;
}
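/*
** Hotplug teardown is split in two: __cpu_disable() runs on the dying
** CPU itself (mark it offline, hand off time-keeping, migrate IRQs,
** flush the local cache/TLB, drain pending external interrupts), while
** __cpu_die() runs on the requesting CPU and waits, with a timeout,
** for the dying CPU to finish.
*/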
/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	unsigned int cpu = smp_processor_id();

	remove_cpu_topology(cpu);

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/* Find a new timesync master */
	if (cpu == time_keeper_id) {
		time_keeper_id = cpumask_first(cpu_online_mask);
		pr_info("CPU %d is now promoted to time-keeper master\n", time_keeper_id);
	}

	disable_percpu_irq(IPI_IRQ);

	irq_migrate_all_off_this_cpu();

	flush_cache_all_local();
	flush_tlb_all_local(NULL);

	/* disable all irqs, including timer irq */
	local_irq_disable();

	/* wait for next timer irq ... */
	mdelay(1000/HZ+100);

	/* ... and then clear all pending external irqs */
	set_eiem(0);
	mtctl(~0UL, CR_EIRR);
	mfctl(CR_EIRR);
	mtctl(0, CR_EIRR);
#endif
	return 0;
}

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	pdc_cpu_rendezvous_lock();

	if (!cpu_wait_death(cpu, 5)) {
		pr_crit("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_info("CPU%u: is shutting down\n", cpu);

	/* set task's state to interruptible sleep */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout((IS_ENABLED(CONFIG_64BIT) ? 8 : 2) * HZ);

	pdc_cpu_rendezvous_unlock();
}