Commit 30f2b4c

---
r: 360158
b: refs/heads/master
c: 41195d2
h: refs/heads/master
v: v3
---
Vineet Gupta committed Feb 15, 2013
1 parent a148fbb commit 30f2b4c
Showing 24 changed files with 961 additions and 3 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 0ef88a54aa341f754707414500158addbf35c780
refs/heads/master: 41195d236e84458bebd4fdc218610a92231ac791
39 changes: 38 additions & 1 deletion trunk/arch/arc/Kconfig
@@ -116,9 +116,42 @@ config CPU_BIG_ENDIAN
help
Build kernel for Big Endian Mode of ARC CPU

config SMP
bool "Symmetric Multi-Processing (Incomplete)"
default n
select USE_GENERIC_SMP_HELPERS
help
This enables support for systems with more than one CPU. If you have
a system with only one CPU, like most personal computers, say N. If
you have a system with more than one CPU, say Y.

if SMP

config ARC_HAS_COH_CACHES
def_bool n

config ARC_HAS_COH_LLSC
def_bool n

config ARC_HAS_COH_RTSC
def_bool n

config ARC_HAS_REENTRANT_IRQ_LV2
def_bool n

endif

config NR_CPUS
int "Maximum number of CPUs (2-32)"
range 2 32
depends on SMP
default "2"

menuconfig ARC_CACHE
bool "Enable Cache Support"
default y
# if SMP, cache enabled ONLY if ARC implementation has cache coherency
depends on !SMP || ARC_HAS_COH_CACHES

if ARC_CACHE

@@ -213,6 +246,8 @@ config ARC_COMPACT_IRQ_LEVELS
default n
# Timer HAS to be high priority, for any other high priority config
select ARC_IRQ3_LV2
# if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy
depends on !SMP || ARC_HAS_REENTRANT_IRQ_LV2

if ARC_COMPACT_IRQ_LEVELS

@@ -261,6 +296,8 @@ config ARC_HAS_RTSC
bool "Insn: RTSC (64-bit r/o cycle counter)"
default y
depends on ARC_CPU_REL_4_10
# if SMP, enable RTSC only if counter is coherent across cores
depends on !SMP || ARC_HAS_COH_RTSC

endmenu # "ARC CPU Configuration"

@@ -309,7 +346,7 @@ menuconfig ARC_DBG

config ARC_DBG_TLB_PARANOIA
bool "Paranoia Checks in Low Level TLB Handlers"
depends on ARC_DBG
depends on ARC_DBG && !SMP
default n

config ARC_DBG_TLB_MISS_COUNT
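The pattern worth noting in this hunk is the "depends on !SMP || ARC_HAS_COH_*" guards: a feature stays available on uniprocessor builds, and on SMP builds only when the ARC implementation declares the matching coherency capability. A minimal C sketch of that truth table, with the symbol names taken from the hunk; arc_cache_selectable() is a hypothetical helper, not kernel code:

#include <stdbool.h>
#include <stdio.h>

/* Models "depends on !SMP || ARC_HAS_COH_CACHES" from the Kconfig above */
static bool arc_cache_selectable(bool smp, bool has_coh_caches)
{
	return !smp || has_coh_caches;
}

int main(void)
{
	printf("UP,  non-coherent: %d\n", arc_cache_selectable(false, false)); /* 1 */
	printf("SMP, non-coherent: %d\n", arc_cache_selectable(true, false));  /* 0 */
	printf("SMP, coherent:     %d\n", arc_cache_selectable(true, true));   /* 1 */
	return 0;
}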
3 changes: 3 additions & 0 deletions trunk/arch/arc/Makefile
@@ -133,3 +133,6 @@ archclean:
# Thus forcing all extern calls in this file to be long calls
export CFLAGS_decompress_inflate.o = -mmedium-calls
export CFLAGS_initramfs.o = -mmedium-calls
ifdef CONFIG_SMP
export CFLAGS_core.o = -mmedium-calls
endif
49 changes: 49 additions & 0 deletions trunk/arch/arc/include/asm/entry.h
@@ -389,11 +389,19 @@
* to be saved again on kernel mode stack, as part of ptregs.
*-------------------------------------------------------------*/
.macro EXCPN_PROLOG_FREEUP_REG reg
#ifdef CONFIG_SMP
sr \reg, [ARC_REG_SCRATCH_DATA0]
#else
st \reg, [@ex_saved_reg1]
#endif
.endm

.macro EXCPN_PROLOG_RESTORE_REG reg
#ifdef CONFIG_SMP
lr \reg, [ARC_REG_SCRATCH_DATA0]
#else
ld \reg, [@ex_saved_reg1]
#endif
.endm

/*--------------------------------------------------------------
@@ -508,7 +516,11 @@
/* restore original r9, saved in int1_saved_reg
* It will be saved on stack in macro: SAVE_CALLER_SAVED
*/
#ifdef CONFIG_SMP
lr r9, [ARC_REG_SCRATCH_DATA0]
#else
ld r9, [@int1_saved_reg]
#endif

/* now we are ready to save the remaining context :) */
st orig_r8_IS_IRQ1, [sp, 8] /* Event Type */
@@ -639,6 +651,41 @@
bmsk \reg, \reg, 7
.endm

#ifdef CONFIG_SMP

/*-------------------------------------------------
* Retrieve the current running task on this CPU
* 1. Determine curr CPU id.
* 2. Use it to index into _current_task[ ]
*/
.macro GET_CURR_TASK_ON_CPU reg
GET_CPU_ID \reg
ld.as \reg, [@_current_task, \reg]
.endm

/*-------------------------------------------------
* Save a new task as the "current" task on this CPU
* 1. Determine curr CPU id.
* 2. Use it to index into _current_task[ ]
*
* Coded differently than GET_CURR_TASK_ON_CPU (which uses LD.AS)
* because ST r0, [r1, offset] can ONLY have s9 @offset
* while LD can take s9 (4 byte insn) or LIMM (8 byte insn)
*/

.macro SET_CURR_TASK_ON_CPU tsk, tmp
GET_CPU_ID \tmp
add2 \tmp, @_current_task, \tmp
st \tsk, [\tmp]
#ifdef CONFIG_ARC_CURR_IN_REG
mov r25, \tsk
#endif

.endm


#else /* Uniprocessor implementation of macros */

.macro GET_CURR_TASK_ON_CPU reg
ld \reg, [@_current_task]
.endm
@@ -650,6 +697,8 @@
#endif
.endm

#endif /* SMP / UNI */

/* ------------------------------------------------------------------
* Get the ptr to some field of Current Task at @off in task struct
* -Uses r25 for Current task ptr if that is enabled
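Two things happen in this header: the exception prologue's scratch slot moves from a single static memory word (@ex_saved_reg1) to the per-CPU aux register ARC_REG_SCRATCH_DATA0, so concurrent exceptions on different CPUs cannot clobber each other's saved register, and the current-task macros index a per-CPU array by CPU id. A rough C rendering of the latter — a sketch, not the kernel code; NR_CPUS is assumed and get_cpu_id() stands in for the GET_CPU_ID assembly macro:

#define NR_CPUS 4	/* assumption for the sketch */

struct task_struct;	/* opaque here */

static struct task_struct *_current_task[NR_CPUS];

/* stand-in for GET_CPU_ID, which reads the IDENTITY aux register */
static unsigned int get_cpu_id(void)
{
	return 0;
}

/* GET_CURR_TASK_ON_CPU: the CPU id selects the slot (ld.as scales by 4) */
static struct task_struct *get_curr_task_on_cpu(void)
{
	return _current_task[get_cpu_id()];
}

/* SET_CURR_TASK_ON_CPU: same indexing, done with add2 in the assembly */
static void set_curr_task_on_cpu(struct task_struct *tsk)
{
	_current_task[get_cpu_id()] = tsk;
}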
4 changes: 4 additions & 0 deletions trunk/arch/arc/include/asm/mmu_context.h
@@ -147,8 +147,10 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
#ifndef CONFIG_SMP
/* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
#endif

/*
* Get a new ASID if task doesn't have a valid one. Possible when
@@ -197,7 +199,9 @@ static inline void destroy_context(struct mm_struct *mm)

static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
#ifndef CONFIG_SMP
write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
#endif

/* Unconditionally get a new ASID */
get_new_mmu_context(next);
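The #ifndef CONFIG_SMP guards here pair with the entry.h change above: on UP, ARC_REG_SCRATCH_DATA0 is free to cache the incoming task's pgd, sparing fast paths a chain of memory lookups; on SMP that register is reserved as the exception scratch slot, so the cached copy is compiled out. A sketch of the pointer chain being avoided, with simplified stand-in types rather than the kernel definitions:

/* simplified stand-ins for the real kernel structures */
struct mm_struct {
	unsigned long *pgd;
};

struct task_struct {
	struct mm_struct *mm;
};

/* Without the cached copy, reaching the PGD base costs three dependent
 * memory lookups — the task pointer, task->mm, then mm->pgd — exactly
 * the "3 mem lookups" the comment in switch_mm() refers to. */
static unsigned long *pgd_base_slow(struct task_struct *tsk)
{
	return tsk->mm->pgd;
}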
9 changes: 9 additions & 0 deletions trunk/arch/arc/include/asm/mutex.h
@@ -6,4 +6,13 @@
* published by the Free Software Foundation.
*/

/*
* xchg() based mutex fast path maintains a state of 0 or 1, as opposed to
* atomic dec based which can "count" any number of lock contenders.
* This ideally needs to be fixed in core, but for now switching to dec ver.
*/
#if defined(CONFIG_SMP) && (CONFIG_NR_CPUS > 2)
#include <asm-generic/mutex-dec.h>
#else
#include <asm-generic/mutex-xchg.h>
#endif
4 changes: 4 additions & 0 deletions trunk/arch/arc/include/asm/pgtable.h
@@ -354,11 +354,15 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
* Thus use this macro only when you are certain that "current" is current
* e.g. when dealing with signal frame setup code etc
*/
#ifndef CONFIG_SMP
#define pgd_offset_fast(mm, addr) \
({ \
pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0); \
pgd_base + pgd_index(addr); \
})
#else
#define pgd_offset_fast(mm, addr) pgd_offset(mm, addr)
#endif

extern void paging_init(void);
extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
8 changes: 8 additions & 0 deletions trunk/arch/arc/include/asm/processor.h
@@ -58,7 +58,15 @@ unsigned long thread_saved_pc(struct task_struct *t);
/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk) do { } while (0)

/*
* A lot of busy-wait loops in SMP are based on non-volatile data which would
* otherwise get optimised away by gcc
*/
#ifdef CONFIG_SMP
#define cpu_relax() __asm__ __volatile__ ("" : : : "memory")
#else
#define cpu_relax() do { } while (0)
#endif

#define copy_segments(tsk, mm) do { } while (0)
#define release_segments(mm) do { } while (0)
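Why the SMP variant of cpu_relax() must be a compiler barrier: in a busy-wait loop over plain (non-volatile) data, gcc is otherwise free to hoist the load out of the loop and spin forever on a stale register. A minimal sketch, with flag and wait_for_flag() invented for illustration:

/* empty asm with a "memory" clobber: the SMP cpu_relax() from above */
#define cpu_relax() __asm__ __volatile__ ("" : : : "memory")

static int flag;	/* deliberately non-volatile; set by another CPU */

static void wait_for_flag(void)
{
	while (!flag)
		cpu_relax();	/* barrier forces 'flag' to be re-read */
}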
107 changes: 107 additions & 0 deletions trunk/arch/arc/include/asm/smp.h
@@ -9,6 +9,69 @@
#ifndef __ASM_ARC_SMP_H
#define __ASM_ARC_SMP_H

#ifdef CONFIG_SMP

#include <linux/types.h>
#include <linux/init.h>
#include <linux/threads.h>

#define raw_smp_processor_id() (current_thread_info()->cpu)

/* including cpumask.h leads to cyclic deps, hence this forward declaration */
struct cpumask;

/*
* APIs provided by arch SMP code to generic code
*/
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);

/*
* APIs provided by arch SMP code to rest of arch code
*/
extern void __init smp_init_cpus(void);
extern void __init first_lines_of_secondary(void);

/*
* API expected BY platform smp code (FROM arch smp code)
*
* smp_ipi_irq_setup:
* Takes @cpu and @irq to which the arch-common ISR is hooked up
*/
extern int smp_ipi_irq_setup(int cpu, int irq);

/*
* APIs expected FROM platform smp code
*
* arc_platform_smp_cpuinfo:
* returns a string containing info for /proc/cpuinfo
*
* arc_platform_smp_init_cpu:
* Called from start_kernel_secondary to do any CPU local setup
* such as starting a timer, setting up IPI etc
*
* arc_platform_smp_wait_to_boot:
* Called from early bootup code for non-Master CPUs to "park" them
*
* arc_platform_smp_wakeup_cpu:
* Called from __cpu_up (Master CPU) to kick start another one
*
* arc_platform_ipi_send:
* Takes @cpumask to which IPI(s) would be sent.
* The actual msg-id/buffer is managed in arch-common code
*
* arc_platform_ipi_clear:
* Takes @cpu which got IPI at @irq to do any IPI clearing
*/
extern const char *arc_platform_smp_cpuinfo(void);
extern void arc_platform_smp_init_cpu(void);
extern void arc_platform_smp_wait_to_boot(int cpu);
extern void arc_platform_smp_wakeup_cpu(int cpu, unsigned long pc);
extern void arc_platform_ipi_send(const struct cpumask *callmap);
extern void arc_platform_ipi_clear(int cpu, int irq);

#endif /* CONFIG_SMP */

/*
* ARC700 doesn't support atomic Read-Modify-Write ops.
* Originally, interrupts had to be disabled around code to guarantee atomicity.
@@ -18,17 +81,61 @@
*
* (1) These insns were introduced only in the 4.10 release, so support is
* needed for older releases.
*
* (2) In an SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
* guaranteed by the platform (not something which core handles).
* Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
* disabling for atomicity.
*
* However the exported spinlock API is not usable due to cyclic hdr deps
* (even after system.h disintegration upstream)
* asm/bitops.h -> linux/spinlock.h -> linux/preempt.h
* -> linux/thread_info.h -> linux/bitops.h -> asm/bitops.h
*
* So the workaround is to use the lowest level arch spinlock API.
* The exported spinlock API is smart enough to be NOP for !CONFIG_SMP,
* but the same is not true for the ARCH backend, hence the need for 2 variants
*/
#ifndef CONFIG_ARC_HAS_LLSC

#include <linux/irqflags.h>
#ifdef CONFIG_SMP

#include <asm/spinlock.h>

extern arch_spinlock_t smp_atomic_ops_lock;
extern arch_spinlock_t smp_bitops_lock;

#define atomic_ops_lock(flags) do { \
local_irq_save(flags); \
arch_spin_lock(&smp_atomic_ops_lock); \
} while (0)

#define atomic_ops_unlock(flags) do { \
arch_spin_unlock(&smp_atomic_ops_lock); \
local_irq_restore(flags); \
} while (0)

#define bitops_lock(flags) do { \
local_irq_save(flags); \
arch_spin_lock(&smp_bitops_lock); \
} while (0)

#define bitops_unlock(flags) do { \
arch_spin_unlock(&smp_bitops_lock); \
local_irq_restore(flags); \
} while (0)

#else /* !CONFIG_SMP */

#define atomic_ops_lock(flags) local_irq_save(flags)
#define atomic_ops_unlock(flags) local_irq_restore(flags)

#define bitops_lock(flags) local_irq_save(flags)
#define bitops_unlock(flags) local_irq_restore(flags)

#endif /* !CONFIG_SMP */

#endif /* !CONFIG_ARC_HAS_LLSC */

#endif
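The atomic_ops_lock()/bitops_lock() pairs above are the building blocks for the non-LLSC atomics: interrupts off first, then the raw arch spinlock, so neither another CPU nor a local interrupt can interleave with the read-modify-write. A self-contained sketch of the usage pattern, modeled on how non-LLSC atomic ops would use it; the pthread stand-ins replace local_irq_save()/arch_spin_lock() only so the sketch compiles outside the kernel:

#include <pthread.h>

typedef struct {
	int counter;
} atomic_t;

static pthread_mutex_t sketch_lock = PTHREAD_MUTEX_INITIALIZER;

/* stand-ins: the kernel versions disable IRQs and spin on an arch lock */
#define atomic_ops_lock(flags) \
	do { (void)(flags); pthread_mutex_lock(&sketch_lock); } while (0)
#define atomic_ops_unlock(flags) \
	do { pthread_mutex_unlock(&sketch_lock); (void)(flags); } while (0)

static void atomic_add(int i, atomic_t *v)
{
	unsigned long flags = 0;

	atomic_ops_lock(flags);		/* SMP: local_irq_save + arch_spin_lock */
	v->counter += i;		/* plain RMW, now safe from interleaving */
	atomic_ops_unlock(flags);	/* arch_spin_unlock + local_irq_restore */
}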
1 change: 1 addition & 0 deletions trunk/arch/arc/kernel/Makefile
@@ -13,6 +13,7 @@ obj-y += signal.o traps.o sys.o troubleshoot.o stacktrace.o clk.o
obj-y += devtree.o

obj-$(CONFIG_MODULES) += arcksyms.o module.o
obj-$(CONFIG_SMP) += smp.o

obj-$(CONFIG_ARC_FPU_SAVE_RESTORE) += fpu.o
CFLAGS_fpu.o += -mdpfp
11 changes: 11 additions & 0 deletions trunk/arch/arc/kernel/ctx_sw.c
@@ -58,7 +58,18 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task)
* For SMP extra work to get to &_current_task[cpu]
* (open coded SET_CURR_TASK_ON_CPU)
*/
#ifndef CONFIG_SMP
"st %2, [@_current_task] \n\t"
#else
"lr r24, [identity] \n\t"
"lsr r24, r24, 8 \n\t"
"bmsk r24, r24, 7 \n\t"
"add2 r24, @_current_task, r24 \n\t"
"st %2, [r24] \n\t"
#endif
#ifdef CONFIG_ARC_CURR_IN_REG
"mov r25, %2 \n\t"
#endif

/* get ksp of incoming task from tsk->thread.ksp */
"ld.as sp, [%2, %1] \n\t"
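The inline assembly open-codes SET_CURR_TASK_ON_CPU: read the IDENTITY aux register, extract the CPU id from bits [15:8] (lsr by 8, then bmsk to keep 8 bits), and use it to index _current_task[] (add2 scales the index by the 4-byte pointer size). The extraction in plain C, for reference — an illustrative helper, not kernel code:

/* mirrors: lsr r24, r24, 8 ; bmsk r24, r24, 7 */
static unsigned int cpu_id_from_identity(unsigned int identity)
{
	return (identity >> 8) & 0xff;
}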
[diff for the remaining changed files not shown]
