Commit
---
yaml
---
r: 324140
b: refs/heads/master
c: 08e875c
h: refs/heads/master
v: v3
Catalin Marinas committed Sep 17, 2012
1 parent 0a0f04e commit 145be18
Showing 6 changed files with 784 additions and 1 deletion.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 09b55412469dfe6797244dc5836c17ed0c2f191b
+refs/heads/master: 08e875c16a16c950e1e6d85755df5f3440844675
5 changes: 5 additions & 0 deletions trunk/arch/arm64/include/asm/hardirq.h
@@ -20,8 +20,13 @@
#include <linux/threads.h>
#include <asm/irq.h>

#define NR_IPI 4

typedef struct {
	unsigned int __softirq_pending;
#ifdef CONFIG_SMP
	unsigned int ipi_irqs[NR_IPI];
#endif
} ____cacheline_aligned irq_cpustat_t;

#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
69 changes: 69 additions & 0 deletions trunk/arch/arm64/include/asm/smp.h
@@ -0,0 +1,69 @@
/*
* Copyright (C) 2012 ARM Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __ASM_SMP_H
#define __ASM_SMP_H

#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/thread_info.h>

#ifndef CONFIG_SMP
# error "<asm/smp.h> included in non-SMP build"
#endif

#define raw_smp_processor_id() (current_thread_info()->cpu)

struct seq_file;

/*
* generate IPI list text
*/
extern void show_ipi_list(struct seq_file *p, int prec);

/*
* Called from C code, this handles an IPI.
*/
extern void handle_IPI(int ipinr, struct pt_regs *regs);

/*
* Set up the set of possible CPUs (via set_cpu_possible)
*/
extern void smp_init_cpus(void);

/*
* Provide a function to raise an IPI cross call on CPUs in callmap.
*/
extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));

/*
* Called from the secondary holding pen, this is the secondary CPU entry point.
*/
asmlinkage void secondary_start_kernel(void);

/*
* Initial data for bringing up a secondary CPU.
*/
struct secondary_data {
	void *stack;
};
extern struct secondary_data secondary_data;
extern void secondary_holding_pen(void);
extern volatile unsigned long secondary_holding_pen_release;

extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);

#endif /* ifndef __ASM_SMP_H */
202 changes: 202 additions & 0 deletions trunk/arch/arm64/include/asm/spinlock.h
@@ -0,0 +1,202 @@
/*
* Copyright (C) 2012 ARM Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>

/*
* Spinlock implementation.
*
* The old value is read exclusively and the new one, if unlocked, is written
* exclusively. In case of failure, the loop is restarted.
*
* The memory barriers are implicit with the load-acquire and store-release
* instructions.
*
* Unlocked value: 0
* Locked value: 1
*/

#define arch_spin_is_locked(x) ((x)->lock != 0)
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int tmp;

	asm volatile(
	" sevl\n"
	"1: wfe\n"
	"2: ldaxr %w0, [%1]\n"
	" cbnz %w0, 1b\n"
	" stxr %w0, %w2, [%1]\n"
	" cbnz %w0, 2b\n"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "memory");
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int tmp;

	asm volatile(
	" ldaxr %w0, [%1]\n"
	" cbnz %w0, 1f\n"
	" stxr %w0, %w2, [%1]\n"
	"1:\n"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "memory");

	return !tmp;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	asm volatile(
	" stlr %w1, [%0]\n"
	: : "r" (&lock->lock), "r" (0) : "memory");
}

/*
* Write lock implementation.
*
* Write locks set bit 31. Unlocking is done by writing 0 since the lock is
* exclusively held.
*
* The memory barriers are implicit with the load-acquire and store-release
* instructions.
*/

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(
	" sevl\n"
	"1: wfe\n"
	"2: ldaxr %w0, [%1]\n"
	" cbnz %w0, 1b\n"
	" stxr %w0, %w2, [%1]\n"
	" cbnz %w0, 2b\n"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "memory");
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(
	" ldaxr %w0, [%1]\n"
	" cbnz %w0, 1f\n"
	" stxr %w0, %w2, [%1]\n"
	"1:\n"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "memory");

	return !tmp;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(
	" stlr %w1, [%0]\n"
	: : "r" (&rw->lock), "r" (0) : "memory");
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x) ((x)->lock == 0)

/*
* Read lock implementation.
*
* It exclusively loads the lock value, increments it and stores the new value
* back if positive and the CPU still exclusively owns the location. If the
* value is negative, the lock is already held.
*
* During unlocking there may be multiple active read locks but no write lock.
*
* The memory barriers are implicit with the load-acquire and store-release
* instructions.
*/
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(
	" sevl\n"
	"1: wfe\n"
	"2: ldaxr %w0, [%2]\n"
	" add %w0, %w0, #1\n"
	" tbnz %w0, #31, 1b\n"
	" stxr %w1, %w0, [%2]\n"
	" cbnz %w1, 2b\n"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "memory");
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(
	"1: ldxr %w0, [%2]\n"
	" sub %w0, %w0, #1\n"
	" stlxr %w1, %w0, [%2]\n"
	" cbnz %w1, 1b\n"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2 = 1;

	asm volatile(
	" ldaxr %w0, [%2]\n"
	" add %w0, %w0, #1\n"
	" tbnz %w0, #31, 1f\n"
	" stxr %w1, %w0, [%2]\n"
	"1:\n"
	: "=&r" (tmp), "+r" (tmp2)
	: "r" (&rw->lock)
	: "memory");

	return !tmp2;
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x) ((x)->lock < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()

#endif /* __ASM_SPINLOCK_H */
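
Aside (editor's illustration, not part of this commit): the acquire/release ordering that the ldaxr/stlr sequences above rely on can be sketched in portable C11 atomics. The demo_* names below are hypothetical, and the sevl/wfe wait hints have no direct C equivalent, so a plain busy-wait stands in for them; this is a rough analogue of the technique, not the kernel implementation.

#include <stdatomic.h>

/* Hypothetical user-space analogue of the test-and-set spinlock above:
 * 0 = unlocked, 1 = locked. */
typedef struct {
	atomic_uint lock;
} demo_spinlock_t;

static inline void demo_spin_lock(demo_spinlock_t *l)
{
	unsigned int expected;

	for (;;) {
		expected = 0;
		/* Acquire on success mirrors ldaxr; the store-exclusive
		 * retry loop maps onto the weak compare-exchange. */
		if (atomic_compare_exchange_weak_explicit(&l->lock, &expected, 1,
				memory_order_acquire, memory_order_relaxed))
			return;
		/* Busy-wait; sevl/wfe would let the CPU idle here instead. */
	}
}

static inline void demo_spin_unlock(demo_spinlock_t *l)
{
	/* Release store mirrors stlr. */
	atomic_store_explicit(&l->lock, 0, memory_order_release);
}

/* Hypothetical analogue of the reader side: take the lock by incrementing
 * the count, but only while the writer bit (bit 31) is clear. */
typedef struct {
	atomic_uint lock;
} demo_rwlock_t;

static inline void demo_read_lock(demo_rwlock_t *rw)
{
	unsigned int old;

	for (;;) {
		old = atomic_load_explicit(&rw->lock, memory_order_relaxed);
		if (old & 0x80000000u)
			continue;	/* writer holds the lock */
		if (atomic_compare_exchange_weak_explicit(&rw->lock, &old, old + 1,
				memory_order_acquire, memory_order_relaxed))
			return;
	}
}

static inline void demo_read_unlock(demo_rwlock_t *rw)
{
	/* Release decrement mirrors the ldxr/stlxr loop above. */
	atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_release);
}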
38 changes: 38 additions & 0 deletions trunk/arch/arm64/include/asm/spinlock_types.h
@@ -0,0 +1,38 @@
/*
* Copyright (C) 2012 ARM Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __ASM_SPINLOCK_TYPES_H
#define __ASM_SPINLOCK_TYPES_H

#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H)
# error "please don't include this file directly"
#endif

/* We only require natural alignment for exclusive accesses. */
#define __lock_aligned

typedef struct {
	volatile unsigned int lock;
} arch_spinlock_t;

#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }

typedef struct {
	volatile unsigned int lock;
} arch_rwlock_t;

#define __ARCH_RW_LOCK_UNLOCKED { 0 }

#endif
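
Tying the two headers together, a minimal hypothetical usage sketch (editor's note, not from the commit): a lock defined statically with the initializer above and taken with the arch-level routines from spinlock.h. Real kernel code goes through the generic spinlock_t wrappers rather than calling the arch_* API directly; this only illustrates what those wrappers ultimately expand to.

static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void demo_critical_section(void)
{
	arch_spin_lock(&demo_lock);
	/* ... work protected by demo_lock ... */
	arch_spin_unlock(&demo_lock);
}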
