
Commit

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu:
  percpu: add __percpu sparse annotations to what's left
  percpu: add __percpu sparse annotations to fs
  percpu: add __percpu sparse annotations to core kernel subsystems
  local_t: Remove leftover local.h
  this_cpu: Remove pageset_notifier
  this_cpu: Page allocator conversion
  percpu, x86: Generic inc / dec percpu instructions
  local_t: Move local.h include to ringbuffer.c and ring_buffer_benchmark.c
  module: Use this_cpu_xx to dynamically allocate counters
  local_t: Remove cpu_local_xx macros
  percpu: refactor the code in pcpu_[de]populate_chunk()
  percpu: remove compile warnings caused by __verify_pcpu_ptr()
  percpu: make accessors check for percpu pointer in sparse
  percpu: add __percpu for sparse.
  percpu: make access macros universal
  percpu: remove per_cpu__ prefix.
Linus Torvalds committed Mar 3, 2010
2 parents 4850f52 + a29d8b8 commit 0a135ba
Showing 60 changed files with 354 additions and 499 deletions.
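
To make the effect of the series concrete, here is a small illustrative sketch (not taken from this diff; the struct, variable, and function names are invented) of how per-CPU data is declared and accessed once the per_cpu__ name prefix is gone, the access macros are universal, and the __percpu sparse annotation is in place:

#include <linux/errno.h>
#include <linux/percpu.h>

/* Statically allocated per-CPU counter: the plain identifier is used both
 * to declare the variable and inside the access macros. */
static DEFINE_PER_CPU(unsigned long, hits);

/* Dynamically allocated per-CPU data carries the __percpu annotation so
 * sparse can warn about direct dereferences of the per-CPU pointer. */
struct example_stats {
	unsigned long packets;
	unsigned long bytes;
};
static struct example_stats __percpu *stats;

static int example_init(void)
{
	stats = alloc_percpu(struct example_stats);
	return stats ? 0 : -ENOMEM;
}

static void example_account(unsigned int len)
{
	/* this_cpu_*() take the per-CPU variable (or a member reached through
	 * a __percpu pointer) directly and are preemption safe. */
	this_cpu_inc(hits);
	this_cpu_inc(stats->packets);
	this_cpu_add(stats->bytes, len);
}

static void example_exit(void)
{
	free_percpu(stats);
}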
17 changes: 0 additions & 17 deletions arch/alpha/include/asm/local.h
@@ -98,21 +98,4 @@ static __inline__ long local_sub_return(long i, local_t * l)
#define __local_add(i,l) ((l)->a.counter+=(i))
#define __local_sub(i,l) ((l)->a.counter-=(i))

-/* Use these for per-cpu local_t variables: on some archs they are
- * much more efficient than these naive implementations. Note they take
- * a variable, not an address.
- */
-#define cpu_local_read(l) local_read(&__get_cpu_var(l))
-#define cpu_local_set(l, i) local_set(&__get_cpu_var(l), (i))
-
-#define cpu_local_inc(l) local_inc(&__get_cpu_var(l))
-#define cpu_local_dec(l) local_dec(&__get_cpu_var(l))
-#define cpu_local_add(i, l) local_add((i), &__get_cpu_var(l))
-#define cpu_local_sub(i, l) local_sub((i), &__get_cpu_var(l))
-
-#define __cpu_local_inc(l) __local_inc(&__get_cpu_var(l))
-#define __cpu_local_dec(l) __local_dec(&__get_cpu_var(l))
-#define __cpu_local_add(i, l) __local_add((i), &__get_cpu_var(l))
-#define __cpu_local_sub(i, l) __local_sub((i), &__get_cpu_var(l))
-
#endif /* _ALPHA_LOCAL_H */
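
The hunk above is typical of the cpu_local_xx removal: the wrappers around per-CPU local_t variables disappear because the generic this_cpu operations cover the same use case. A hedged migration sketch follows; the counter and function names are invented for illustration.

#include <linux/cpumask.h>
#include <linux/percpu.h>

/*
 * Before this series a per-CPU event counter might have looked like:
 *
 *	static DEFINE_PER_CPU(local_t, nr_events);
 *	cpu_local_inc(nr_events);
 *
 * The same thing with the this_cpu operations, no local_t needed:
 */
static DEFINE_PER_CPU(unsigned long, nr_events);

static void count_event(void)
{
	/* Preemption safe; on x86 this is a single %gs-relative add. */
	this_cpu_inc(nr_events);
}

static unsigned long total_events(void)
{
	unsigned long sum = 0;
	int cpu;

	/* Cross-CPU reads still use per_cpu() with an explicit CPU number. */
	for_each_possible_cpu(cpu)
		sum += per_cpu(nr_events, cpu);
	return sum;
}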
4 changes: 2 additions & 2 deletions arch/blackfin/mach-common/entry.S
@@ -816,8 +816,8 @@ ENDPROC(_resume)

ENTRY(_ret_from_exception)
#ifdef CONFIG_IPIPE
-p2.l = _per_cpu__ipipe_percpu_domain;
-p2.h = _per_cpu__ipipe_percpu_domain;
+p2.l = _ipipe_percpu_domain;
+p2.h = _ipipe_percpu_domain;
r0.l = _ipipe_root;
r0.h = _ipipe_root;
r2 = [p2];
2 changes: 1 addition & 1 deletion arch/cris/arch-v10/kernel/entry.S
@@ -358,7 +358,7 @@ mmu_bus_fault:
1: btstq 12, $r1 ; Refill?
bpl 2f
lsrq 24, $r1 ; Get PGD index (bit 24-31)
-move.d [per_cpu__current_pgd], $r0 ; PGD for the current process
+move.d [current_pgd], $r0 ; PGD for the current process
move.d [$r0+$r1.d], $r0 ; Get PMD
beq 2f
nop
2 changes: 1 addition & 1 deletion arch/cris/arch-v32/mm/mmu.S
@@ -115,7 +115,7 @@
#ifdef CONFIG_SMP
move $s7, $acr ; PGD
#else
-move.d per_cpu__current_pgd, $acr ; PGD
+move.d current_pgd, $acr ; PGD
#endif
; Look up PMD in PGD
lsrq 24, $r0 ; Get PMD index into PGD (bit 24-31)
4 changes: 2 additions & 2 deletions arch/ia64/include/asm/percpu.h
@@ -9,7 +9,7 @@
#define PERCPU_ENOUGH_ROOM PERCPU_PAGE_SIZE

#ifdef __ASSEMBLY__
-# define THIS_CPU(var) (per_cpu__##var) /* use this to mark accesses to per-CPU variables... */
+# define THIS_CPU(var) (var) /* use this to mark accesses to per-CPU variables... */
#else /* !__ASSEMBLY__ */


@@ -39,7 +39,7 @@ extern void *per_cpu_init(void);
* On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly
* more efficient.
*/
-#define __ia64_per_cpu_var(var) per_cpu__##var
+#define __ia64_per_cpu_var(var) var

#include <asm-generic/percpu.h>

4 changes: 2 additions & 2 deletions arch/ia64/kernel/ia64_ksyms.c
@@ -30,9 +30,9 @@ EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic
#endif

#include <asm/processor.h>
-EXPORT_SYMBOL(per_cpu__ia64_cpu_info);
+EXPORT_SYMBOL(ia64_cpu_info);
#ifdef CONFIG_SMP
-EXPORT_SYMBOL(per_cpu__local_per_cpu_offset);
+EXPORT_SYMBOL(local_per_cpu_offset);
#endif

#include <asm/uaccess.h>
2 changes: 1 addition & 1 deletion arch/ia64/mm/discontig.c
@@ -459,7 +459,7 @@ static void __init initialize_pernode_data(void)
cpu = 0;
node = node_cpuid[cpu].nid;
cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
-((char *)&per_cpu__ia64_cpu_info - __per_cpu_start));
+((char *)&ia64_cpu_info - __per_cpu_start));
cpu0_cpu_info->node_data = mem_data[node].node_data;
}
#endif /* CONFIG_SMP */
25 changes: 0 additions & 25 deletions arch/m32r/include/asm/local.h
@@ -338,29 +338,4 @@ static inline void local_set_mask(unsigned long mask, local_t *addr)
* a variable, not an address.
*/

-/* Need to disable preemption for the cpu local counters otherwise we could
-   still access a variable of a previous CPU in a non local way. */
-#define cpu_local_wrap_v(l) \
-({ local_t res__; \
-   preempt_disable(); \
-   res__ = (l); \
-   preempt_enable(); \
-   res__; })
-#define cpu_local_wrap(l) \
-({ preempt_disable(); \
-   l; \
-   preempt_enable(); }) \
-
-#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l) cpu_local_inc(l)
-#define __cpu_local_dec(l) cpu_local_dec(l)
-#define __cpu_local_add(i, l) cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
-
#endif /* __M32R_LOCAL_H */
2 changes: 1 addition & 1 deletion arch/microblaze/include/asm/entry.h
@@ -21,7 +21,7 @@
* places
*/

-#define PER_CPU(var) per_cpu__##var
+#define PER_CPU(var) var

# ifndef __ASSEMBLY__
DECLARE_PER_CPU(unsigned int, KSP); /* Saved kernel stack pointer */
25 changes: 0 additions & 25 deletions arch/mips/include/asm/local.h
@@ -193,29 +193,4 @@ static __inline__ long local_sub_return(long i, local_t * l)
#define __local_add(i, l) ((l)->a.counter+=(i))
#define __local_sub(i, l) ((l)->a.counter-=(i))

-/* Need to disable preemption for the cpu local counters otherwise we could
-   still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l) \
-({ local_t res__; \
-   preempt_disable(); \
-   res__ = (l); \
-   preempt_enable(); \
-   res__; })
-#define cpu_local_wrap(l) \
-({ preempt_disable(); \
-   l; \
-   preempt_enable(); }) \
-
-#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l) cpu_local_inc(l)
-#define __cpu_local_dec(l) cpu_local_dec(l)
-#define __cpu_local_add(i, l) cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
-
#endif /* _ARCH_MIPS_LOCAL_H */
8 changes: 4 additions & 4 deletions arch/parisc/lib/fixup.S
@@ -36,8 +36,8 @@
#endif
/* t2 = &__per_cpu_offset[smp_processor_id()]; */
LDREGX \t2(\t1),\t2
-addil LT%per_cpu__exception_data,%r27
-LDREG RT%per_cpu__exception_data(%r1),\t1
+addil LT%exception_data,%r27
+LDREG RT%exception_data(%r1),\t1
/* t1 = &__get_cpu_var(exception_data) */
add,l \t1,\t2,\t1
/* t1 = t1->fault_ip */
@@ -46,8 +46,8 @@
#else
.macro get_fault_ip t1 t2
/* t1 = &__get_cpu_var(exception_data) */
-addil LT%per_cpu__exception_data,%r27
-LDREG RT%per_cpu__exception_data(%r1),\t2
+addil LT%exception_data,%r27
+LDREG RT%exception_data(%r1),\t2
/* t1 = t2->fault_ip */
LDREG EXCDATA_IP(\t2), \t1
.endm
25 changes: 0 additions & 25 deletions arch/powerpc/include/asm/local.h
@@ -172,29 +172,4 @@ static __inline__ long local_dec_if_positive(local_t *l)
#define __local_add(i,l) ((l)->a.counter+=(i))
#define __local_sub(i,l) ((l)->a.counter-=(i))

-/* Need to disable preemption for the cpu local counters otherwise we could
-   still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l) \
-({ local_t res__; \
-   preempt_disable(); \
-   res__ = (l); \
-   preempt_enable(); \
-   res__; })
-#define cpu_local_wrap(l) \
-({ preempt_disable(); \
-   l; \
-   preempt_enable(); }) \
-
-#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l) cpu_local_inc(l)
-#define __cpu_local_dec(l) cpu_local_dec(l)
-#define __cpu_local_add(i, l) cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
-
#endif /* _ARCH_POWERPC_LOCAL_H */
7 changes: 3 additions & 4 deletions arch/sparc/kernel/nmi.c
@@ -21,7 +21,6 @@

#include <asm/perf_event.h>
#include <asm/ptrace.h>
#include <asm/local.h>
#include <asm/pcr.h>

/* We don't have a real NMI on sparc64, but we can fake one
@@ -113,13 +112,13 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
touched = 1;
}
if (!touched && __get_cpu_var(last_irq_sum) == sum) {
-__this_cpu_inc(per_cpu_var(alert_counter));
-if (__this_cpu_read(per_cpu_var(alert_counter)) == 30 * nmi_hz)
+__this_cpu_inc(alert_counter);
+if (__this_cpu_read(alert_counter) == 30 * nmi_hz)
die_nmi("BUG: NMI Watchdog detected LOCKUP",
regs, panic_on_timeout);
} else {
__get_cpu_var(last_irq_sum) = sum;
-__this_cpu_write(per_cpu_var(alert_counter), 0);
+__this_cpu_write(alert_counter, 0);
}
if (__get_cpu_var(wd_enabled)) {
write_pic(picl_value(nmi_hz));
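
The nmi.c hunk above drops the per_cpu_var() wrapper: after "percpu: make access macros universal", the __this_cpu_*() and this_cpu_*() operations take the per-CPU variable name directly. A hedged sketch of the convention (the functions below are invented; only alert_counter mirrors the code above):

#include <linux/percpu.h>

static DEFINE_PER_CPU(int, alert_counter);

static void from_nmi_or_irq_context(void)
{
	/* __this_cpu_*() provide no preemption protection of their own and
	 * are meant for contexts that already cannot migrate, such as the
	 * NMI handler above.  The argument is the variable itself; the old
	 * per_cpu__ prefix and per_cpu_var() wrapper are gone. */
	__this_cpu_inc(alert_counter);
}

static void from_process_context(void)
{
	/* this_cpu_*() are safe to use with preemption enabled. */
	if (this_cpu_read(alert_counter) > 30)
		this_cpu_write(alert_counter, 0);
}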
8 changes: 4 additions & 4 deletions arch/sparc/kernel/rtrap_64.S
@@ -149,11 +149,11 @@ rtrap_nmi: ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
rtrap_irq:
rtrap:
#ifndef CONFIG_SMP
-sethi %hi(per_cpu____cpu_data), %l0
-lduw [%l0 + %lo(per_cpu____cpu_data)], %l1
+sethi %hi(__cpu_data), %l0
+lduw [%l0 + %lo(__cpu_data)], %l1
#else
-sethi %hi(per_cpu____cpu_data), %l0
-or %l0, %lo(per_cpu____cpu_data), %l0
+sethi %hi(__cpu_data), %l0
+or %l0, %lo(__cpu_data), %l0
lduw [%l0 + %g5], %l1
#endif
cmp %l1, 0
37 changes: 0 additions & 37 deletions arch/x86/include/asm/local.h
@@ -195,41 +195,4 @@ static inline long local_sub_return(long i, local_t *l)
#define __local_add(i, l) local_add((i), (l))
#define __local_sub(i, l) local_sub((i), (l))

-/* Use these for per-cpu local_t variables: on some archs they are
- * much more efficient than these naive implementations. Note they take
- * a variable, not an address.
- *
- * X86_64: This could be done better if we moved the per cpu data directly
- * after GS.
- */
-
-/* Need to disable preemption for the cpu local counters otherwise we could
-   still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l) \
-({ \
-	local_t res__; \
-	preempt_disable(); \
-	res__ = (l); \
-	preempt_enable(); \
-	res__; \
-})
-#define cpu_local_wrap(l) \
-({ \
-	preempt_disable(); \
-	(l); \
-	preempt_enable(); \
-}) \
-
-#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var((l))))
-#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var((l)), (i)))
-#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var((l))))
-#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var((l))))
-#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var((l))))
-#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var((l))))
-
-#define __cpu_local_inc(l) cpu_local_inc((l))
-#define __cpu_local_dec(l) cpu_local_dec((l))
-#define __cpu_local_add(i, l) cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
-
#endif /* _ASM_X86_LOCAL_H */
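
The removed x86 cpu_local_wrap() helpers existed only to bracket a read-modify-write of "this CPU's" slot with preempt_disable()/preempt_enable(). The this_cpu operations make that unnecessary; a hedged illustration with an invented counter:

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(unsigned long, pkt_count);

static void count_packet(void)
{
	/* What the removed wrappers effectively did: pin the task to this
	 * CPU for the duration of the update. */
	preempt_disable();
	__this_cpu_inc(pkt_count);
	preempt_enable();

	/* The this_cpu form is a single preemption-safe operation; on x86 it
	 * typically compiles to one %gs-relative instruction, so no explicit
	 * preempt_disable()/preempt_enable() pair is needed. */
	this_cpu_inc(pkt_count);
}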
[diff truncated: the remaining changed files of the 60 are not shown here]
