use the new percpu interface for shared data
Currently most of the per-cpu data that is accessed by different CPUs
has a ____cacheline_aligned_in_smp attribute.  Move all of this data to
the new per-cpu shared data section: .data.percpu.shared_aligned.

This separates the per-cpu data that is frequently referenced by other
CPUs from the local-only per-cpu data.

Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Acked-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Fenghua Yu authored and Linus Torvalds committed Jul 19, 2007
1 parent 5fb7dc3 commit f34e3b6
Showing 5 changed files with 5 additions and 5 deletions.
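
For context, DEFINE_PER_CPU_SHARED_ALIGNED comes from the parent commit
(5fb7dc3). A minimal sketch of what the macro presumably expands to,
assuming the per_cpu__ name prefix used in this era's percpu.h (not the
verbatim definition):

	/* sketch, assuming the conventions of 2007-era include/linux/percpu.h */
	#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)		\
		__attribute__((__section__(".data.percpu.shared_aligned"))) \
		__typeof__(type) per_cpu__##name			\
		____cacheline_aligned_in_smp

In other words, it behaves like DEFINE_PER_CPU plus the old explicit
____cacheline_aligned_in_smp attribute, but the section placement lets the
linker group all cross-CPU-referenced per-cpu variables together instead of
scattering them among the local-only data.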
arch/i386/kernel/init_task.c: 1 addition & 1 deletion
@@ -42,5 +42,5 @@ EXPORT_SYMBOL(init_task);
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
  * no more per-task TSS's.
  */
-DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;

arch/i386/kernel/irq.c: 1 addition & 1 deletion
@@ -21,7 +21,7 @@
 #include <asm/apic.h>
 #include <asm/uaccess.h>

-DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
+DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 EXPORT_PER_CPU_SYMBOL(irq_stat);

 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
arch/ia64/kernel/smp.c: 1 addition & 1 deletion
@@ -82,7 +82,7 @@ static volatile struct call_data_struct *call_data;
 #define IPI_KDUMP_CPU_STOP 3

 /* This needs to be cacheline aligned because it is written to by *other* CPUs. */
-static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
+static DEFINE_PER_CPU_SHARED_ALIGNED(u64, ipi_operation);

 extern void cpu_halt (void);
arch/x86_64/kernel/init_task.c: 1 addition & 1 deletion
@@ -44,7 +44,7 @@ EXPORT_SYMBOL(init_task);
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
-DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;

 /* Copies of the original ist values from the tss are only accessed during
  * debugging, no special alignment required.
kernel/sched.c: 1 addition & 1 deletion
@@ -301,7 +301,7 @@ struct rq {
 	struct lock_class_key rq_lock_key;
 };

-static DEFINE_PER_CPU(struct rq, runqueues) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 static DEFINE_MUTEX(sched_hotcpu_mutex);

 static inline void check_preempt_curr(struct rq *rq, struct task_struct *p)
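
The declarations above only select the section; the grouping itself happens
at link time. Presumably the parent commit also teaches the generic linker
script to pull the new section into the per-cpu area, roughly like this
sketch (macro shape assumed from the era's include/asm-generic/vmlinux.lds.h;
details may differ):

	/* sketch of the PERCPU() linker-script macro, not the verbatim code */
	#define PERCPU(align)						\
		. = ALIGN(align);					\
		__per_cpu_start = .;					\
		.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {	\
			*(.data.percpu)					\
			*(.data.percpu.shared_aligned)			\
		}							\
		__per_cpu_end = .;

Keeping .data.percpu.shared_aligned contiguous means the cacheline padding
those shared variables require is not interleaved with the purely local
per-cpu data.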
