From ddd5466fbe02807e0810c6ec5ff8bbf8aa1e88db Mon Sep 17 00:00:00 2001
From: Fenghua Yu
Date: Thu, 19 Jul 2007 01:48:13 -0700
Subject: [PATCH]

--- yaml ---
r: 61583
b: refs/heads/master
c: f34e3b61f2be9628bd41244f3ecc42009c5eced5
h: refs/heads/master
i:
  61581: 6c8f4a3de65ecc8a3dd00340f2c3d5a33cb297a5
  61579: f2c35f8ce92bc085b4bed146c07c9526db927433
  61575: 2ce948bf8f2374609611eb3dcf1999a823a7b243
  61567: 56fcb65c7941bdd6824bb090d4a7731af102771d
v: v3
---
 [refs]                               | 2 +-
 trunk/arch/i386/kernel/init_task.c   | 2 +-
 trunk/arch/i386/kernel/irq.c         | 2 +-
 trunk/arch/ia64/kernel/smp.c         | 2 +-
 trunk/arch/x86_64/kernel/init_task.c | 2 +-
 trunk/kernel/sched.c                 | 2 +-
 6 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/[refs] b/[refs]
index 2282161b1c8e..448f7f69ae41 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 5fb7dc37dc16fbc8b80d81318a582201ef7e280d
+refs/heads/master: f34e3b61f2be9628bd41244f3ecc42009c5eced5
diff --git a/trunk/arch/i386/kernel/init_task.c b/trunk/arch/i386/kernel/init_task.c
index cff95d10a4d8..d26fc063a760 100644
--- a/trunk/arch/i386/kernel/init_task.c
+++ b/trunk/arch/i386/kernel/init_task.c
@@ -42,5 +42,5 @@ EXPORT_SYMBOL(init_task);
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
  * no more per-task TSS's.
  */
-DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
diff --git a/trunk/arch/i386/kernel/irq.c b/trunk/arch/i386/kernel/irq.c
index d2daf672f4a2..ba44d40b066d 100644
--- a/trunk/arch/i386/kernel/irq.c
+++ b/trunk/arch/i386/kernel/irq.c
@@ -21,7 +21,7 @@
 #include
 #include
 
-DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
+DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 EXPORT_PER_CPU_SYMBOL(irq_stat);
 
 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
diff --git a/trunk/arch/ia64/kernel/smp.c b/trunk/arch/ia64/kernel/smp.c
index b3a47f986e1e..9f72838db26e 100644
--- a/trunk/arch/ia64/kernel/smp.c
+++ b/trunk/arch/ia64/kernel/smp.c
@@ -82,7 +82,7 @@ static volatile struct call_data_struct *call_data;
 #define IPI_KDUMP_CPU_STOP	3
 
 /* This needs to be cacheline aligned because it is written to by *other* CPUs.  */
-static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
+static DEFINE_PER_CPU_SHARED_ALIGNED(u64, ipi_operation);
 
 extern void cpu_halt (void);
 
diff --git a/trunk/arch/x86_64/kernel/init_task.c b/trunk/arch/x86_64/kernel/init_task.c
index 3dc5854ba21e..4ff33d4f8551 100644
--- a/trunk/arch/x86_64/kernel/init_task.c
+++ b/trunk/arch/x86_64/kernel/init_task.c
@@ -44,7 +44,7 @@ EXPORT_SYMBOL(init_task);
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
-DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
 
 /* Copies of the original ist values from the tss are only accessed during
  * debugging, no special alignment required.
diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c
index cb31fb4a1379..645256b228c3 100644
--- a/trunk/kernel/sched.c
+++ b/trunk/kernel/sched.c
@@ -301,7 +301,7 @@ struct rq {
 	struct lock_class_key rq_lock_key;
 };
 
-static DEFINE_PER_CPU(struct rq, runqueues) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 static DEFINE_MUTEX(sched_hotcpu_mutex);
 
 static inline void check_preempt_curr(struct rq *rq, struct task_struct *p)
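
As a usage sketch (not part of the patch): every hunk above applies the same
conversion, replacing an explicit ____cacheline_aligned or
____cacheline_aligned_in_smp annotation on a DEFINE_PER_CPU definition with
DEFINE_PER_CPU_SHARED_ALIGNED, the interface introduced by the parent commit
(5fb7dc37dc16...). The example below illustrates the pattern under the
assumed 2.6.23-era <linux/percpu.h> semantics; struct example_counter and the
variable names are hypothetical, chosen only for illustration.

/* Sketch only, assuming the 2.6.23-era <linux/percpu.h> API;
 * example_counter is a hypothetical type used purely to show the
 * before/after shape of the conversion this patch performs. */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/percpu.h>

struct example_counter {
	u64 value;
};

/* Before: alignment requested with an explicit attribute appended to the
 * per-CPU definition. */
static DEFINE_PER_CPU(struct example_counter, old_way) ____cacheline_aligned_in_smp;

/* After: DEFINE_PER_CPU_SHARED_ALIGNED supplies ____cacheline_aligned_in_smp
 * itself and, under CONFIG_SMP, places the variable in the
 * .data.percpu.shared_aligned section, so per-CPU data that is written by
 * *other* CPUs does not share a cacheline with ordinary per-CPU data. */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct example_counter, new_way);

On !CONFIG_SMP builds the new macro degenerates to a plain DEFINE_PER_CPU,
so the conversion also drops the alignment padding where it buys nothing.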