Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 175885
b: refs/heads/master
c: 3688647
h: refs/heads/master
i:
  175883: e266aac
v: v3
  • Loading branch information
Tejun Heo committed Oct 2, 2009
1 parent 1ae52e5 commit cbf6755
Show file tree
Hide file tree
Showing 4 changed files with 58 additions and 31 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 12cda817779ce5381a9a4ba8d464abe17c50a9e2
refs/heads/master: 36886478f59ec0fdc24a8877c572b92f8d416aba
11 changes: 6 additions & 5 deletions trunk/arch/ia64/kernel/vmlinux.lds.S
Original file line number Diff line number Diff line change
Expand Up @@ -166,6 +166,12 @@ SECTIONS
}
#endif

#ifdef CONFIG_SMP
. = ALIGN(PERCPU_PAGE_SIZE);
__cpu0_per_cpu = .;
. = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */
#endif

. = ALIGN(PAGE_SIZE);
__init_end = .;

Expand Down Expand Up @@ -198,11 +204,6 @@ SECTIONS
data : { } :data
.data : AT(ADDR(.data) - LOAD_OFFSET)
{
#ifdef CONFIG_SMP
. = ALIGN(PERCPU_PAGE_SIZE);
__cpu0_per_cpu = .;
. = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */
#endif
INIT_TASK_DATA(PAGE_SIZE)
CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
READ_MOSTLY_DATA(SMP_CACHE_BYTES)
Expand Down
41 changes: 27 additions & 14 deletions trunk/arch/ia64/mm/contig.c
Original file line number Diff line number Diff line change
Expand Up @@ -154,36 +154,49 @@ static void *cpu_data;
void * __cpuinit
per_cpu_init (void)
{
	static bool first_time = true;
	void *cpu0_data = __cpu0_per_cpu;
	unsigned int cpu;

	/* One-time setup by the BSP; APs (and later calls) skip straight
	 * to the offset lookup. */
	if (!first_time)
		goto skip;
	first_time = false;

	/*
	 * get_free_pages() cannot be used before cpu_init() done.  BSP
	 * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
	 * get_zeroed_page().
	 */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		/* cpu0's live percpu data is in the __init-area staging
		 * buffer set up by head.S; every other CPU starts from the
		 * pristine percpu template. */
		void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;

		memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
		__per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];

		/*
		 * percpu area for cpu0 is moved from the __init area
		 * which is setup by head.S and used till this point.
		 * Update ar.k3.  This move ensures that the percpu
		 * area for cpu0 is on the correct node and its
		 * virtual address isn't insanely far from other
		 * percpu areas which is important for congruent
		 * percpu allocator.
		 */
		if (cpu == 0)
			ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) -
				    (unsigned long)__per_cpu_start);

		cpu_data += PERCPU_PAGE_SIZE;
	}
skip:
	/* Return this CPU's percpu base address. */
	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}

static inline void
alloc_per_cpu_data(void)
{
	/*
	 * Reserve one full percpu page per possible CPU, cpu0 included,
	 * so per_cpu_init() can copy every CPU's area (including cpu0's,
	 * which migrates out of the __init staging area) into this block.
	 * Note: the size must be PERCPU_PAGE_SIZE * NR_CPUS — writing
	 * "NR_CPUS-1" here would bind the -1 to NR_CPUS and under-allocate
	 * by one page.
	 */
	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
				   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}
#else
Expand Down
35 changes: 24 additions & 11 deletions trunk/arch/ia64/mm/discontig.c
Original file line number Diff line number Diff line change
Expand Up @@ -143,17 +143,30 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
int cpu;

for_each_possible_early_cpu(cpu) {
if (cpu == 0) {
void *cpu0_data = __cpu0_per_cpu;
__per_cpu_offset[cpu] = (char*)cpu0_data -
__per_cpu_start;
} else if (node == node_cpuid[cpu].nid) {
memcpy(__va(cpu_data), __phys_per_cpu_start,
__per_cpu_end - __per_cpu_start);
__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
__per_cpu_start;
cpu_data += PERCPU_PAGE_SIZE;
}
void *src = cpu == 0 ? __cpu0_per_cpu : __phys_per_cpu_start;

if (node != node_cpuid[cpu].nid)
continue;

memcpy(__va(cpu_data), src, __per_cpu_end - __per_cpu_start);
__per_cpu_offset[cpu] = (char *)__va(cpu_data) -
__per_cpu_start;

/*
* percpu area for cpu0 is moved from the __init area
* which is setup by head.S and used till this point.
* Update ar.k3. This move is ensures that percpu
* area for cpu0 is on the correct node and its
* virtual address isn't insanely far from other
* percpu areas which is important for congruent
* percpu allocator.
*/
if (cpu == 0)
ia64_set_kr(IA64_KR_PER_CPU_DATA,
(unsigned long)cpu_data -
(unsigned long)__per_cpu_start);

cpu_data += PERCPU_PAGE_SIZE;
}
#endif
return cpu_data;
Expand Down

0 comments on commit cbf6755

Please sign in to comment.