Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 175886
b: refs/heads/master
c: 5259476
h: refs/heads/master
v: v3
  • Loading branch information
Tejun Heo committed Oct 2, 2009
1 parent cbf6755 commit 01d63e6
Show file tree
Hide file tree
Showing 5 changed files with 139 additions and 21 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 36886478f59ec0fdc24a8877c572b92f8d416aba
refs/heads/master: 52594762a39dfb6338c9d0906ca21dd9ae9453be
3 changes: 0 additions & 3 deletions trunk/arch/ia64/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -87,9 +87,6 @@ config GENERIC_TIME_VSYSCALL
bool
default y

config HAVE_LEGACY_PER_CPU_AREA
def_bool y

config HAVE_SETUP_PER_CPU_AREA
def_bool y

Expand Down
12 changes: 0 additions & 12 deletions trunk/arch/ia64/kernel/setup.c
Original file line number Diff line number Diff line change
Expand Up @@ -854,18 +854,6 @@ identify_cpu (struct cpuinfo_ia64 *c)
c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}

/*
* In UP configuration, setup_per_cpu_areas() is defined in
* include/linux/percpu.h
*/
#ifdef CONFIG_SMP
void __init
setup_per_cpu_areas (void)
{
/* start_kernel() requires this... */
}
#endif

/*
* Do the following calculations:
*
Expand Down
58 changes: 53 additions & 5 deletions trunk/arch/ia64/mm/contig.c
Original file line number Diff line number Diff line change
Expand Up @@ -163,11 +163,11 @@ per_cpu_init (void)
first_time = false;

/*
* get_free_pages() cannot be used before cpu_init() done. BSP
* allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
* get_zeroed_page().
* get_free_pages() cannot be used before cpu_init() done.
* BSP allocates PERCPU_PAGE_SIZE bytes for all possible CPUs
* to avoid that AP calls get_zeroed_page().
*/
for (cpu = 0; cpu < NR_CPUS; cpu++) {
for_each_possible_cpu(cpu) {
void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;

memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
Expand Down Expand Up @@ -196,9 +196,57 @@ per_cpu_init (void)
static inline void
alloc_per_cpu_data(void)
{
cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * num_possible_cpus(),
PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}

/**
 * setup_per_cpu_areas - setup percpu areas
 *
 * Arch code has already allocated and initialized percpu areas.  All
 * this function has to do is to teach the determined layout to the
 * dynamic percpu allocator, which happens to be more complex than
 * creating whole new ones using helpers.
 *
 * Contiguous-memory configuration: all units live in a single group,
 * one PERCPU_PAGE_SIZE unit per possible CPU.  Panics on failure, as
 * the kernel cannot boot without a percpu first chunk.
 */
void __init
setup_per_cpu_areas(void)
{
	struct pcpu_alloc_info *ai;
	struct pcpu_group_info *gi;
	unsigned int cpu;
	ssize_t static_size, reserved_size, dyn_size;
	int rc;

	/* single group, with room for one unit per possible CPU */
	ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
	if (!ai)
		panic("failed to allocate pcpu_alloc_info");
	gi = &ai->groups[0];

	/* units are assigned consecutively to possible cpus */
	for_each_possible_cpu(cpu)
		gi->cpu_map[gi->nr_units++] = cpu;

	/* set parameters */
	static_size = __per_cpu_end - __per_cpu_start;
	reserved_size = PERCPU_MODULE_RESERVE;
	/* dynamic area is whatever is left of the page after the above */
	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
	if (dyn_size < 0)
		panic("percpu area overflow static=%zd reserved=%zd\n",
		      static_size, reserved_size);

	ai->static_size = static_size;
	ai->reserved_size = reserved_size;
	ai->dyn_size = dyn_size;
	/* each CPU's unit is exactly one percpu page */
	ai->unit_size = PERCPU_PAGE_SIZE;
	ai->atom_size = PAGE_SIZE;
	ai->alloc_size = PERCPU_PAGE_SIZE;

	/*
	 * Hand the pre-built first chunk to the percpu allocator; its
	 * base is CPU 0's area (__per_cpu_offset[0] relative to
	 * __per_cpu_start).
	 */
	rc = pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
	if (rc)
		panic("failed to setup percpu area (err=%d)", rc);

	pcpu_free_alloc_info(ai);
}
#else
#define alloc_per_cpu_data() do { } while (0)
#endif /* CONFIG_SMP */
Expand Down
85 changes: 85 additions & 0 deletions trunk/arch/ia64/mm/discontig.c
Original file line number Diff line number Diff line change
Expand Up @@ -172,6 +172,91 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
return cpu_data;
}

#ifdef CONFIG_SMP
/**
 * setup_per_cpu_areas - setup percpu areas
 *
 * Arch code has already allocated and initialized percpu areas.  All
 * this function has to do is to teach the determined layout to the
 * dynamic percpu allocator, which happens to be more complex than
 * creating whole new ones using helpers.
 *
 * Discontiguous (NUMA) configuration: units are grouped by NUMA node,
 * one group per node that has possible CPUs.  Panics on failure, as
 * the kernel cannot boot without a percpu first chunk.
 */
void __init setup_per_cpu_areas(void)
{
	struct pcpu_alloc_info *ai;
	struct pcpu_group_info *uninitialized_var(gi);
	unsigned int *cpu_map;
	void *base;
	unsigned long base_offset;
	unsigned int cpu;
	ssize_t static_size, reserved_size, dyn_size;
	int node, prev_node, unit, nr_units, rc;

	/* worst case: one group per node, one unit per possible CPU id */
	ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids);
	if (!ai)
		panic("failed to allocate pcpu_alloc_info");
	/* all groups share one contiguous cpu_map array */
	cpu_map = ai->groups[0].cpu_map;

	/*
	 * determine base: the lowest percpu area address across all
	 * CPUs; group base_offsets below are expressed relative to it
	 */
	base = (void *)ULONG_MAX;
	for_each_possible_cpu(cpu)
		base = min(base,
			   (void *)(__per_cpu_offset[cpu] + __per_cpu_start));
	base_offset = (void *)__per_cpu_start - base;

	/* build cpu_map, units are grouped by node */
	unit = 0;
	for_each_node(node)
		for_each_possible_cpu(cpu)
			if (node == node_cpuid[cpu].nid)
				cpu_map[unit++] = cpu;
	nr_units = unit;

	/* set basic parameters */
	static_size = __per_cpu_end - __per_cpu_start;
	reserved_size = PERCPU_MODULE_RESERVE;
	/* dynamic area is whatever is left of the page after the above */
	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
	if (dyn_size < 0)
		panic("percpu area overflow static=%zd reserved=%zd\n",
		      static_size, reserved_size);

	ai->static_size = static_size;
	ai->reserved_size = reserved_size;
	ai->dyn_size = dyn_size;
	/* each CPU's unit is exactly one percpu page */
	ai->unit_size = PERCPU_PAGE_SIZE;
	ai->atom_size = PAGE_SIZE;
	ai->alloc_size = PERCPU_PAGE_SIZE;

	/*
	 * CPUs are put into groups according to node.  Walk cpu_map
	 * and create new groups at node boundaries.
	 *
	 * gi is guaranteed to be assigned before first use: prev_node
	 * starts at -1, so the very first unit always takes the
	 * "new group" branch below (hence uninitialized_var() above).
	 */
	prev_node = -1;
	ai->nr_groups = 0;
	for (unit = 0; unit < nr_units; unit++) {
		cpu = cpu_map[unit];
		node = node_cpuid[cpu].nid;

		if (node == prev_node) {
			/* same node as previous unit: extend current group */
			gi->nr_units++;
			continue;
		}
		prev_node = node;

		/* node boundary: start a new group at this unit */
		gi = &ai->groups[ai->nr_groups++];
		gi->nr_units = 1;
		gi->base_offset = __per_cpu_offset[cpu] + base_offset;
		gi->cpu_map = &cpu_map[unit];
	}

	/* hand the pre-built first chunk to the percpu allocator */
	rc = pcpu_setup_first_chunk(ai, base);
	if (rc)
		panic("failed to setup percpu area (err=%d)", rc);

	pcpu_free_alloc_info(ai);
}
#endif

/**
* fill_pernode - initialize pernode data.
* @node: the node id.
Expand Down

0 comments on commit 01d63e6

Please sign in to comment.