percpu: simplify pcpu_setup_first_chunk()
Now that all first chunk allocator helpers allocate and map the first
chunk themselves, there's no need for an optional default alloc/map
path in pcpu_setup_first_chunk().  Drop @populate_pte_fn, leave only
@dyn_size optional, and make all other parameters mandatory.

This makes it much easier to follow what pcpu_setup_first_chunk() is
doing and what actually changes when each parameter is tweaked.

[ Impact: drop unused code path ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Ingo Molnar <mingo@elte.hu>
Tejun Heo committed Jul 3, 2009
1 parent 8c4bfc6 commit 38a6be5
Showing 3 changed files with 33 additions and 78 deletions.
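With the default alloc/map path gone, the contract is that the caller
allocates and maps the first chunk before calling
pcpu_setup_first_chunk().  The sketch below illustrates a post-patch
arch setup path; my_alloc_and_map_first_chunk() and my_get_page() are
hypothetical stand-ins, not kernel APIs, and only the
pcpu_setup_first_chunk() prototype is taken from this commit:

/*
 * Minimal sketch of the new calling convention.  The two my_*()
 * helpers are invented for illustration.
 */
void __init setup_per_cpu_areas(void)
{
	size_t static_size = __per_cpu_end - __per_cpu_start;
	size_t unit_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
				     PERCPU_DYNAMIC_RESERVE);
	/* the arch must allocate, map and copy static data itself */
	void *base = my_alloc_and_map_first_chunk(unit_size);

	/* @dyn_size may still be -1 (auto); everything else is mandatory */
	pcpu_setup_first_chunk(my_get_page, static_size,
			       PERCPU_MODULE_RESERVE, -1,
			       unit_size, base);
}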
arch/sparc/kernel/smp_64.c: 1 addition & 1 deletion
@@ -1528,7 +1528,7 @@ void __init setup_per_cpu_areas(void)
 
 	pcpu_unit_size = pcpu_setup_first_chunk(pcpur_get_page, static_size,
 						PERCPU_MODULE_RESERVE, dyn_size,
-						PCPU_CHUNK_SIZE, vm.addr, NULL);
+						PCPU_CHUNK_SIZE, vm.addr);
 
 	free_bootmem(__pa(pcpur_ptrs), ptrs_size);
 
include/linux/percpu.h: 2 additions & 3 deletions
@@ -66,9 +66,8 @@ typedef void (*pcpu_fc_map_fn_t)(void *ptr, size_t size, void *addr);
 
 extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
 				size_t static_size, size_t reserved_size,
-				ssize_t dyn_size, ssize_t unit_size,
-				void *base_addr,
-				pcpu_fc_populate_pte_fn_t populate_pte_fn);
+				ssize_t dyn_size, size_t unit_size,
+				void *base_addr);
 
 extern ssize_t __init pcpu_embed_first_chunk(
 				size_t static_size, size_t reserved_size,
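One subtlety in the prototype above: @unit_size changes from ssize_t
to size_t, so -1 ("auto") is no longer accepted and callers must size
the unit themselves.  A hedged sketch of such a computation, mirroring
the new BUG_ON() requirements (page alignment, minimum size, room for
all three areas); the variable names are illustrative:

/* illustrative: round up to whole pages and honor the minimum */
size_t size_sum = static_size + reserved_size +
		  (dyn_size >= 0 ? dyn_size : 0);
size_t unit_size = max_t(size_t, PFN_ALIGN(size_sum), PCPU_MIN_UNIT_SIZE);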
mm/percpu.c: 30 additions & 74 deletions
@@ -983,24 +983,22 @@ EXPORT_SYMBOL_GPL(free_percpu);
  * pcpu_setup_first_chunk - initialize the first percpu chunk
  * @get_page_fn: callback to fetch page pointer
  * @static_size: the size of static percpu area in bytes
- * @reserved_size: the size of reserved percpu area in bytes
+ * @reserved_size: the size of reserved percpu area in bytes, 0 for none
  * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
- * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
- * @base_addr: mapped address, NULL for auto
- * @populate_pte_fn: callback to allocate pagetable, NULL if unnecessary
+ * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE
+ * @base_addr: mapped address
  *
  * Initialize the first percpu chunk which contains the kernel static
  * perpcu area.  This function is to be called from arch percpu area
- * setup path.  The first two parameters are mandatory.  The rest are
- * optional.
+ * setup path.
  *
  * @get_page_fn() should return pointer to percpu page given cpu
  * number and page number.  It should at least return enough pages to
  * cover the static area.  The returned pages for static area should
- * have been initialized with valid data.  If @unit_size is specified,
- * it can also return pages after the static area.  NULL return
- * indicates end of pages for the cpu.  Note that @get_page_fn() must
- * return the same number of pages for all cpus.
+ * have been initialized with valid data.  It can also return pages
+ * after the static area.  NULL return indicates end of pages for the
+ * cpu.  Note that @get_page_fn() must return the same number of pages
+ * for all cpus.
  *
  * @reserved_size, if non-zero, specifies the amount of bytes to
  * reserve after the static area in the first chunk.  This reserves
@@ -1015,17 +1013,12 @@ EXPORT_SYMBOL_GPL(free_percpu);
  * non-negative value makes percpu leave alone the area beyond
  * @static_size + @reserved_size + @dyn_size.
  *
- * @unit_size, if non-negative, specifies unit size and must be
- * aligned to PAGE_SIZE and equal to or larger than @static_size +
- * @reserved_size + if non-negative, @dyn_size.
- *
- * Non-null @base_addr means that the caller already allocated virtual
- * region for the first chunk and mapped it.  percpu must not mess
- * with the chunk.  Note that @base_addr with 0 @unit_size or non-NULL
- * @populate_pte_fn doesn't make any sense.
+ * @unit_size specifies unit size and must be aligned to PAGE_SIZE and
+ * equal to or larger than @static_size + @reserved_size + if
+ * non-negative, @dyn_size.
  *
- * @populate_pte_fn is used to populate the pagetable.  NULL means the
- * caller already populated the pagetable.
+ * The caller should have mapped the first chunk at @base_addr and
+ * copied static data to each unit.
  *
  * If the first chunk ends up with both reserved and dynamic areas, it
  * is served by two chunks - one to serve the core static and reserved
@@ -1040,37 +1033,27 @@ EXPORT_SYMBOL_GPL(free_percpu);
  */
 size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
 				     size_t static_size, size_t reserved_size,
-				     ssize_t dyn_size, ssize_t unit_size,
-				     void *base_addr,
-				     pcpu_fc_populate_pte_fn_t populate_pte_fn)
+				     ssize_t dyn_size, size_t unit_size,
+				     void *base_addr)
 {
 	static struct vm_struct first_vm;
 	static int smap[2], dmap[2];
 	size_t size_sum = static_size + reserved_size +
 			  (dyn_size >= 0 ? dyn_size : 0);
 	struct pcpu_chunk *schunk, *dchunk = NULL;
 	unsigned int cpu;
-	int nr_pages;
-	int err, i;
+	int i, nr_pages;
 
 	/* santiy checks */
 	BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
 		     ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
 	BUG_ON(!static_size);
-	if (unit_size >= 0) {
-		BUG_ON(unit_size < size_sum);
-		BUG_ON(unit_size & ~PAGE_MASK);
-		BUG_ON(unit_size < PCPU_MIN_UNIT_SIZE);
-	} else
-		BUG_ON(base_addr);
-	BUG_ON(base_addr && populate_pte_fn);
-
-	if (unit_size >= 0)
-		pcpu_unit_pages = unit_size >> PAGE_SHIFT;
-	else
-		pcpu_unit_pages = max_t(int, PCPU_MIN_UNIT_SIZE >> PAGE_SHIFT,
-					PFN_UP(size_sum));
+	BUG_ON(!base_addr);
+	BUG_ON(unit_size < size_sum);
+	BUG_ON(unit_size & ~PAGE_MASK);
+	BUG_ON(unit_size < PCPU_MIN_UNIT_SIZE);
 
+	pcpu_unit_pages = unit_size >> PAGE_SHIFT;
 	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
 	pcpu_chunk_size = num_possible_cpus() * pcpu_unit_size;
 	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk)
@@ -1079,6 +1062,10 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
 	if (dyn_size < 0)
 		dyn_size = pcpu_unit_size - static_size - reserved_size;
 
+	first_vm.flags = VM_ALLOC;
+	first_vm.size = pcpu_chunk_size;
+	first_vm.addr = base_addr;
+
 	/*
 	 * Allocate chunk slots.  The additional last slot is for
 	 * empty chunks.
@@ -1101,6 +1088,7 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
 	schunk->map = smap;
 	schunk->map_alloc = ARRAY_SIZE(smap);
 	schunk->page = schunk->page_ar;
+	schunk->immutable = true;
 
 	if (reserved_size) {
 		schunk->free_size = reserved_size;
@@ -1124,31 +1112,13 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
 		dchunk->map = dmap;
 		dchunk->map_alloc = ARRAY_SIZE(dmap);
 		dchunk->page = schunk->page_ar;	/* share page map with schunk */
+		dchunk->immutable = true;
 
 		dchunk->contig_hint = dchunk->free_size = dyn_size;
 		dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
 		dchunk->map[dchunk->map_used++] = dchunk->free_size;
 	}
 
-	/* allocate vm address */
-	first_vm.flags = VM_ALLOC;
-	first_vm.size = pcpu_chunk_size;
-
-	if (!base_addr)
-		vm_area_register_early(&first_vm, PAGE_SIZE);
-	else {
-		/*
-		 * Pages already mapped.  No need to remap into
-		 * vmalloc area.  In this case the first chunks can't
-		 * be mapped or unmapped by percpu and are marked
-		 * immutable.
-		 */
-		first_vm.addr = base_addr;
-		schunk->immutable = true;
-		if (dchunk)
-			dchunk->immutable = true;
-	}
-
 	/* assign pages */
 	nr_pages = -1;
 	for_each_possible_cpu(cpu) {
@@ -1168,19 +1138,6 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
 		BUG_ON(nr_pages != i);
 	}
 
-	/* map them */
-	if (populate_pte_fn) {
-		for_each_possible_cpu(cpu)
-			for (i = 0; i < nr_pages; i++)
-				populate_pte_fn(pcpu_chunk_addr(schunk,
-								cpu, i));
-
-		err = pcpu_map(schunk, 0, nr_pages);
-		if (err)
-			panic("failed to setup static percpu area, err=%d\n",
-			      err);
-	}
-
 	/* link the first chunk in */
 	pcpu_first_chunk = dchunk ?: schunk;
 	pcpu_chunk_relocate(pcpu_first_chunk, -1);
@@ -1282,7 +1239,7 @@ ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,

 	return pcpu_setup_first_chunk(pcpue_get_page, static_size,
 				      reserved_size, dyn_size,
-				      pcpue_unit_size, pcpue_ptr, NULL);
+				      pcpue_unit_size, pcpue_ptr);
 }
 
 /*
@@ -1387,8 +1344,7 @@ ssize_t __init pcpu_4k_first_chunk(size_t static_size, size_t reserved_size,

 	ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
 				     reserved_size, -1,
-				     pcpu4k_unit_pages << PAGE_SHIFT, vm.addr,
-				     NULL);
+				     pcpu4k_unit_pages << PAGE_SHIFT, vm.addr);
 	goto out_free_ar;
 
 enomem:
@@ -1521,7 +1477,7 @@ ssize_t __init pcpu_lpage_first_chunk(size_t static_size, size_t reserved_size,

 	ret = pcpu_setup_first_chunk(pcpul_get_page, static_size,
 				     reserved_size, dyn_size, pcpul_unit_size,
-				     pcpul_vm.addr, NULL);
+				     pcpul_vm.addr);
 
 	/* sort pcpul_map array for pcpu_lpage_remapped() */
 	for (i = 0; i < num_possible_cpus() - 1; i++)
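To make the two-chunk split described in the function comment concrete,
here is a worked example with made-up numbers (64K static, 8K reserved,
dyn_size = -1, 128K unit); the assignments below are taken from the
function, the figures are not:

/* @dyn_size == -1 auto-sizes to whatever is left in the unit: */
dyn_size = pcpu_unit_size - static_size - reserved_size; /* 128K - 64K - 8K = 56K */

/* schunk serves the static area plus the 8K reserved region: */
schunk->map[schunk->map_used++] = -static_size;      /* -64K: static, in use */
schunk->map[schunk->map_used++] = schunk->free_size; /*   8K: free (reserved) */

/* dchunk serves dynamic allocs past static + reserved (limit is 72K): */
dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit; /* -72K: in use */
dchunk->map[dchunk->map_used++] = dchunk->free_size;          /*  56K: free  */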
