
Commit

---
r: 192480
b: refs/heads/master
c: 88999a8
h: refs/heads/master
v: v3
Tejun Heo committed May 1, 2010
1 parent 6995095 commit e4cfaab
Showing 2 changed files with 57 additions and 56 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 6081089fd6f216b0eb8849205ad0c350cd5ed9bc
+refs/heads/master: 88999a898b565960690f18e4a13a1e8a9fa4dfef
111 changes: 56 additions & 55 deletions trunk/mm/percpu.c
@@ -1,20 +1,19 @@
/*
- * linux/mm/percpu.c - percpu memory allocator
+ * mm/percpu.c - percpu memory allocator
*
* Copyright (C) 2009 SUSE Linux Products GmbH
* Copyright (C) 2009 Tejun Heo <tj@kernel.org>
*
* This file is released under the GPLv2.
*
* This is percpu allocator which can handle both static and dynamic
- * areas. Percpu areas are allocated in chunks in vmalloc area. Each
- * chunk is consisted of boot-time determined number of units and the
- * first chunk is used for static percpu variables in the kernel image
+ * areas. Percpu areas are allocated in chunks. Each chunk is
+ * consisted of boot-time determined number of units and the first
+ * chunk is used for static percpu variables in the kernel image
* (special boot time alloc/init handling necessary as these areas
* need to be brought up before allocation services are running).
* Unit grows as necessary and all units grow or shrink in unison.
- * When a chunk is filled up, another chunk is allocated. ie. in
- * vmalloc area
+ * When a chunk is filled up, another chunk is allocated.
*
* c0 c1 c2
* ------------------- ------------------- ------------
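For context on the chunk/unit scheme described above: static percpu variables are carved out of the first chunk at boot, while dynamic allocations are served from chunks created on demand. Below is a minimal sketch of the allocator's user-facing side, typical kernel usage rather than code from this diff; the variable and function names are made up for the example.

	#include <linux/percpu.h>
	#include <linux/gfp.h>
	#include <linux/errno.h>

	/* static percpu variable: placed in the first chunk at boot */
	static DEFINE_PER_CPU(unsigned long, example_event_count);

	/* dynamic percpu allocation: served from regular chunks */
	static int __percpu *example_counter;

	static int example_init(void)
	{
		int cpu;

		example_counter = alloc_percpu(int);
		if (!example_counter)
			return -ENOMEM;

		for_each_possible_cpu(cpu)
			*per_cpu_ptr(example_counter, cpu) = 0;

		this_cpu_inc(example_event_count);
		return 0;
	}

	static void example_exit(void)
	{
		free_percpu(example_counter);
	}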
@@ -99,7 +98,7 @@ struct pcpu_chunk {
int map_used; /* # of map entries used */
int map_alloc; /* # of map entries allocated */
int *map; /* allocation map */
-	struct vm_struct **vms;		/* mapped vmalloc regions */
+	void *data;			/* chunk data */
bool immutable; /* no [de]population allowed */
unsigned long populated[]; /* populated bitmap */
};
@@ -213,13 +212,25 @@ static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
return pcpu_size_to_slot(chunk->free_size);
}

-static int pcpu_page_idx(unsigned int cpu, int page_idx)
+/* set the pointer to a chunk in a page struct */
+static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
+{
+	page->index = (unsigned long)pcpu;
+}
+
+/* obtain pointer to a chunk from a page struct */
+static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
+{
+	return (struct pcpu_chunk *)page->index;
+}
+
+static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

-static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
-				     unsigned int cpu, int page_idx)
+static unsigned long __maybe_unused pcpu_chunk_addr(struct pcpu_chunk *chunk,
+						    unsigned int cpu, int page_idx)
{
return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
(page_idx << PAGE_SHIFT);
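Several vmalloc-specific helpers in this hunk (pcpu_page_idx(), pcpu_chunk_addr(), and the region iterators below) gain __maybe_unused, presumably so they can stay compiled even in configurations or chunk backends that have no caller for them, without tripping -Wunused-function. A minimal sketch of that pattern, with hypothetical names not taken from this commit:

	#include <linux/compiler.h>	/* __maybe_unused */

	/*
	 * Hypothetical helper: only some configurations call it, but it is
	 * always compiled.  Without __maybe_unused, builds that omit the
	 * caller would warn about a defined-but-unused static function.
	 */
	static int __maybe_unused example_helper(int x)
	{
		return x * 2;
	}

	#ifdef CONFIG_EXAMPLE_FEATURE		/* hypothetical config option */
	static int example_caller(void)
	{
		return example_helper(21);
	}
	#endif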
@@ -234,25 +245,15 @@ static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
}

-/* set the pointer to a chunk in a page struct */
-static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
-{
-	page->index = (unsigned long)pcpu;
-}
-
-/* obtain pointer to a chunk from a page struct */
-static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
-{
-	return (struct pcpu_chunk *)page->index;
-}
-
-static void pcpu_next_unpop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
+static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
+					   int *rs, int *re, int end)
{
*rs = find_next_zero_bit(chunk->populated, end, *rs);
*re = find_next_bit(chunk->populated, end, *rs + 1);
}

-static void pcpu_next_pop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
+static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
+					 int *rs, int *re, int end)
{
*rs = find_next_bit(chunk->populated, end, *rs);
*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
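pcpu_next_unpop() and pcpu_next_pop() advance *rs to the start of the next unpopulated (or populated) run in chunk->populated and *re to just past its end; the iteration loops built on them step with rs = re + 1 to walk every run. A standalone sketch of that run-walking pattern, using a simplified stand-in for the kernel's find_next_bit()/find_next_zero_bit(); illustrative only, not kernel code:

	#include <stdio.h>

	/* simplified stand-in for find_next_bit()/find_next_zero_bit() over one word */
	static int next_bit(unsigned long map, int size, int start, int want_set)
	{
		for (int i = start; i < size; i++)
			if (!!(map & (1UL << i)) == want_set)
				return i;
		return size;
	}

	int main(void)
	{
		unsigned long populated = 0xe6;	/* 0b0011100110: pages 1,2,5,6,7 populated */
		int size = 10, rs, re;

		/* walk unpopulated runs the way pcpu_next_unpop()-based loops do */
		for (rs = 0; rs < size; rs = re + 1) {
			rs = next_bit(populated, size, rs, 0);		/* like find_next_zero_bit() */
			if (rs >= size)
				break;
			re = next_bit(populated, size, rs + 1, 1);	/* like find_next_bit() */
			printf("unpopulated run: [%d, %d)\n", rs, re);	/* prints [0,1) [3,5) [8,10) */
		}
		return 0;
	}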
@@ -340,34 +341,6 @@ static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
}
}

-/**
- * pcpu_chunk_addr_search - determine chunk containing specified address
- * @addr: address for which the chunk needs to be determined.
- *
- * RETURNS:
- * The address of the found chunk.
- */
-static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
-{
-	/* is it in the first chunk? */
-	if (pcpu_addr_in_first_chunk(addr)) {
-		/* is it in the reserved area? */
-		if (pcpu_addr_in_reserved_chunk(addr))
-			return pcpu_reserved_chunk;
-		return pcpu_first_chunk;
-	}
-
-	/*
-	 * The address is relative to unit0 which might be unused and
-	 * thus unmapped. Offset the address to the unit space of the
-	 * current processor before looking it up in the vmalloc
-	 * space. Note that any possible cpu id can be used here, so
-	 * there's no need to worry about preemption or cpu hotplug.
-	 */
-	addr += pcpu_unit_offsets[raw_smp_processor_id()];
-	return pcpu_get_page_chunk(vmalloc_to_page(addr));
-}

/**
* pcpu_need_to_extend - determine whether chunk area map needs to be extended
* @chunk: chunk of interest
@@ -1062,8 +1035,8 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)

static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
-	if (chunk && chunk->vms)
-		pcpu_free_vm_areas(chunk->vms, pcpu_nr_groups);
+	if (chunk && chunk->data)
+		pcpu_free_vm_areas(chunk->data, pcpu_nr_groups);
pcpu_free_chunk(chunk);
}
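The chunk's backing-store pointer also changes from struct vm_struct **vms to an opaque void *data here and in pcpu_create_chunk() below, so the generic chunk code no longer assumes a vmalloc-backed chunk. As an illustration of what that opacity allows, a hypothetical page-based backend could keep a struct page pointer in ->data instead; all names and sizes in the sketch below are assumptions, not code from this commit (pcpu_free_chunk() is the bookkeeping helper seen above, pcpu_alloc_chunk() its allocation-side counterpart).

	#include <linux/gfp.h>
	#include <linux/mm.h>

	static struct pcpu_chunk *example_km_create_chunk(void)
	{
		const int order = 2;			/* assumed 4-page chunk, for illustration */
		struct pcpu_chunk *chunk;
		struct page *pages;

		chunk = pcpu_alloc_chunk();
		if (!chunk)
			return NULL;

		pages = alloc_pages(GFP_KERNEL, order);
		if (!pages) {
			pcpu_free_chunk(chunk);
			return NULL;
		}

		chunk->data = pages;			/* backend-private payload */
		chunk->base_addr = page_address(pages);
		return chunk;
	}

	static void example_km_destroy_chunk(struct pcpu_chunk *chunk)
	{
		const int order = 2;

		if (chunk && chunk->data)
			__free_pages(chunk->data, order);
		pcpu_free_chunk(chunk);
	}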

@@ -1083,11 +1056,39 @@ static struct pcpu_chunk *pcpu_create_chunk(void)
return NULL;
}

-	chunk->vms = vms;
+	chunk->data = vms;
chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0];
return chunk;
}

+/**
+ * pcpu_chunk_addr_search - determine chunk containing specified address
+ * @addr: address for which the chunk needs to be determined.
+ *
+ * RETURNS:
+ * The address of the found chunk.
+ */
+static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
+{
+	/* is it in the first chunk? */
+	if (pcpu_addr_in_first_chunk(addr)) {
+		/* is it in the reserved area? */
+		if (pcpu_addr_in_reserved_chunk(addr))
+			return pcpu_reserved_chunk;
+		return pcpu_first_chunk;
+	}
+
+	/*
+	 * The address is relative to unit0 which might be unused and
+	 * thus unmapped. Offset the address to the unit space of the
+	 * current processor before looking it up in the vmalloc
+	 * space. Note that any possible cpu id can be used here, so
+	 * there's no need to worry about preemption or cpu hotplug.
+	 */
+	addr += pcpu_unit_offsets[raw_smp_processor_id()];
+	return pcpu_get_page_chunk(vmalloc_to_page(addr));
+}

/**
* pcpu_alloc - the percpu allocator
* @size: size of area to allocate in bytes
