fork: Provide kmemcache based thread_info allocator
Several architectures have their own kmemcache based thread allocator
because THREAD_SIZE is smaller than PAGE_SIZE. Add it to the core code
conditionally on THREAD_SIZE < PAGE_SIZE so the private copies can go.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20120505150141.491002124@linutronix.de
Thomas Gleixner committed May 8, 2012
1 parent 67ba529 commit 0d15d74
Showing 1 changed file with 27 additions and 0 deletions.
kernel/fork.c (27 additions, 0 deletions)
@@ -132,6 +132,11 @@ static inline void free_task_struct(struct task_struct *tsk)
 
 void __weak arch_release_thread_info(struct thread_info *ti) { }
 
+/*
+ * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
+ * kmemcache based allocator.
+ */
+# if THREAD_SIZE >= PAGE_SIZE
 static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
 						  int node)
 {
@@ -146,6 +151,28 @@ static inline void free_thread_info(struct thread_info *ti)
 	arch_release_thread_info(ti);
 	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
 }
+# else
+static struct kmem_cache *thread_info_cache;
+
+static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
+						  int node)
+{
+	return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node);
+}
+
+static void free_thread_info(struct thread_info *ti)
+{
+	arch_release_thread_info(ti);
+	kmem_cache_free(thread_info_cache, ti);
+}
+
+void thread_info_cache_init(void)
+{
+	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
+					      THREAD_SIZE, 0, NULL);
+	BUG_ON(thread_info_cache == NULL);
+}
+# endif
 #endif
 
 /* SLAB cache for signal_struct structures (tsk->signal) */
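
For orientation, here is a minimal sketch of where the allocator pair selected above ends up being used. It is not part of this commit: the call sites are simplified from the kernel/fork.c fork and teardown paths of that era, and the note that start_kernel() invokes thread_info_cache_init() (through a weak no-op stub that the strong definition above overrides) is my reading of the surrounding code, so treat the exact shapes below as assumptions.

/*
 * Sketch only -- not part of this diff.  Simplified from the fork path
 * of that era to show where the new helpers are consumed.
 */
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	int node = tsk_fork_get_node(orig);
	struct task_struct *tsk;
	struct thread_info *ti;

	tsk = alloc_task_struct_node(node);
	if (!tsk)
		return NULL;

	/*
	 * Resolves to the page based branch when THREAD_SIZE >= PAGE_SIZE
	 * and to the "thread_info" kmem_cache otherwise.
	 */
	ti = alloc_thread_info_node(tsk, node);
	if (!ti) {
		free_task_struct(tsk);
		return NULL;
	}

	tsk->stack = ti;
	/* ... the real function goes on to copy the parent's state ... */
	return tsk;
}

void free_task(struct task_struct *tsk)
{
	/* Hands the stack back to whichever backend allocated it. */
	free_thread_info(tsk->stack);
	free_task_struct(tsk);
}

One design detail worth noting: kmem_cache_create() is passed THREAD_SIZE as the alignment as well as the object size. Many architectures derive current_thread_info() by masking the stack pointer with ~(THREAD_SIZE - 1), so the cache has to hand out THREAD_SIZE aligned objects for that to keep working.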
