
Commit 16f4765
---
r: 140782
b: refs/heads/master
c: 843adf2
h: refs/heads/master
v: v3

Ingo Molnar committed Feb 22, 2009
1 parent ca377f1 commit 16f4765

Showing 4 changed files with 63 additions and 30 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 91f73f90d97fa67effbb49e0a79c50cf26dfe324
+refs/heads/master: 843adf2379c18ed9a1b7493ee208bfd5512732e0
19 changes: 16 additions & 3 deletions trunk/include/linux/slub_def.h
@@ -121,11 +121,24 @@ struct kmem_cache {
 
 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
 
+/*
+ * Maximum kmalloc object size handled by SLUB. Larger object allocations
+ * are passed through to the page allocator. The page allocator "fastpath"
+ * is relatively slow so we need this value sufficiently high so that
+ * performance critical objects are allocated through the SLUB fastpath.
+ *
+ * This should be dropped to PAGE_SIZE / 2 once the page allocator
+ * "fastpath" becomes competitive with the slab allocator fastpaths.
+ */
+#define SLUB_MAX_SIZE (PAGE_SIZE)
+
+#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 1)
+
 /*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1];
+extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -231,7 +244,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 	void *ret;
 
 	if (__builtin_constant_p(size)) {
-		if (size > PAGE_SIZE)
+		if (size > SLUB_MAX_SIZE)
 			return kmalloc_large(size, flags);
 
 		if (!(flags & SLUB_DMA)) {
@@ -275,7 +288,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 	void *ret;
 
 	if (__builtin_constant_p(size) &&
-		size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
+		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
 			struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
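
Note (not part of the diff): the effect of the two new constants on the kmalloc() fastpath can be sketched as below. kmalloc_large(), get_slab(), kmem_cache_alloc() and ZERO_OR_NULL_PTR() are existing SLUB symbols; the wrapper name sketch_kmalloc() is illustrative only, not kernel code.

/* Hedged sketch of the kmalloc() dispatch after this change. */
static inline void *sketch_kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *s;

	/* Requests above SLUB_MAX_SIZE (now a full page) bypass SLUB and
	 * go straight to the page allocator. */
	if (unlikely(size > SLUB_MAX_SIZE))
		return kmalloc_large(size, flags);

	/* Everything else is served from one of the power-of-two caches in
	 * kmalloc_caches[], whose bound is now SLUB_PAGE_SHIFT. */
	s = get_slab(size, flags);
	if (unlikely(ZERO_OR_NULL_PTR(s)))
		return s;
	return kmem_cache_alloc(s, flags);
}
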
56 changes: 38 additions & 18 deletions trunk/kernel/trace/ftrace.c
@@ -1895,6 +1895,10 @@ static void *g_start(struct seq_file *m, loff_t *pos)
 
 	mutex_lock(&graph_lock);
 
+	/* Nothing, tell g_show to print all functions are enabled */
+	if (!ftrace_graph_count && !*pos)
+		return (void *)1;
+
 	p = g_next(m, p, pos);
 
 	return p;
@@ -1913,6 +1917,11 @@ static int g_show(struct seq_file *m, void *v)
 	if (!ptr)
 		return 0;
 
+	if (ptr == (unsigned long *)1) {
+		seq_printf(m, "#### all functions enabled ####\n");
+		return 0;
+	}
+
 	kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
 
 	seq_printf(m, "%s\n", str);
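
Aside (not part of the diff): seq_file stops calling ->show() as soon as ->start()/->next() return NULL, so an empty filter list would otherwise produce no output at all. The (void *)1 value is a sentinel that g_start() hands to g_show() exactly once so a banner is printed instead. A minimal sketch of the idiom, with illustrative names (my_start(), my_show(), nr_entries, first_entry() are not kernel symbols):

static void *my_start(struct seq_file *m, loff_t *pos)
{
	if (!nr_entries && !*pos)
		return (void *)1;		/* sentinel: the list is empty */

	return first_entry(pos);		/* normal iteration */
}

static int my_show(struct seq_file *m, void *v)
{
	if (v == (void *)1) {			/* sentinel seen: print a banner */
		seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

	/* ... format a real entry here ... */
	return 0;
}
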
@@ -1966,38 +1975,51 @@ ftrace_graph_read(struct file *file, char __user *ubuf,
 }
 
 static int
-ftrace_set_func(unsigned long *array, int idx, char *buffer)
+ftrace_set_func(unsigned long *array, int *idx, char *buffer)
 {
-	char str[KSYM_SYMBOL_LEN];
 	struct dyn_ftrace *rec;
 	struct ftrace_page *pg;
+	int search_len;
 	int found = 0;
-	int j;
+	int type, not;
+	char *search;
+	bool exists;
+	int i;
 
 	if (ftrace_disabled)
 		return -ENODEV;
 
+	/* decode regex */
+	type = ftrace_setup_glob(buffer, strlen(buffer), &search, &not);
+	if (not)
+		return -EINVAL;
+
+	search_len = strlen(search);
+
 	mutex_lock(&ftrace_lock);
 	do_for_each_ftrace_rec(pg, rec) {
 
+		if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
+			break;
+
 		if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
 			continue;
 
-		kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
-		if (strcmp(str, buffer) == 0) {
-			/* Return 1 if we add it to the array */
-			found = 1;
-			for (j = 0; j < idx; j++)
-				if (array[j] == rec->ip) {
-					found = 0;
+		if (ftrace_match_record(rec, search, search_len, type)) {
+			/* ensure it is not already in the array */
+			exists = false;
+			for (i = 0; i < *idx; i++)
+				if (array[i] == rec->ip) {
+					exists = true;
 					break;
 				}
-			if (found)
-				array[idx] = rec->ip;
-			goto out;
+			if (!exists) {
+				array[(*idx)++] = rec->ip;
+				found = 1;
+			}
 		}
 	} while_for_each_ftrace_rec();
-out:
+
 	mutex_unlock(&ftrace_lock);
 
 	return found ? 0 : -EINVAL;
@@ -2066,13 +2088,11 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
 	}
 	buffer[index] = 0;
 
-	/* we allow only one at a time */
-	ret = ftrace_set_func(array, ftrace_graph_count, buffer);
+	/* we allow only one expression at a time */
+	ret = ftrace_set_func(array, &ftrace_graph_count, buffer);
 	if (ret)
 		goto out;
 
-	ftrace_graph_count++;
-
 	file->f_pos += read;
 
 	ret = read;
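
Taken together, these ftrace.c changes let a single write to set_graph_function select many functions: ftrace_setup_glob() decodes the expression, ftrace_match_record() tests every record against it, and each match is appended through the idx pointer, bounded by FTRACE_GRAPH_MAX_FUNCS (so a glob such as "hrtimer_*" should now pick up a whole family of functions in one write). The dedup-and-append step can be sketched in isolation; add_unique() is an illustrative name, not a kernel symbol:

#include <errno.h>

/* Hedged sketch of the append-unless-duplicate idiom used above. */
static int add_unique(unsigned long *array, int *idx, int max, unsigned long ip)
{
	int i;

	if (*idx >= max)
		return -ENOSPC;			/* table is full, stop adding */

	for (i = 0; i < *idx; i++)
		if (array[i] == ip)
			return 0;		/* already recorded, nothing to do */

	array[(*idx)++] = ip;
	return 1;				/* newly added */
}
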
16 changes: 8 additions & 8 deletions trunk/mm/slub.c
@@ -2506,7 +2506,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
 *		Kmalloc subsystem
 *******************************************************************/
 
-struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 static int __init setup_slub_min_order(char *str)
@@ -2568,7 +2568,7 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
 }
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
+static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
 
 static void sysfs_add_func(struct work_struct *w)
 {
@@ -2690,7 +2690,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
@@ -2724,7 +2724,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > PAGE_SIZE)) {
+	if (unlikely(size > SLUB_MAX_SIZE)) {
 		ret = kmalloc_large_node(size, flags, node);
 
 		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
@@ -3039,7 +3039,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
 			"kmalloc", 1 << i, GFP_KERNEL);
 		caches++;
@@ -3076,7 +3076,7 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
 		kmalloc_caches[i].name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
@@ -3277,7 +3277,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, gfpflags);
 
 	s = get_slab(size, gfpflags);
@@ -3300,7 +3300,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, gfpflags, node);
 
 	s = get_slab(size, gfpflags);
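
Sanity check (not from the commit): with SLUB_PAGE_SHIFT defined as PAGE_SHIFT + 1, the new bound "i < SLUB_PAGE_SHIFT" covers exactly the same indices as the old "i <= PAGE_SHIFT", so the power-of-two caches created by these loops are unchanged; only the array bound is now expressed through a single named constant. The small userspace snippet below just enumerates the cache names under the assumption of 4 KiB pages and a KMALLOC_MIN_SIZE of 8:

#include <stdio.h>

#define PAGE_SHIFT		12		/* assumed: 4 KiB pages */
#define SLUB_PAGE_SHIFT		(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_LOW	3		/* assumed: KMALLOC_MIN_SIZE == 8 */

int main(void)
{
	int i;

	/* Prints kmalloc-8 ... kmalloc-4096, the same caches as before. */
	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
		printf("kmalloc-%d\n", 1 << i);

	return 0;
}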
