---
r: 88439
b: refs/heads/master
c: 4cba84b
h: refs/heads/master
i:
  88437: 031e0a0
  88435: c51cdca
  88431: e0c75f5
v: v3
Linus Torvalds committed Apr 18, 2008
1 parent 77bee36 commit 8565cb5
Showing 7 changed files with 116 additions and 67 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: ca68305bf3c76c4a7cd1c77d5423219f39164df8
refs/heads/master: 4cba84b5d61af81f1f329f4d05170427a9819c39
2 changes: 1 addition & 1 deletion trunk/include/linux/slub_def.h
@@ -45,9 +45,9 @@ struct kmem_cache_cpu {
struct kmem_cache_node {
spinlock_t list_lock; /* Protect partial list and nr_partial */
unsigned long nr_partial;
atomic_long_t nr_slabs;
struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
atomic_long_t nr_slabs;
struct list_head full;
#endif
};
2 changes: 1 addition & 1 deletion trunk/init/Kconfig
@@ -763,7 +763,7 @@ endmenu # General setup
config SLABINFO
bool
depends on PROC_FS
depends on SLAB || SLUB
depends on SLAB || SLUB_DEBUG
default y

config RT_MUTEXES
7 changes: 6 additions & 1 deletion trunk/kernel/cgroup.c
@@ -1722,7 +1722,12 @@ void cgroup_enable_task_cg_lists(void)
use_task_css_set_links = 1;
do_each_thread(g, p) {
task_lock(p);
if (list_empty(&p->cg_list))
/*
* We should check if the process is exiting, otherwise
* it will race with cgroup_exit() in that the list
* entry won't be deleted though the process has exited.
*/
if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))
list_add(&p->cg_list, &p->cgroups->tasks);
task_unlock(p);
} while_each_thread(g, p);
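
The comment added in this hunk is the heart of the fix: a task that has already run cgroup_exit() must not be linked onto the css_set task list, because nothing would ever unlink it again. Purely as illustration, here is a minimal user-space sketch of the same check-under-lock pattern; struct task, exiting and track_task() are made-up stand-ins for task_struct, PF_EXITING and the cg_list handling, not code from the commit.

    #include <pthread.h>
    #include <stdio.h>

    /* Made-up stand-ins for task_struct, PF_EXITING and the cg_list linkage. */
    struct task {
        pthread_mutex_t lock;      /* plays the role of task_lock()           */
        int exiting;               /* plays the role of p->flags & PF_EXITING */
        int linked;                /* approximates !list_empty(&p->cg_list)   */
        struct task *next;
    };

    static struct task *tracked;   /* list head, like &p->cgroups->tasks */

    /*
     * Link the task only while holding its lock and only if it is not already
     * exiting; otherwise the exit path may already have run (or be running)
     * and the entry would never be removed -- the race the comment describes.
     */
    static void track_task(struct task *t)
    {
        pthread_mutex_lock(&t->lock);
        if (!t->exiting && !t->linked) {
            t->linked = 1;
            t->next = tracked;
            tracked = t;
        }
        pthread_mutex_unlock(&t->lock);
    }

    int main(void)
    {
        struct task alive = { PTHREAD_MUTEX_INITIALIZER, 0, 0, NULL };
        struct task dying = { PTHREAD_MUTEX_INITIALIZER, 1, 0, NULL };

        track_task(&alive);
        track_task(&dying);        /* skipped: task is already exiting */
        printf("tracked head: %s\n", tracked == &alive ? "alive" : "unexpected");
        return 0;
    }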
71 changes: 42 additions & 29 deletions trunk/kernel/signal.c
@@ -1757,6 +1757,45 @@ static int do_signal_stop(int signr)
return 1;
}

static int ptrace_signal(int signr, siginfo_t *info,
struct pt_regs *regs, void *cookie)
{
if (!(current->ptrace & PT_PTRACED))
return signr;

ptrace_signal_deliver(regs, cookie);

/* Let the debugger run. */
ptrace_stop(signr, 0, info);

/* We're back. Did the debugger cancel the sig? */
signr = current->exit_code;
if (signr == 0)
return signr;

current->exit_code = 0;

/* Update the siginfo structure if the signal has
changed. If the debugger wanted something
specific in the siginfo structure then it should
have updated *info via PTRACE_SETSIGINFO. */
if (signr != info->si_signo) {
info->si_signo = signr;
info->si_errno = 0;
info->si_code = SI_USER;
info->si_pid = task_pid_vnr(current->parent);
info->si_uid = current->parent->uid;
}

/* If the (new) signal is now blocked, requeue it. */
if (sigismember(&current->blocked, signr)) {
specific_send_sig_info(signr, info, current);
signr = 0;
}

return signr;
}

int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
struct pt_regs *regs, void *cookie)
{
@@ -1785,36 +1824,10 @@ int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
if (!signr)
break; /* will return 0 */

if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
ptrace_signal_deliver(regs, cookie);

/* Let the debugger run. */
ptrace_stop(signr, 0, info);

/* We're back. Did the debugger cancel the sig? */
signr = current->exit_code;
if (signr == 0)
continue;

current->exit_code = 0;

/* Update the siginfo structure if the signal has
changed. If the debugger wanted something
specific in the siginfo structure then it should
have updated *info via PTRACE_SETSIGINFO. */
if (signr != info->si_signo) {
info->si_signo = signr;
info->si_errno = 0;
info->si_code = SI_USER;
info->si_pid = task_pid_vnr(current->parent);
info->si_uid = current->parent->uid;
}

/* If the (new) signal is now blocked, requeue it. */
if (sigismember(&current->blocked, signr)) {
specific_send_sig_info(signr, info, current);
if (signr != SIGKILL) {
signr = ptrace_signal(signr, info, regs, cookie);
if (!signr)
continue;
}
}

ka = &current->sighand->action[signr-1];
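
The signal.c part of the commit is a refactor rather than a behaviour change: the ptrace handling that used to live inline in get_signal_to_deliver() becomes ptrace_signal(), which returns the possibly rewritten signal number, or 0 when the debugger cancelled or re-blocked it, so the caller simply continues its loop. The siginfo-rewrite path the comment mentions is driven from user space with PTRACE_GETSIGINFO / PTRACE_SETSIGINFO; the sketch below is a rough, illustrative tracer using those requests (error handling omitted, not code from the commit).

    #include <signal.h>
    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        pid_t pid = fork();

        if (pid == 0) {                                /* tracee */
            ptrace(PTRACE_TRACEME, 0, NULL, NULL);
            raise(SIGUSR1);                            /* enters signal-delivery-stop */
            pause();
            return 0;
        }

        int status;
        waitpid(pid, &status, 0);                      /* tracee stopped on SIGUSR1 */

        siginfo_t si;
        ptrace(PTRACE_GETSIGINFO, pid, NULL, &si);
        printf("tracee stopped with signal %d\n", si.si_signo);

        /*
         * Rewrite the siginfo, then resume delivering a different signal;
         * because the siginfo was updated via PTRACE_SETSIGINFO the kernel
         * keeps it instead of synthesizing a new one.
         */
        si.si_signo = SIGUSR2;
        ptrace(PTRACE_SETSIGINFO, pid, NULL, &si);
        ptrace(PTRACE_CONT, pid, NULL, (void *)(long)SIGUSR2);

        waitpid(pid, &status, 0);                      /* SIGUSR2 terminates the tracee */
        return 0;
    }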
2 changes: 1 addition & 1 deletion trunk/lib/Kconfig.debug
@@ -211,7 +211,7 @@ config SLUB_DEBUG_ON
config SLUB_STATS
default n
bool "Enable SLUB performance statistics"
depends on SLUB
depends on SLUB && SLUB_DEBUG && SYSFS
help
SLUB statistics are useful to debug SLUBs allocation behavior in
order find ways to optimize the allocator. This should never be
97 changes: 64 additions & 33 deletions trunk/mm/slub.c
@@ -837,6 +837,35 @@ static void remove_full(struct kmem_cache *s, struct page *page)
spin_unlock(&n->list_lock);
}

/* Tracking of the number of slabs for debugging purposes */
static inline unsigned long slabs_node(struct kmem_cache *s, int node)
{
struct kmem_cache_node *n = get_node(s, node);

return atomic_long_read(&n->nr_slabs);
}

static inline void inc_slabs_node(struct kmem_cache *s, int node)
{
struct kmem_cache_node *n = get_node(s, node);

/*
* May be called early in order to allocate a slab for the
* kmem_cache_node structure. Solve the chicken-egg
* dilemma by deferring the increment of the count during
* bootstrap (see early_kmem_cache_node_alloc).
*/
if (!NUMA_BUILD || n)
atomic_long_inc(&n->nr_slabs);
}
static inline void dec_slabs_node(struct kmem_cache *s, int node)
{
struct kmem_cache_node *n = get_node(s, node);

atomic_long_dec(&n->nr_slabs);
}

/* Object debug checks for alloc/free paths */
static void setup_object_debug(struct kmem_cache *s, struct page *page,
void *object)
{
@@ -1028,6 +1057,11 @@ static inline unsigned long kmem_cache_flags(unsigned long objsize,
return flags;
}
#define slub_debug 0

static inline unsigned long slabs_node(struct kmem_cache *s, int node)
{ return 0; }
static inline void inc_slabs_node(struct kmem_cache *s, int node) {}
static inline void dec_slabs_node(struct kmem_cache *s, int node) {}
#endif
/*
* Slab allocation and freeing
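
Together with the !CONFIG_SLUB_DEBUG stubs just above, these hunks hide the per-node nr_slabs counter behind three small accessors (slabs_node, inc_slabs_node, dec_slabs_node) that compile to nothing when debugging is off, so the field can move under #ifdef CONFIG_SLUB_DEBUG in slub_def.h without touching every call site; the NUMA_BUILD check also covers the bootstrap case the comment describes, where the node structure does not exist yet. A stand-alone sketch of that pattern, using invented names (MY_DEBUG, struct node) rather than the kernel's:

    #include <stdio.h>

    #define MY_DEBUG 1                 /* stand-in for CONFIG_SLUB_DEBUG */

    struct node {
    #if MY_DEBUG
        long nr_slabs;                 /* counter only exists in debug builds */
    #endif
    };

    #if MY_DEBUG
    static inline long slabs_on(struct node *n) { return n->nr_slabs; }
    static inline void inc_slabs(struct node *n)
    {
        if (n)                         /* like the NUMA_BUILD/bootstrap check */
            n->nr_slabs++;
    }
    static inline void dec_slabs(struct node *n) { n->nr_slabs--; }
    #else
    static inline long slabs_on(struct node *n) { (void)n; return 0; }
    static inline void inc_slabs(struct node *n) { (void)n; }
    static inline void dec_slabs(struct node *n) { (void)n; }
    #endif

    int main(void)
    {
        struct node n = { 0 };

        inc_slabs(&n);                 /* call sites need no #ifdefs at all */
        inc_slabs(&n);
        dec_slabs(&n);
        printf("slabs: %ld\n", slabs_on(&n));
        return 0;
    }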
@@ -1066,7 +1100,6 @@ static void setup_object(struct kmem_cache *s, struct page *page,
static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
{
struct page *page;
struct kmem_cache_node *n;
void *start;
void *last;
void *p;
@@ -1078,9 +1111,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
if (!page)
goto out;

n = get_node(s, page_to_nid(page));
if (n)
atomic_long_inc(&n->nr_slabs);
inc_slabs_node(s, page_to_nid(page));
page->slab = s;
page->flags |= 1 << PG_slab;
if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
@@ -1125,6 +1156,8 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-pages);

__ClearPageSlab(page);
reset_page_mapcount(page);
__free_pages(page, s->order);
}

@@ -1151,11 +1184,7 @@ static void free_slab(struct kmem_cache *s, struct page *page)

static void discard_slab(struct kmem_cache *s, struct page *page)
{
struct kmem_cache_node *n = get_node(s, page_to_nid(page));

atomic_long_dec(&n->nr_slabs);
reset_page_mapcount(page);
__ClearPageSlab(page);
dec_slabs_node(s, page_to_nid(page));
free_slab(s, page);
}

@@ -1886,15 +1915,18 @@ static void init_kmem_cache_cpu(struct kmem_cache *s,
c->node = 0;
c->offset = s->offset / sizeof(void *);
c->objsize = s->objsize;
#ifdef CONFIG_SLUB_STATS
memset(c->stat, 0, NR_SLUB_STAT_ITEMS * sizeof(unsigned));
#endif
}

static void init_kmem_cache_node(struct kmem_cache_node *n)
{
n->nr_partial = 0;
atomic_long_set(&n->nr_slabs, 0);
spin_lock_init(&n->list_lock);
INIT_LIST_HEAD(&n->partial);
#ifdef CONFIG_SLUB_DEBUG
atomic_long_set(&n->nr_slabs, 0);
INIT_LIST_HEAD(&n->full);
#endif
}
@@ -2063,7 +2095,7 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
init_tracking(kmalloc_caches, n);
#endif
init_kmem_cache_node(n);
atomic_long_inc(&n->nr_slabs);
inc_slabs_node(kmalloc_caches, node);

/*
* lockdep requires consistent irq usage for each lock
@@ -2376,7 +2408,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
struct kmem_cache_node *n = get_node(s, node);

n->nr_partial -= free_list(s, n, &n->partial);
if (atomic_long_read(&n->nr_slabs))
if (slabs_node(s, node))
return 1;
}
free_kmem_cache_nodes(s);
@@ -2409,10 +2441,6 @@ EXPORT_SYMBOL(kmem_cache_destroy);
struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
EXPORT_SYMBOL(kmalloc_caches);

#ifdef CONFIG_ZONE_DMA
static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
#endif

static int __init setup_slub_min_order(char *str)
{
get_option(&str, &slub_min_order);
@@ -2472,6 +2500,7 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
}

#ifdef CONFIG_ZONE_DMA
static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];

static void sysfs_add_func(struct work_struct *w)
{
@@ -2688,21 +2717,6 @@ void kfree(const void *x)
}
EXPORT_SYMBOL(kfree);

#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SLABINFO)
static unsigned long count_partial(struct kmem_cache_node *n)
{
unsigned long flags;
unsigned long x = 0;
struct page *page;

spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(page, &n->partial, lru)
x += page->inuse;
spin_unlock_irqrestore(&n->list_lock, flags);
return x;
}
#endif

/*
* kmem_cache_shrink removes empty slabs from the partial lists and sorts
* the remaining slabs by the number of items in use. The slabs with the
@@ -2816,7 +2830,7 @@ static void slab_mem_offline_callback(void *arg)
* and offline_pages() function shoudn't call this
* callback. So, we must fail.
*/
BUG_ON(atomic_long_read(&n->nr_slabs));
BUG_ON(slabs_node(s, offline_node));

s->node[offline_node] = NULL;
kmem_cache_free(kmalloc_caches, n);
@@ -3181,6 +3195,21 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
return slab_alloc(s, gfpflags, node, caller);
}

#if (defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)) || defined(CONFIG_SLABINFO)
static unsigned long count_partial(struct kmem_cache_node *n)
{
unsigned long flags;
unsigned long x = 0;
struct page *page;

spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(page, &n->partial, lru)
x += page->inuse;
spin_unlock_irqrestore(&n->list_lock, flags);
return x;
}
#endif

#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
static int validate_slab(struct kmem_cache *s, struct page *page,
unsigned long *map)
@@ -3979,10 +4008,12 @@ static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)

len = sprintf(buf, "%lu", sum);

#ifdef CONFIG_SMP
for_each_online_cpu(cpu) {
if (data[cpu] && len < PAGE_SIZE - 20)
len += sprintf(buf + len, " c%d=%u", cpu, data[cpu]);
len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
}
#endif
kfree(data);
return len + sprintf(buf + len, "\n");
}
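
Finally, show_stat() now prints the per-CPU breakdown only on SMP builds and switches the field prefix from lowercase c to uppercase C, so a statistics file reads as a grand total followed by per-CPU counts. A small user-space sketch of that output format, with invented counter values:

    #include <stdio.h>

    #define NCPUS 4

    int main(void)
    {
        unsigned int per_cpu[NCPUS] = { 120, 98, 0, 45 };   /* hypothetical counters */
        char buf[256];
        unsigned long sum = 0;
        int len, cpu;

        for (cpu = 0; cpu < NCPUS; cpu++)
            sum += per_cpu[cpu];

        len = sprintf(buf, "%lu", sum);
        for (cpu = 0; cpu < NCPUS; cpu++) {             /* the CONFIG_SMP-only part */
            if (per_cpu[cpu] && len < (int)sizeof(buf) - 20)
                len += sprintf(buf + len, " C%d=%u", cpu, per_cpu[cpu]);
        }
        printf("%s\n", buf);                            /* prints "263 C0=120 C1=98 C3=45" */
        return 0;
    }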
