From 25359f7645ae2b094dc1c3b82c3bb0ccadb4229b Mon Sep 17 00:00:00 2001
From: Josef 'Jeff' Sipek
Date: Thu, 17 Apr 2008 07:45:56 +0200
Subject: [PATCH]

--- yaml ---
r: 88403
b: refs/heads/master
c: 08a8a0c59e54f7eb80897c1e77efa4a541d11008
h: refs/heads/master
i:
  88401: 8f047d7c9f5a3ab6b8225066d465192f2cf4c766
  88399: 354f1d4e14a4f1754712dbccbb47f052b32b9c0d
v: v3
---
 [refs]                           |  2 +-
 trunk/drivers/s390/block/Kconfig |  1 +
 trunk/drivers/s390/block/dasd.c  |  1 +
 trunk/include/linux/slub_def.h   |  2 +-
 trunk/init/Kconfig               |  2 +-
 trunk/kernel/cgroup.c            |  7 +--
 trunk/kernel/signal.c            | 71 ++++++++++-------------
 trunk/lib/Kconfig.debug          |  2 +-
 trunk/mm/slub.c                  | 97 +++++++++++---------------------
 9 files changed, 69 insertions(+), 116 deletions(-)

diff --git a/[refs] b/[refs]
index 6149cb7c6510..290cf29b32f1 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 7d939fbdfee49e5c06bd27214d25f726fb87a25a
+refs/heads/master: 08a8a0c59e54f7eb80897c1e77efa4a541d11008
diff --git a/trunk/drivers/s390/block/Kconfig b/trunk/drivers/s390/block/Kconfig
index e879b212cf43..07883197f474 100644
--- a/trunk/drivers/s390/block/Kconfig
+++ b/trunk/drivers/s390/block/Kconfig
@@ -20,6 +20,7 @@ config DCSSBLK
 config DASD
 	tristate "Support for DASD devices"
 	depends on CCW && BLOCK
+	select IOSCHED_DEADLINE
 	help
 	  Enable this option if you want to access DASDs directly utilizing
 	  S/390s channel subsystem commands. This is necessary for running
diff --git a/trunk/drivers/s390/block/dasd.c b/trunk/drivers/s390/block/dasd.c
index ccf46c96adb4..54f686d2c694 100644
--- a/trunk/drivers/s390/block/dasd.c
+++ b/trunk/drivers/s390/block/dasd.c
@@ -1956,6 +1956,7 @@ static int dasd_alloc_queue(struct dasd_block *block)
 	block->request_queue->queuedata = block;
 
 	elevator_exit(block->request_queue->elevator);
+	block->request_queue->elevator = NULL;
 	rc = elevator_init(block->request_queue, "deadline");
 	if (rc) {
 		blk_cleanup_queue(block->request_queue);
diff --git a/trunk/include/linux/slub_def.h b/trunk/include/linux/slub_def.h
index 79d59c937fac..b00c1c73eb0a 100644
--- a/trunk/include/linux/slub_def.h
+++ b/trunk/include/linux/slub_def.h
@@ -45,9 +45,9 @@ struct kmem_cache_cpu {
 struct kmem_cache_node {
 	spinlock_t list_lock;	/* Protect partial list and nr_partial */
 	unsigned long nr_partial;
+	atomic_long_t nr_slabs;
 	struct list_head partial;
 #ifdef CONFIG_SLUB_DEBUG
-	atomic_long_t nr_slabs;
 	struct list_head full;
 #endif
 };
diff --git a/trunk/init/Kconfig b/trunk/init/Kconfig
index 7fccf09bb95a..a97924bc5b8d 100644
--- a/trunk/init/Kconfig
+++ b/trunk/init/Kconfig
@@ -763,7 +763,7 @@ endmenu		# General setup
 config SLABINFO
 	bool
 	depends on PROC_FS
-	depends on SLAB || SLUB_DEBUG
+	depends on SLAB || SLUB
 	default y
 
 config RT_MUTEXES
diff --git a/trunk/kernel/cgroup.c b/trunk/kernel/cgroup.c
index 6d8de051382b..2727f9238359 100644
--- a/trunk/kernel/cgroup.c
+++ b/trunk/kernel/cgroup.c
@@ -1722,12 +1722,7 @@ void cgroup_enable_task_cg_lists(void)
 	use_task_css_set_links = 1;
 	do_each_thread(g, p) {
 		task_lock(p);
-		/*
-		 * We should check if the process is exiting, otherwise
-		 * it will race with cgroup_exit() in that the list
-		 * entry won't be deleted though the process has exited.
-		 */
-		if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))
+		if (list_empty(&p->cg_list))
 			list_add(&p->cg_list, &p->cgroups->tasks);
 		task_unlock(p);
 	} while_each_thread(g, p);
diff --git a/trunk/kernel/signal.c b/trunk/kernel/signal.c
index cc8303cd093d..6af1210092c3 100644
--- a/trunk/kernel/signal.c
+++ b/trunk/kernel/signal.c
@@ -1757,45 +1757,6 @@ static int do_signal_stop(int signr)
 	return 1;
 }
 
-static int ptrace_signal(int signr, siginfo_t *info,
-			 struct pt_regs *regs, void *cookie)
-{
-	if (!(current->ptrace & PT_PTRACED))
-		return signr;
-
-	ptrace_signal_deliver(regs, cookie);
-
-	/* Let the debugger run. */
-	ptrace_stop(signr, 0, info);
-
-	/* We're back. Did the debugger cancel the sig? */
-	signr = current->exit_code;
-	if (signr == 0)
-		return signr;
-
-	current->exit_code = 0;
-
-	/* Update the siginfo structure if the signal has
-	   changed. If the debugger wanted something
-	   specific in the siginfo structure then it should
-	   have updated *info via PTRACE_SETSIGINFO. */
-	if (signr != info->si_signo) {
-		info->si_signo = signr;
-		info->si_errno = 0;
-		info->si_code = SI_USER;
-		info->si_pid = task_pid_vnr(current->parent);
-		info->si_uid = current->parent->uid;
-	}
-
-	/* If the (new) signal is now blocked, requeue it. */
-	if (sigismember(&current->blocked, signr)) {
-		specific_send_sig_info(signr, info, current);
-		signr = 0;
-	}
-
-	return signr;
-}
-
 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
 			  struct pt_regs *regs, void *cookie)
 {
@@ -1824,10 +1785,36 @@ int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
 		if (!signr)
 			break; /* will return 0 */
 
-		if (signr != SIGKILL) {
-			signr = ptrace_signal(signr, info, regs, cookie);
-			if (!signr)
+		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
+			ptrace_signal_deliver(regs, cookie);
+
+			/* Let the debugger run. */
+			ptrace_stop(signr, 0, info);
+
+			/* We're back. Did the debugger cancel the sig? */
+			signr = current->exit_code;
+			if (signr == 0)
+				continue;
+
+			current->exit_code = 0;
+
+			/* Update the siginfo structure if the signal has
+			   changed. If the debugger wanted something
+			   specific in the siginfo structure then it should
+			   have updated *info via PTRACE_SETSIGINFO. */
+			if (signr != info->si_signo) {
+				info->si_signo = signr;
+				info->si_errno = 0;
+				info->si_code = SI_USER;
+				info->si_pid = task_pid_vnr(current->parent);
+				info->si_uid = current->parent->uid;
+			}
+
+			/* If the (new) signal is now blocked, requeue it. */
+			if (sigismember(&current->blocked, signr)) {
+				specific_send_sig_info(signr, info, current);
 				continue;
+			}
 		}
 
 		ka = &current->sighand->action[signr-1];
diff --git a/trunk/lib/Kconfig.debug b/trunk/lib/Kconfig.debug
index eef557dc46c3..0796c1a090c0 100644
--- a/trunk/lib/Kconfig.debug
+++ b/trunk/lib/Kconfig.debug
@@ -211,7 +211,7 @@ config SLUB_DEBUG_ON
 config SLUB_STATS
 	default n
 	bool "Enable SLUB performance statistics"
-	depends on SLUB && SLUB_DEBUG && SYSFS
+	depends on SLUB
 	help
 	  SLUB statistics are useful to debug SLUBs allocation behavior
 	  in order find ways to optimize the allocator. This should never be
diff --git a/trunk/mm/slub.c b/trunk/mm/slub.c
index 7f8aaa291a4e..acc975fcc8cc 100644
--- a/trunk/mm/slub.c
+++ b/trunk/mm/slub.c
@@ -837,35 +837,6 @@ static void remove_full(struct kmem_cache *s, struct page *page)
 	spin_unlock(&n->list_lock);
 }
 
-/* Tracking of the number of slabs for debugging purposes */
-static inline unsigned long slabs_node(struct kmem_cache *s, int node)
-{
-	struct kmem_cache_node *n = get_node(s, node);
-
-	return atomic_long_read(&n->nr_slabs);
-}
-
-static inline void inc_slabs_node(struct kmem_cache *s, int node)
-{
-	struct kmem_cache_node *n = get_node(s, node);
-
-	/*
-	 * May be called early in order to allocate a slab for the
-	 * kmem_cache_node structure. Solve the chicken-egg
-	 * dilemma by deferring the increment of the count during
-	 * bootstrap (see early_kmem_cache_node_alloc).
-	 */
-	if (!NUMA_BUILD || n)
-		atomic_long_inc(&n->nr_slabs);
-}
-static inline void dec_slabs_node(struct kmem_cache *s, int node)
-{
-	struct kmem_cache_node *n = get_node(s, node);
-
-	atomic_long_dec(&n->nr_slabs);
-}
-
-/* Object debug checks for alloc/free paths */
 static void setup_object_debug(struct kmem_cache *s, struct page *page,
 								void *object)
 {
@@ -1057,11 +1028,6 @@ static inline unsigned long kmem_cache_flags(unsigned long objsize,
 	return flags;
 }
 #define slub_debug 0
-
-static inline unsigned long slabs_node(struct kmem_cache *s, int node)
-							{ return 0; }
-static inline void inc_slabs_node(struct kmem_cache *s, int node) {}
-static inline void dec_slabs_node(struct kmem_cache *s, int node) {}
 #endif
 /*
  * Slab allocation and freeing
@@ -1100,6 +1066,7 @@ static void setup_object(struct kmem_cache *s, struct page *page,
 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page *page;
+	struct kmem_cache_node *n;
 	void *start;
 	void *last;
 	void *p;
@@ -1111,7 +1078,9 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if (!page)
 		goto out;
 
-	inc_slabs_node(s, page_to_nid(page));
+	n = get_node(s, page_to_nid(page));
+	if (n)
+		atomic_long_inc(&n->nr_slabs);
 	page->slab = s;
 	page->flags |= 1 << PG_slab;
 	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
@@ -1156,8 +1125,6 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
 		-pages);
 
-	__ClearPageSlab(page);
-	reset_page_mapcount(page);
 	__free_pages(page, s->order);
 }
 
@@ -1184,7 +1151,11 @@ static void free_slab(struct kmem_cache *s, struct page *page)
 
 static void discard_slab(struct kmem_cache *s, struct page *page)
 {
-	dec_slabs_node(s, page_to_nid(page));
+	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
+	atomic_long_dec(&n->nr_slabs);
+	reset_page_mapcount(page);
+	__ClearPageSlab(page);
 	free_slab(s, page);
 }
 
@@ -1915,18 +1886,15 @@ static void init_kmem_cache_cpu(struct kmem_cache *s,
 	c->node = 0;
 	c->offset = s->offset / sizeof(void *);
 	c->objsize = s->objsize;
-#ifdef CONFIG_SLUB_STATS
-	memset(c->stat, 0, NR_SLUB_STAT_ITEMS * sizeof(unsigned));
-#endif
 }
 
 static void init_kmem_cache_node(struct kmem_cache_node *n)
 {
 	n->nr_partial = 0;
+	atomic_long_set(&n->nr_slabs, 0);
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
-	atomic_long_set(&n->nr_slabs, 0);
 	INIT_LIST_HEAD(&n->full);
 #endif
 }
@@ -2095,7 +2063,7 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
 	init_tracking(kmalloc_caches, n);
 #endif
 	init_kmem_cache_node(n);
-	inc_slabs_node(kmalloc_caches, node);
+	atomic_long_inc(&n->nr_slabs);
 
 	/*
 	 * lockdep requires consistent irq usage for each lock
@@ -2408,7 +2376,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
 		struct kmem_cache_node *n = get_node(s, node);
 
 		n->nr_partial -= free_list(s, n, &n->partial);
-		if (slabs_node(s, node))
+		if (atomic_long_read(&n->nr_slabs))
 			return 1;
 	}
 	free_kmem_cache_nodes(s);
@@ -2441,6 +2409,10 @@ EXPORT_SYMBOL(kmem_cache_destroy);
 struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
+#ifdef CONFIG_ZONE_DMA
+static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
+#endif
+
 static int __init setup_slub_min_order(char *str)
 {
 	get_option(&str, &slub_min_order);
@@ -2500,7 +2472,6 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
 }
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
 
 static void sysfs_add_func(struct work_struct *w)
 {
@@ -2717,6 +2688,21 @@ void kfree(const void *x)
 }
 EXPORT_SYMBOL(kfree);
 
+#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SLABINFO)
+static unsigned long count_partial(struct kmem_cache_node *n)
+{
+	unsigned long flags;
+	unsigned long x = 0;
+	struct page *page;
+
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry(page, &n->partial, lru)
+		x += page->inuse;
+	spin_unlock_irqrestore(&n->list_lock, flags);
+	return x;
+}
+#endif
+
 /*
  * kmem_cache_shrink removes empty slabs from the partial lists and sorts
  * the remaining slabs by the number of items in use. The slabs with the
@@ -2830,7 +2816,7 @@ static void slab_mem_offline_callback(void *arg)
 			 * and offline_pages() function shoudn't call this
 			 * callback. So, we must fail.
 			 */
-			BUG_ON(slabs_node(s, offline_node));
+			BUG_ON(atomic_long_read(&n->nr_slabs));
 
 			s->node[offline_node] = NULL;
 			kmem_cache_free(kmalloc_caches, n);
@@ -3195,21 +3181,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	return slab_alloc(s, gfpflags, node, caller);
 }
 
-#if (defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)) || defined(CONFIG_SLABINFO)
-static unsigned long count_partial(struct kmem_cache_node *n)
-{
-	unsigned long flags;
-	unsigned long x = 0;
-	struct page *page;
-
-	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry(page, &n->partial, lru)
-		x += page->inuse;
-	spin_unlock_irqrestore(&n->list_lock, flags);
-	return x;
-}
-#endif
-
 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
 static int validate_slab(struct kmem_cache *s, struct page *page,
 						unsigned long *map)
@@ -4008,12 +3979,10 @@ static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
 
 	len = sprintf(buf, "%lu", sum);
 
-#ifdef CONFIG_SMP
 	for_each_online_cpu(cpu) {
 		if (data[cpu] && len < PAGE_SIZE - 20)
-			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
+			len += sprintf(buf + len, " c%d=%u", cpu, data[cpu]);
 	}
-#endif
 	kfree(data);
 	return len + sprintf(buf + len, "\n");
 }
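
Note: the slub.c hunks above move the per-node nr_slabs counter out from
under CONFIG_SLUB_DEBUG and open-code its maintenance: atomic_long_inc()
when new_slab() gets a page, atomic_long_dec() in discard_slab(), and an
atomic_long_read() in kmem_cache_close() that refuses teardown while slabs
remain. Below is a minimal userspace sketch of that accounting pattern,
using C11 atomics in place of the kernel's atomic_long_t; the harness
(struct node, main, the simplified function signatures) is illustrative
only and is not code from this patch.

	/*
	 * Userspace analogue of the per-node slab counter: bump on slab
	 * allocation, drop on discard, check at cache teardown.
	 * Build with: gcc -std=c11 -o nr_slabs nr_slabs.c
	 */
	#include <stdatomic.h>
	#include <stdio.h>

	struct node {
		atomic_long nr_slabs;	/* counterpart of kmem_cache_node.nr_slabs */
	};

	static void init_node(struct node *n)
	{
		atomic_init(&n->nr_slabs, 0);	/* mirrors init_kmem_cache_node() */
	}

	static void new_slab(struct node *n)
	{
		atomic_fetch_add(&n->nr_slabs, 1);	/* mirrors new_slab() */
	}

	static void discard_slab(struct node *n)
	{
		atomic_fetch_sub(&n->nr_slabs, 1);	/* mirrors discard_slab() */
	}

	/* mirrors kmem_cache_close(): nonzero means slabs still in use */
	static int cache_busy(struct node *n)
	{
		return atomic_load(&n->nr_slabs) != 0;
	}

	int main(void)
	{
		struct node n;

		init_node(&n);
		new_slab(&n);
		printf("busy: %d\n", cache_busy(&n));	/* busy: 1 */
		discard_slab(&n);
		printf("busy: %d\n", cache_busy(&n));	/* busy: 0 */
		return 0;
	}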