diff --git a/[refs] b/[refs]
index 8a6b93cf6bb8..ae94e9ba0a26 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: fd37617e69fb865348d012eb1413aef0141ae2de
+refs/heads/master: 773ff60e841461cb1f9374a713ffcda029b8c317
diff --git a/trunk/include/linux/fault-inject.h b/trunk/include/linux/fault-inject.h
index 32368c4f0326..06ca9b21dad2 100644
--- a/trunk/include/linux/fault-inject.h
+++ b/trunk/include/linux/fault-inject.h
@@ -81,4 +81,13 @@ static inline void cleanup_fault_attr_dentries(struct fault_attr *attr)
 
 #endif /* CONFIG_FAULT_INJECTION */
 
+#ifdef CONFIG_FAILSLAB
+extern bool should_failslab(size_t size, gfp_t gfpflags);
+#else
+static inline bool should_failslab(size_t size, gfp_t gfpflags)
+{
+	return false;
+}
+#endif /* CONFIG_FAILSLAB */
+
 #endif /* _LINUX_FAULT_INJECT_H */
diff --git a/trunk/include/linux/slab.h b/trunk/include/linux/slab.h
index f96d13c281e8..000da12b5cf0 100644
--- a/trunk/include/linux/slab.h
+++ b/trunk/include/linux/slab.h
@@ -253,9 +253,9 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
  * request comes from.
  */
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
+extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
 #define kmalloc_track_caller(size, flags) \
-	__kmalloc_track_caller(size, flags, _RET_IP_)
+	__kmalloc_track_caller(size, flags, __builtin_return_address(0))
 #else
 #define kmalloc_track_caller(size, flags) \
 	__kmalloc(size, flags)
@@ -271,10 +271,10 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
  * allocation request comes from.
  */
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
 #define kmalloc_node_track_caller(size, flags, node) \
 	__kmalloc_node_track_caller(size, flags, node, \
-			_RET_IP_)
+			__builtin_return_address(0))
 #else
 #define kmalloc_node_track_caller(size, flags, node) \
 	__kmalloc_node(size, flags, node)
@@ -285,7 +285,7 @@ extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
 #define kmalloc_node_track_caller(size, flags, node) \
 	kmalloc_track_caller(size, flags)
 
-#endif /* CONFIG_NUMA */
+#endif /* DEBUG_SLAB */
 
 /*
  * Shortcuts
diff --git a/trunk/lib/Kconfig.debug b/trunk/lib/Kconfig.debug
index b0f239e443bc..af65ae7f0549 100644
--- a/trunk/lib/Kconfig.debug
+++ b/trunk/lib/Kconfig.debug
@@ -699,6 +699,7 @@ config FAULT_INJECTION
 config FAILSLAB
 	bool "Fault-injection capability for kmalloc"
 	depends on FAULT_INJECTION
+	depends on SLAB || SLUB
 	help
 	  Provide fault-injection capability for kmalloc.
 
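Note: the two slab.h hunks above move the track-caller argument from an unsigned long cookie (_RET_IP_) back to a plain code pointer taken with __builtin_return_address(0). Either representation serves the same goal: a thin wrapper records its own return address, so slab debug output names the wrapper's caller as the allocation site rather than the wrapper itself. A minimal sketch of that pattern; my_strdup() is hypothetical, though the in-tree kstrdup() works along these lines:

	#include <linux/slab.h>
	#include <linux/string.h>

	/*
	 * Calling kmalloc_track_caller() instead of kmalloc() makes
	 * allocation-tracking output attribute the buffer to
	 * my_strdup()'s caller, not to my_strdup() itself.
	 */
	static char *my_strdup(const char *s, gfp_t gfp)
	{
		size_t len = strlen(s) + 1;
		char *buf = kmalloc_track_caller(len, gfp);

		if (buf)
			memcpy(buf, s, len);
		return buf;
	}
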
diff --git a/trunk/mm/Makefile b/trunk/mm/Makefile
index c06b45a1ff5f..51c27709cc7c 100644
--- a/trunk/mm/Makefile
+++ b/trunk/mm/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_SLOB) += slob.o
 obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
 obj-$(CONFIG_SLAB) += slab.o
 obj-$(CONFIG_SLUB) += slub.o
+obj-$(CONFIG_FAILSLAB) += failslab.o
 obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
 obj-$(CONFIG_FS_XIP) += filemap_xip.o
 obj-$(CONFIG_MIGRATION) += migrate.o
diff --git a/trunk/mm/failslab.c b/trunk/mm/failslab.c
new file mode 100644
index 000000000000..7c6ea6493f80
--- /dev/null
+++ b/trunk/mm/failslab.c
@@ -0,0 +1,59 @@
+#include <linux/fault-inject.h>
+
+static struct {
+	struct fault_attr attr;
+	u32 ignore_gfp_wait;
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+	struct dentry *ignore_gfp_wait_file;
+#endif
+} failslab = {
+	.attr = FAULT_ATTR_INITIALIZER,
+	.ignore_gfp_wait = 1,
+};
+
+bool should_failslab(size_t size, gfp_t gfpflags)
+{
+	if (gfpflags & __GFP_NOFAIL)
+		return false;
+
+	if (failslab.ignore_gfp_wait && (gfpflags & __GFP_WAIT))
+		return false;
+
+	return should_fail(&failslab.attr, size);
+}
+
+static int __init setup_failslab(char *str)
+{
+	return setup_fault_attr(&failslab.attr, str);
+}
+__setup("failslab=", setup_failslab);
+
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+
+static int __init failslab_debugfs_init(void)
+{
+	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
+	struct dentry *dir;
+	int err;
+
+	err = init_fault_attr_dentries(&failslab.attr, "failslab");
+	if (err)
+		return err;
+	dir = failslab.attr.dentries.dir;
+
+	failslab.ignore_gfp_wait_file =
+		debugfs_create_bool("ignore-gfp-wait", mode, dir,
+				    &failslab.ignore_gfp_wait);
+
+	if (!failslab.ignore_gfp_wait_file) {
+		err = -ENOMEM;
+		debugfs_remove(failslab.ignore_gfp_wait_file);
+		cleanup_fault_attr_dentries(&failslab.attr);
+	}
+
+	return err;
+}
+
+late_initcall(failslab_debugfs_init);
+
+#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
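Note: the new mm/failslab.c keeps the decision order of the SLAB-private version it replaces (see the mm/slab.c hunks below): __GFP_NOFAIL allocations are never failed, __GFP_WAIT allocations are exempt while the ignore_gfp_wait knob is set (the default), and only then is the generic fault-injection core consulted. A sketch of the call pattern this export enables; all my_-prefixed names are hypothetical stand-ins for an allocator's internals:

	#include <linux/fault-inject.h>
	#include <linux/gfp.h>

	struct my_cache {			/* hypothetical allocator state */
		size_t object_size;
	};

	extern void *my_do_alloc(struct my_cache *c, gfp_t gfpflags); /* hypothetical */

	/*
	 * An allocator entry point asks the shared helper first; an
	 * injected failure is reported as a plain NULL return, so callers
	 * exercise their normal out-of-memory handling.
	 */
	static void *my_alloc_entry(struct my_cache *c, gfp_t gfpflags)
	{
		if (should_failslab(c->object_size, gfpflags))
			return NULL;

		return my_do_alloc(c, gfpflags);
	}

The __setup handler feeds the failslab= boot string to setup_fault_attr(), which takes the fault-injection framework's usual <interval>,<probability>,<space>,<times> quadruple; booting with, say, failslab=1,10,0,-1 (values illustrative) would give every eligible allocation a 10% chance of failing, with no cap on the number of injected faults.
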
diff --git a/trunk/mm/slab.c b/trunk/mm/slab.c
index cb2e411d93a9..c347dd8480cc 100644
--- a/trunk/mm/slab.c
+++ b/trunk/mm/slab.c
@@ -2123,8 +2123,6 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
  *
  * @name must be valid until the cache is destroyed. This implies that
  * the module calling this has to destroy the cache before getting unloaded.
- * Note that kmem_cache_name() is not guaranteed to return the same pointer,
- * therefore applications must manage it themselves.
  *
  * The flags are
  *
@@ -2611,7 +2609,7 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
 	if (OFF_SLAB(cachep)) {
 		/* Slab management obj is off-slab. */
 		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
-					      local_flags, nodeid);
+					      local_flags & ~GFP_THISNODE, nodeid);
 		if (!slabp)
 			return NULL;
 	} else {
@@ -2999,7 +2997,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 		 * there must be at least one object available for
 		 * allocation.
 		 */
-		BUG_ON(slabp->inuse >= cachep->num);
+		BUG_ON(slabp->inuse < 0 || slabp->inuse >= cachep->num);
 
 		while (slabp->inuse < cachep->num && batchcount--) {
 			STATS_INC_ALLOCED(cachep);
@@ -3108,79 +3106,14 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
 #endif
 
-#ifdef CONFIG_FAILSLAB
-
-static struct failslab_attr {
-
-	struct fault_attr attr;
-
-	u32 ignore_gfp_wait;
-#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
-	struct dentry *ignore_gfp_wait_file;
-#endif
-
-} failslab = {
-	.attr = FAULT_ATTR_INITIALIZER,
-	.ignore_gfp_wait = 1,
-};
-
-static int __init setup_failslab(char *str)
-{
-	return setup_fault_attr(&failslab.attr, str);
-}
-__setup("failslab=", setup_failslab);
-
-static int should_failslab(struct kmem_cache *cachep, gfp_t flags)
+static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
 {
 	if (cachep == &cache_cache)
-		return 0;
-	if (flags & __GFP_NOFAIL)
-		return 0;
-	if (failslab.ignore_gfp_wait && (flags & __GFP_WAIT))
-		return 0;
+		return false;
 
-	return should_fail(&failslab.attr, obj_size(cachep));
+	return should_failslab(obj_size(cachep), flags);
 }
 
-#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
-
-static int __init failslab_debugfs(void)
-{
-	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
-	struct dentry *dir;
-	int err;
-
-	err = init_fault_attr_dentries(&failslab.attr, "failslab");
-	if (err)
-		return err;
-	dir = failslab.attr.dentries.dir;
-
-	failslab.ignore_gfp_wait_file =
-		debugfs_create_bool("ignore-gfp-wait", mode, dir,
-				    &failslab.ignore_gfp_wait);
-
-	if (!failslab.ignore_gfp_wait_file) {
-		err = -ENOMEM;
-		debugfs_remove(failslab.ignore_gfp_wait_file);
-		cleanup_fault_attr_dentries(&failslab.attr);
-	}
-
-	return err;
-}
-
-late_initcall(failslab_debugfs);
-
-#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
-
-#else /* CONFIG_FAILSLAB */
-
-static inline int should_failslab(struct kmem_cache *cachep, gfp_t flags)
-{
-	return 0;
-}
-
-#endif /* CONFIG_FAILSLAB */
-
 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
 	void *objp;
@@ -3383,7 +3316,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	unsigned long save_flags;
 	void *ptr;
 
-	if (should_failslab(cachep, flags))
+	if (slab_should_failslab(cachep, flags))
 		return NULL;
 
 	cache_alloc_debugcheck_before(cachep, flags);
@@ -3459,7 +3392,7 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 	unsigned long save_flags;
 	void *objp;
 
-	if (should_failslab(cachep, flags))
+	if (slab_should_failslab(cachep, flags))
 		return NULL;
 
 	cache_alloc_debugcheck_before(cachep, flags);
@@ -3688,9 +3621,9 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 EXPORT_SYMBOL(__kmalloc_node);
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
-					int node, unsigned long caller)
+					int node, void *caller)
 {
-	return __do_kmalloc_node(size, flags, node, (void *)caller);
+	return __do_kmalloc_node(size, flags, node, caller);
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
 #else
@@ -3732,9 +3665,9 @@ void *__kmalloc(size_t size, gfp_t flags)
 }
 EXPORT_SYMBOL(__kmalloc);
 
-void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
+void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
 {
-	return __do_kmalloc(size, flags, (void *)caller);
+	return __do_kmalloc(size, flags, caller);
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
 
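Note: two design points stand out in the mm/slab.c conversion above. First, SLAB keeps a local wrapper, slab_should_failslab(), purely so that cache_cache (the bootstrap cache that kmem_cache structures themselves live in) is never failed; everything else is delegated to the shared helper. Second, the return type switches from int to bool along the way, matching the new should_failslab() prototype in fault-inject.h. SLUB needs no such filter and, as the next file shows, calls should_failslab() straight from its fast path.
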
diff --git a/trunk/mm/slub.c b/trunk/mm/slub.c
index ca95e45f04c3..640fde7e354c 100644
--- a/trunk/mm/slub.c
+++ b/trunk/mm/slub.c
@@ -24,6 +24,7 @@
 #include <linux/kallsyms.h>
 #include <linux/memory.h>
 #include <linux/math64.h>
+#include <linux/fault-inject.h>
 
 /*
  * Lock order:
@@ -153,10 +154,6 @@
 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
 #endif
 
-#define OO_SHIFT	16
-#define OO_MASK		((1 << OO_SHIFT) - 1)
-#define MAX_OBJS_PER_PAGE	65535 /* since page.objects is u16 */
-
 /* Internal SLUB flags */
 #define __OBJECT_POISON		0x80000000 /* Poison object */
 #define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
@@ -182,7 +179,7 @@ static LIST_HEAD(slab_caches);
  * Tracking user of a slab.
  */
 struct track {
-	unsigned long addr;	/* Called from address */
+	void *addr;		/* Called from address */
 	int cpu;		/* Was running on cpu */
 	int pid;		/* Pid context */
 	unsigned long when;	/* When did the operation occur */
@@ -294,7 +291,7 @@ static inline struct kmem_cache_order_objects oo_make(int order,
 						unsigned long size)
 {
 	struct kmem_cache_order_objects x = {
-		(order << OO_SHIFT) + (PAGE_SIZE << order) / size
+		(order << 16) + (PAGE_SIZE << order) / size
 	};
 
 	return x;
@@ -302,12 +299,12 @@
 
 static inline int oo_order(struct kmem_cache_order_objects x)
 {
-	return x.x >> OO_SHIFT;
+	return x.x >> 16;
 }
 
 static inline int oo_objects(struct kmem_cache_order_objects x)
 {
-	return x.x & OO_MASK;
+	return x.x & ((1 << 16) - 1);
 }
 
 #ifdef CONFIG_SLUB_DEBUG
@@ -371,7 +368,7 @@ static struct track *get_track(struct kmem_cache *s, void *object,
 }
 
 static void set_track(struct kmem_cache *s, void *object,
-			enum track_item alloc, unsigned long addr)
+			enum track_item alloc, void *addr)
 {
 	struct track *p;
 
@@ -395,8 +392,8 @@ static void init_tracking(struct kmem_cache *s, void *object)
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
-	set_track(s, object, TRACK_FREE, 0UL);
-	set_track(s, object, TRACK_ALLOC, 0UL);
+	set_track(s, object, TRACK_FREE, NULL);
+	set_track(s, object, TRACK_ALLOC, NULL);
 }
 
 static void print_track(const char *s, struct track *t)
@@ -405,7 +402,7 @@ static void print_track(const char *s, struct track *t)
 		return;
 
 	printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
-		s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
+		s, t->addr, jiffies - t->when, t->cpu, t->pid);
 }
 
 static void print_tracking(struct kmem_cache *s, void *object)
@@ -696,7 +693,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
 	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
 		object_err(s, page, p, "Freepointer corrupt");
 		/*
-		 * No choice but to zap it and thus lose the remainder
+		 * No choice but to zap it and thus loose the remainder
 		 * of the free objects in this slab. May cause
 		 * another error because the object count is now wrong.
 		 */
@@ -768,8 +765,8 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 	}
 
 	max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
-	if (max_objects > MAX_OBJS_PER_PAGE)
-		max_objects = MAX_OBJS_PER_PAGE;
+	if (max_objects > 65535)
+		max_objects = 65535;
 
 	if (page->objects != max_objects) {
 		slab_err(s, page, "Wrong number of objects. Found %d but "
@@ -870,7 +867,7 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
 }
 
 static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
-					void *object, unsigned long addr)
+					void *object, void *addr)
 {
 	if (!check_slab(s, page))
 		goto bad;
@@ -910,7 +907,7 @@ static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
 }
 
 static int free_debug_processing(struct kmem_cache *s, struct page *page,
-					void *object, unsigned long addr)
+					void *object, void *addr)
 {
 	if (!check_slab(s, page))
 		goto fail;
@@ -1033,10 +1030,10 @@ static inline void setup_object_debug(struct kmem_cache *s,
 			struct page *page, void *object) {}
 
 static inline int alloc_debug_processing(struct kmem_cache *s,
-	struct page *page, void *object, unsigned long addr) { return 0; }
+	struct page *page, void *object, void *addr) { return 0; }
 
 static inline int free_debug_processing(struct kmem_cache *s,
-	struct page *page, void *object, unsigned long addr) { return 0; }
+	struct page *page, void *object, void *addr) { return 0; }
 
 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
 			{ return 1; }
@@ -1503,8 +1500,8 @@ static inline int node_match(struct kmem_cache_cpu *c, int node)
  * we need to allocate a new slab. This is the slowest path since it involves
  * a call to the page allocator and the setup of a new slab.
  */
-static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
-			  unsigned long addr, struct kmem_cache_cpu *c)
+static void *__slab_alloc(struct kmem_cache *s,
+		gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c)
 {
 	void **object;
 	struct page *new;
@@ -1588,14 +1585,16 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
  * Otherwise we can simply pick the next object from the lockless free list.
  */
 static __always_inline void *slab_alloc(struct kmem_cache *s,
-		gfp_t gfpflags, int node, unsigned long addr)
+		gfp_t gfpflags, int node, void *addr)
 {
 	void **object;
 	struct kmem_cache_cpu *c;
 	unsigned long flags;
 	unsigned int objsize;
 
-	might_sleep_if(gfpflags & __GFP_WAIT);
+	if (should_failslab(s->objsize, gfpflags))
+		return NULL;
+
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
 	objsize = c->objsize;
@@ -1618,14 +1617,14 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+	return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	return slab_alloc(s, gfpflags, node, _RET_IP_);
+	return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
@@ -1639,7 +1638,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
  * handling required then we can return immediately.
  */
 static void __slab_free(struct kmem_cache *s, struct page *page,
-				void *x, unsigned long addr, unsigned int offset)
+				void *x, void *addr, unsigned int offset)
 {
 	void *prior;
 	void **object = (void *)x;
@@ -1709,7 +1708,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 * with all sorts of special processing.
 */
 static __always_inline void slab_free(struct kmem_cache *s,
-			struct page *page, void *x, unsigned long addr)
+			struct page *page, void *x, void *addr)
 {
 	void **object = (void *)x;
 	struct kmem_cache_cpu *c;
@@ -1736,11 +1735,11 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 
 	page = virt_to_head_page(x);
 
-	slab_free(s, page, x, _RET_IP_);
+	slab_free(s, page, x, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
-/* Figure out on which slab page the object resides */
+/* Figure out on which slab object the object resides */
 static struct page *get_object_page(const void *x)
 {
 	struct page *page = virt_to_head_page(x);
@@ -1812,8 +1811,8 @@ static inline int slab_order(int size, int min_objects,
 	int rem;
 	int min_order = slub_min_order;
 
-	if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE)
-		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
+	if ((PAGE_SIZE << min_order) / size > 65535)
+		return get_order(size * 65535) - 1;
 
 	for (order = max(min_order,
 				fls(min_objects * size - 1) - PAGE_SHIFT);
@@ -2078,7 +2077,8 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
 * when allocating for the kmalloc_node_cache. This is used for bootstrapping
 * memory on a fresh node that has no slab structures yet.
 */
-static void early_kmem_cache_node_alloc(gfp_t gfpflags, int node)
+static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
+							   int node)
 {
 	struct page *page;
 	struct kmem_cache_node *n;
@@ -2116,6 +2116,7 @@ static void early_kmem_cache_node_alloc(gfp_t gfpflags, int node)
 	local_irq_save(flags);
 	add_partial(n, page, 0);
 	local_irq_restore(flags);
+	return n;
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -2147,7 +2148,8 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 			n = &s->local_node;
 		else {
 			if (slab_state == DOWN) {
-				early_kmem_cache_node_alloc(gfpflags, node);
+				n = early_kmem_cache_node_alloc(gfpflags,
+								node);
 				continue;
 			}
 			n = kmem_cache_alloc_node(kmalloc_caches,
@@ -2661,7 +2663,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, -1, _RET_IP_);
+	return slab_alloc(s, flags, -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2689,7 +2691,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, node, _RET_IP_);
+	return slab_alloc(s, flags, node, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2746,7 +2748,7 @@ void kfree(const void *x)
 		put_page(page);
 		return;
 	}
-	slab_free(page->slab, page, object, _RET_IP_);
+	slab_free(page->slab, page, object, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kfree);
 
@@ -3125,12 +3127,8 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
 		up_write(&slub_lock);
 
-		if (sysfs_slab_alias(s, name)) {
-			down_write(&slub_lock);
-			s->refcount--;
-			up_write(&slub_lock);
+		if (sysfs_slab_alias(s, name))
 			goto err;
-		}
 
 		return s;
 	}
@@ -3140,13 +3138,8 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 			size, align, flags, ctor)) {
 			list_add(&s->list, &slab_caches);
 			up_write(&slub_lock);
-			if (sysfs_slab_add(s)) {
-				down_write(&slub_lock);
-				list_del(&s->list);
-				up_write(&slub_lock);
-				kfree(s);
+			if (sysfs_slab_add(s))
 				goto err;
-			}
 			return s;
 		}
 		kfree(s);
@@ -3213,7 +3206,7 @@ static struct notifier_block __cpuinitdata slab_notifier = {
 
 #endif
 
-void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
+void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 {
 	struct kmem_cache *s;
 
@@ -3229,7 +3222,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
-					int node, unsigned long caller)
+					int node, void *caller)
 {
 	struct kmem_cache *s;
 
@@ -3440,7 +3433,7 @@ static void resiliency_test(void) {};
 
 struct location {
 	unsigned long count;
-	unsigned long addr;
+	void *addr;
 	long long sum_time;
 	long min_time;
 	long max_time;
@@ -3488,7 +3481,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 {
 	long start, end, pos;
 	struct location *l;
-	unsigned long caddr;
+	void *caddr;
 	unsigned long age = jiffies - track->when;
 
 	start = -1;
@@ -4356,7 +4349,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
 
 /*
  * Need to buffer aliases during bootup until sysfs becomes
- * available lest we lose that information.
+ * available lest we loose that information.
 */
 struct saved_alias {
 	struct kmem_cache *s;
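A closing usage note: with CONFIG_FAULT_INJECTION_DEBUG_FS enabled, init_fault_attr_dentries() gives the relocated failslab code the framework's standard runtime knobs (probability, interval, times, space and friends) under the failslab/ directory in debugfs, and this patch keeps the ignore-gfp-wait boolean beside them; writing 0 there extends injection to __GFP_WAIT allocations as well. The behaviour is unchanged from the old slab.c copy: the debugfs setup is moved, not rewritten, the only visible difference being the initcall's rename from failslab_debugfs to failslab_debugfs_init.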