From 210b5c06130f266370b5ff86e3cb6d860e1be29c Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Wed, 22 Oct 2008 23:00:38 +0400 Subject: [PATCH 01/12] SLUB: cleanup - define macros instead of hardcoded numbers Signed-off-by: Cyrill Gorcunov Acked-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slub.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/mm/slub.c b/mm/slub.c index 7ad489af956..7ec2888bffc 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -153,6 +153,10 @@ #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) #endif +#define OO_SHIFT 16 +#define OO_MASK ((1 << OO_SHIFT) - 1) +#define MAX_OBJS_PER_PAGE 65535 /* since page.objects is u16 */ + /* Internal SLUB flags */ #define __OBJECT_POISON 0x80000000 /* Poison object */ #define __SYSFS_ADD_DEFERRED 0x40000000 /* Not yet visible via sysfs */ @@ -290,7 +294,7 @@ static inline struct kmem_cache_order_objects oo_make(int order, unsigned long size) { struct kmem_cache_order_objects x = { - (order << 16) + (PAGE_SIZE << order) / size + (order << OO_SHIFT) + (PAGE_SIZE << order) / size }; return x; @@ -298,12 +302,12 @@ static inline struct kmem_cache_order_objects oo_make(int order, static inline int oo_order(struct kmem_cache_order_objects x) { - return x.x >> 16; + return x.x >> OO_SHIFT; } static inline int oo_objects(struct kmem_cache_order_objects x) { - return x.x & ((1 << 16) - 1); + return x.x & OO_MASK; } #ifdef CONFIG_SLUB_DEBUG @@ -764,8 +768,8 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search) } max_objects = (PAGE_SIZE << compound_order(page)) / s->size; - if (max_objects > 65535) - max_objects = 65535; + if (max_objects > MAX_OBJS_PER_PAGE) + max_objects = MAX_OBJS_PER_PAGE; if (page->objects != max_objects) { slab_err(s, page, "Wrong number of objects. Found %d but " @@ -1807,8 +1811,8 @@ static inline int slab_order(int size, int min_objects, int rem; int min_order = slub_min_order; - if ((PAGE_SIZE << min_order) / size > 65535) - return get_order(size * 65535) - 1; + if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE) + return get_order(size * MAX_OBJS_PER_PAGE) - 1; for (order = max(min_order, fls(min_objects * size - 1) - PAGE_SHIFT); From ce71e27c6fdc43c29f36d307b9100bde70c947fc Mon Sep 17 00:00:00 2001 From: Eduard - Gabriel Munteanu Date: Tue, 19 Aug 2008 20:43:25 +0300 Subject: [PATCH 02/12] SLUB: Replace __builtin_return_address(0) with _RET_IP_. This patch replaces __builtin_return_address(0) with _RET_IP_, since a previous patch moved _RET_IP_ and _THIS_IP_ to include/linux/kernel.h and they're widely available now. This makes for shorter and easier to read code. [penberg@cs.helsinki.fi: remove _RET_IP_ casts to void pointer] Signed-off-by: Eduard - Gabriel Munteanu Signed-off-by: Pekka Enberg --- include/linux/slab.h | 8 ++++---- mm/slab.c | 8 ++++---- mm/slub.c | 48 ++++++++++++++++++++++---------------------- 3 files changed, 32 insertions(+), 32 deletions(-) diff --git a/include/linux/slab.h b/include/linux/slab.h index 000da12b5cf..c97ed28559e 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -253,9 +253,9 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep, * request comes from. 
*/ #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) -extern void *__kmalloc_track_caller(size_t, gfp_t, void*); +extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long); #define kmalloc_track_caller(size, flags) \ - __kmalloc_track_caller(size, flags, __builtin_return_address(0)) + __kmalloc_track_caller(size, flags, _RET_IP_) #else #define kmalloc_track_caller(size, flags) \ __kmalloc(size, flags) @@ -271,10 +271,10 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, void*); * allocation request comes from. */ #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) -extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *); +extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long); #define kmalloc_node_track_caller(size, flags, node) \ __kmalloc_node_track_caller(size, flags, node, \ - __builtin_return_address(0)) + _RET_IP_) #else #define kmalloc_node_track_caller(size, flags, node) \ __kmalloc_node(size, flags, node) diff --git a/mm/slab.c b/mm/slab.c index 09187517f9d..a1478779901 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3686,9 +3686,9 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node) EXPORT_SYMBOL(__kmalloc_node); void *__kmalloc_node_track_caller(size_t size, gfp_t flags, - int node, void *caller) + int node, unsigned long caller) { - return __do_kmalloc_node(size, flags, node, caller); + return __do_kmalloc_node(size, flags, node, (void *)caller); } EXPORT_SYMBOL(__kmalloc_node_track_caller); #else @@ -3730,9 +3730,9 @@ void *__kmalloc(size_t size, gfp_t flags) } EXPORT_SYMBOL(__kmalloc); -void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller) +void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller) { - return __do_kmalloc(size, flags, caller); + return __do_kmalloc(size, flags, (void *)caller); } EXPORT_SYMBOL(__kmalloc_track_caller); diff --git a/mm/slub.c b/mm/slub.c index 7ec2888bffc..68ab260583d 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -182,7 +182,7 @@ static LIST_HEAD(slab_caches); * Tracking user of a slab. 
*/ struct track { - void *addr; /* Called from address */ + unsigned long addr; /* Called from address */ int cpu; /* Was running on cpu */ int pid; /* Pid context */ unsigned long when; /* When did the operation occur */ @@ -371,7 +371,7 @@ static struct track *get_track(struct kmem_cache *s, void *object, } static void set_track(struct kmem_cache *s, void *object, - enum track_item alloc, void *addr) + enum track_item alloc, unsigned long addr) { struct track *p; @@ -395,8 +395,8 @@ static void init_tracking(struct kmem_cache *s, void *object) if (!(s->flags & SLAB_STORE_USER)) return; - set_track(s, object, TRACK_FREE, NULL); - set_track(s, object, TRACK_ALLOC, NULL); + set_track(s, object, TRACK_FREE, 0UL); + set_track(s, object, TRACK_ALLOC, 0UL); } static void print_track(const char *s, struct track *t) @@ -405,7 +405,7 @@ static void print_track(const char *s, struct track *t) return; printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n", - s, t->addr, jiffies - t->when, t->cpu, t->pid); + s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid); } static void print_tracking(struct kmem_cache *s, void *object) @@ -870,7 +870,7 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page, } static int alloc_debug_processing(struct kmem_cache *s, struct page *page, - void *object, void *addr) + void *object, unsigned long addr) { if (!check_slab(s, page)) goto bad; @@ -910,7 +910,7 @@ bad: } static int free_debug_processing(struct kmem_cache *s, struct page *page, - void *object, void *addr) + void *object, unsigned long addr) { if (!check_slab(s, page)) goto fail; @@ -1033,10 +1033,10 @@ static inline void setup_object_debug(struct kmem_cache *s, struct page *page, void *object) {} static inline int alloc_debug_processing(struct kmem_cache *s, - struct page *page, void *object, void *addr) { return 0; } + struct page *page, void *object, unsigned long addr) { return 0; } static inline int free_debug_processing(struct kmem_cache *s, - struct page *page, void *object, void *addr) { return 0; } + struct page *page, void *object, unsigned long addr) { return 0; } static inline int slab_pad_check(struct kmem_cache *s, struct page *page) { return 1; } @@ -1503,8 +1503,8 @@ static inline int node_match(struct kmem_cache_cpu *c, int node) * we need to allocate a new slab. This is the slowest path since it involves * a call to the page allocator and the setup of a new slab. */ -static void *__slab_alloc(struct kmem_cache *s, - gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c) +static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, + unsigned long addr, struct kmem_cache_cpu *c) { void **object; struct page *new; @@ -1588,7 +1588,7 @@ debug: * Otherwise we can simply pick the next object from the lockless free list. 
*/ static __always_inline void *slab_alloc(struct kmem_cache *s, - gfp_t gfpflags, int node, void *addr) + gfp_t gfpflags, int node, unsigned long addr) { void **object; struct kmem_cache_cpu *c; @@ -1617,14 +1617,14 @@ static __always_inline void *slab_alloc(struct kmem_cache *s, void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) { - return slab_alloc(s, gfpflags, -1, __builtin_return_address(0)); + return slab_alloc(s, gfpflags, -1, _RET_IP_); } EXPORT_SYMBOL(kmem_cache_alloc); #ifdef CONFIG_NUMA void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) { - return slab_alloc(s, gfpflags, node, __builtin_return_address(0)); + return slab_alloc(s, gfpflags, node, _RET_IP_); } EXPORT_SYMBOL(kmem_cache_alloc_node); #endif @@ -1638,7 +1638,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node); * handling required then we can return immediately. */ static void __slab_free(struct kmem_cache *s, struct page *page, - void *x, void *addr, unsigned int offset) + void *x, unsigned long addr, unsigned int offset) { void *prior; void **object = (void *)x; @@ -1708,7 +1708,7 @@ debug: * with all sorts of special processing. */ static __always_inline void slab_free(struct kmem_cache *s, - struct page *page, void *x, void *addr) + struct page *page, void *x, unsigned long addr) { void **object = (void *)x; struct kmem_cache_cpu *c; @@ -1735,7 +1735,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x) page = virt_to_head_page(x); - slab_free(s, page, x, __builtin_return_address(0)); + slab_free(s, page, x, _RET_IP_); } EXPORT_SYMBOL(kmem_cache_free); @@ -2663,7 +2663,7 @@ void *__kmalloc(size_t size, gfp_t flags) if (unlikely(ZERO_OR_NULL_PTR(s))) return s; - return slab_alloc(s, flags, -1, __builtin_return_address(0)); + return slab_alloc(s, flags, -1, _RET_IP_); } EXPORT_SYMBOL(__kmalloc); @@ -2691,7 +2691,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node) if (unlikely(ZERO_OR_NULL_PTR(s))) return s; - return slab_alloc(s, flags, node, __builtin_return_address(0)); + return slab_alloc(s, flags, node, _RET_IP_); } EXPORT_SYMBOL(__kmalloc_node); #endif @@ -2748,7 +2748,7 @@ void kfree(const void *x) put_page(page); return; } - slab_free(page->slab, page, object, __builtin_return_address(0)); + slab_free(page->slab, page, object, _RET_IP_); } EXPORT_SYMBOL(kfree); @@ -3204,7 +3204,7 @@ static struct notifier_block __cpuinitdata slab_notifier = { #endif -void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller) +void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) { struct kmem_cache *s; @@ -3220,7 +3220,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller) } void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, - int node, void *caller) + int node, unsigned long caller) { struct kmem_cache *s; @@ -3431,7 +3431,7 @@ static void resiliency_test(void) {}; struct location { unsigned long count; - void *addr; + unsigned long addr; long long sum_time; long min_time; long max_time; @@ -3479,7 +3479,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s, { long start, end, pos; struct location *l; - void *caddr; + unsigned long caddr; unsigned long age = jiffies - track->when; start = -1; From e9beef1815ab3aa88925595582cf09e64b2b9894 Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Tue, 28 Oct 2008 22:02:26 +0300 Subject: [PATCH 03/12] slub - fix get_object_page comment Use 'slab page' instead of 'slab object'. 
Acked-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slub.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/slub.c b/mm/slub.c index 68ab260583d..8f4edacd082 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1739,7 +1739,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x) } EXPORT_SYMBOL(kmem_cache_free); -/* Figure out on which slab object the object resides */ +/* Figure out on which slab page the object resides */ static struct page *get_object_page(const void *x) { struct page *page = virt_to_head_page(x); From 249b9f331ec162af5a1fdb80f90cce77c2043985 Mon Sep 17 00:00:00 2001 From: roel kluin Date: Wed, 29 Oct 2008 17:18:07 -0400 Subject: [PATCH 04/12] slab: unsigned slabp->inuse cannot be less than 0 unsigned slabp->inuse cannot be less than 0 Acked-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slab.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/slab.c b/mm/slab.c index a1478779901..445bcc87b34 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2997,7 +2997,7 @@ retry: * there must be at least one object available for * allocation. */ - BUG_ON(slabp->inuse < 0 || slabp->inuse >= cachep->num); + BUG_ON(slabp->inuse >= cachep->num); while (slabp->inuse < cachep->num && batchcount--) { STATS_INC_ALLOCED(cachep); From 0094de92a4f1da3a845ccc4ecb12ec0db8e48997 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Tue, 25 Nov 2008 19:14:19 -0800 Subject: [PATCH 05/12] slub: make early_kmem_cache_node_alloc void The return value for early_kmem_cache_node_alloc() is unused, so it is better defined as void. Acked-by: Christoph Lameter Signed-off-by: David Rientjes Signed-off-by: Pekka Enberg --- mm/slub.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/mm/slub.c b/mm/slub.c index 8f4edacd082..b6968899cb5 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2077,8 +2077,7 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags) * when allocating for the kmalloc_node_cache. This is used for bootstrapping * memory on a fresh node that has no slab structures yet. */ -static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags, - int node) +static void early_kmem_cache_node_alloc(gfp_t gfpflags, int node) { struct page *page; struct kmem_cache_node *n; @@ -2116,7 +2115,6 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags, local_irq_save(flags); add_partial(n, page, 0); local_irq_restore(flags); - return n; } static void free_kmem_cache_nodes(struct kmem_cache *s) @@ -2148,8 +2146,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags) n = &s->local_node; else { if (slab_state == DOWN) { - n = early_kmem_cache_node_alloc(gfpflags, - node); + early_kmem_cache_node_alloc(gfpflags, node); continue; } n = kmem_cache_alloc_node(kmalloc_caches, From 249da166582801648432d0198be9407fb5ccf9f5 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Fri, 21 Nov 2008 12:56:22 +0000 Subject: [PATCH 06/12] slab: Update the kmem_cache_create documentation regarding the name parameter kmem_cache implementations like slub are allowed to merge multiple caches but only the initial name is preserved. Therefore, kmem_cache_name() is not guaranteed to return the same pointer passed to the former function. This patch updates the documentation to make this clearer. 
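For illustration only (the structure, function and cache names below are made up), a module using this interface keeps the name string valid for the lifetime of the cache and does not assume that kmem_cache_name() will hand back that exact pointer once a mergeable allocator such as slub has aliased the cache:

#include <linux/module.h>
#include <linux/slab.h>

struct foo_record {
	int id;
	char payload[64];
};

static struct kmem_cache *foo_cachep;

static int __init foo_init(void)
{
	/*
	 * The string literal lives in the module image, so it stays
	 * valid as long as the cache is destroyed before the module
	 * is unloaded.
	 */
	foo_cachep = kmem_cache_create("foo_record",
				       sizeof(struct foo_record), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!foo_cachep)
		return -ENOMEM;

	/*
	 * With slub this cache may have been merged with a compatible
	 * existing one, so kmem_cache_name(foo_cachep) can legitimately
	 * return a different pointer (and a different string) than the
	 * "foo_record" literal passed in above.
	 */
	return 0;
}

static void __exit foo_exit(void)
{
	kmem_cache_destroy(foo_cachep);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");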
Signed-off-by: Catalin Marinas Signed-off-by: Pekka Enberg --- mm/slab.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mm/slab.c b/mm/slab.c index 09187517f9d..c366e3910e8 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2123,6 +2123,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep) * * @name must be valid until the cache is destroyed. This implies that * the module calling this has to destroy the cache before getting unloaded. + * Note that kmem_cache_name() is not guaranteed to return the same pointer, + * therefore applications must manage it themselves. * * The flags are * From 9f6c708e5cbf57ee31f6ddaa2cd0262087271b95 Mon Sep 17 00:00:00 2001 From: Nick Andrew Date: Fri, 5 Dec 2008 14:08:08 +1100 Subject: [PATCH 07/12] slub: Fix incorrect use of loose It should be 'lose', not 'loose'. Signed-off-by: Nick Andrew Signed-off-by: Pekka Enberg --- mm/slub.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/slub.c b/mm/slub.c index b6968899cb5..5c8dbe3ae50 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -696,7 +696,7 @@ static int check_object(struct kmem_cache *s, struct page *page, if (!check_valid_pointer(s, page, get_freepointer(s, p))) { object_err(s, page, p, "Freepointer corrupt"); /* - * No choice but to zap it and thus loose the remainder + * No choice but to zap it and thus lose the remainder * of the free objects in this slab. May cause * another error because the object count is now wrong. */ @@ -4344,7 +4344,7 @@ static void sysfs_slab_remove(struct kmem_cache *s) /* * Need to buffer aliases during bootup until sysfs becomes - * available lest we loose that information. + * available lest we lose that information. */ struct saved_alias { struct kmem_cache *s; From 773ff60e841461cb1f9374a713ffcda029b8c317 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Tue, 23 Dec 2008 19:37:01 +0900 Subject: [PATCH 08/12] SLUB: failslab support Currently fault-injection capability for SLAB allocator is only available to SLAB. This patch makes it available to SLUB, too. [penberg@cs.helsinki.fi: unify slab and slub implementations] Cc: Christoph Lameter Cc: Matt Mackall Signed-off-by: Akinobu Mita Signed-off-by: Pekka Enberg --- include/linux/fault-inject.h | 9 +++++ lib/Kconfig.debug | 1 + mm/Makefile | 1 + mm/failslab.c | 59 ++++++++++++++++++++++++++++ mm/slab.c | 75 +++--------------------------------- mm/slub.c | 4 ++ 6 files changed, 79 insertions(+), 70 deletions(-) create mode 100644 mm/failslab.c diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h index 32368c4f032..06ca9b21dad 100644 --- a/include/linux/fault-inject.h +++ b/include/linux/fault-inject.h @@ -81,4 +81,13 @@ static inline void cleanup_fault_attr_dentries(struct fault_attr *attr) #endif /* CONFIG_FAULT_INJECTION */ +#ifdef CONFIG_FAILSLAB +extern bool should_failslab(size_t size, gfp_t gfpflags); +#else +static inline bool should_failslab(size_t size, gfp_t gfpflags) +{ + return false; +} +#endif /* CONFIG_FAILSLAB */ + #endif /* _LINUX_FAULT_INJECT_H */ diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index b0f239e443b..af65ae7f054 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -699,6 +699,7 @@ config FAULT_INJECTION config FAILSLAB bool "Fault-injection capability for kmalloc" depends on FAULT_INJECTION + depends on SLAB || SLUB help Provide fault-injection capability for kmalloc. 
diff --git a/mm/Makefile b/mm/Makefile index c06b45a1ff5..51c27709cc7 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -28,6 +28,7 @@ obj-$(CONFIG_SLOB) += slob.o obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o obj-$(CONFIG_SLAB) += slab.o obj-$(CONFIG_SLUB) += slub.o +obj-$(CONFIG_FAILSLAB) += failslab.o obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o obj-$(CONFIG_FS_XIP) += filemap_xip.o obj-$(CONFIG_MIGRATION) += migrate.o diff --git a/mm/failslab.c b/mm/failslab.c new file mode 100644 index 00000000000..7c6ea6493f8 --- /dev/null +++ b/mm/failslab.c @@ -0,0 +1,59 @@ +#include + +static struct { + struct fault_attr attr; + u32 ignore_gfp_wait; +#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS + struct dentry *ignore_gfp_wait_file; +#endif +} failslab = { + .attr = FAULT_ATTR_INITIALIZER, + .ignore_gfp_wait = 1, +}; + +bool should_failslab(size_t size, gfp_t gfpflags) +{ + if (gfpflags & __GFP_NOFAIL) + return false; + + if (failslab.ignore_gfp_wait && (gfpflags & __GFP_WAIT)) + return false; + + return should_fail(&failslab.attr, size); +} + +static int __init setup_failslab(char *str) +{ + return setup_fault_attr(&failslab.attr, str); +} +__setup("failslab=", setup_failslab); + +#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS + +static int __init failslab_debugfs_init(void) +{ + mode_t mode = S_IFREG | S_IRUSR | S_IWUSR; + struct dentry *dir; + int err; + + err = init_fault_attr_dentries(&failslab.attr, "failslab"); + if (err) + return err; + dir = failslab.attr.dentries.dir; + + failslab.ignore_gfp_wait_file = + debugfs_create_bool("ignore-gfp-wait", mode, dir, + &failslab.ignore_gfp_wait); + + if (!failslab.ignore_gfp_wait_file) { + err = -ENOMEM; + debugfs_remove(failslab.ignore_gfp_wait_file); + cleanup_fault_attr_dentries(&failslab.attr); + } + + return err; +} + +late_initcall(failslab_debugfs_init); + +#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ diff --git a/mm/slab.c b/mm/slab.c index 09187517f9d..c347dd8480c 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3106,79 +3106,14 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, #define cache_alloc_debugcheck_after(a,b,objp,d) (objp) #endif -#ifdef CONFIG_FAILSLAB - -static struct failslab_attr { - - struct fault_attr attr; - - u32 ignore_gfp_wait; -#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS - struct dentry *ignore_gfp_wait_file; -#endif - -} failslab = { - .attr = FAULT_ATTR_INITIALIZER, - .ignore_gfp_wait = 1, -}; - -static int __init setup_failslab(char *str) -{ - return setup_fault_attr(&failslab.attr, str); -} -__setup("failslab=", setup_failslab); - -static int should_failslab(struct kmem_cache *cachep, gfp_t flags) +static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags) { if (cachep == &cache_cache) - return 0; - if (flags & __GFP_NOFAIL) - return 0; - if (failslab.ignore_gfp_wait && (flags & __GFP_WAIT)) - return 0; + return false; - return should_fail(&failslab.attr, obj_size(cachep)); + return should_failslab(obj_size(cachep), flags); } -#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS - -static int __init failslab_debugfs(void) -{ - mode_t mode = S_IFREG | S_IRUSR | S_IWUSR; - struct dentry *dir; - int err; - - err = init_fault_attr_dentries(&failslab.attr, "failslab"); - if (err) - return err; - dir = failslab.attr.dentries.dir; - - failslab.ignore_gfp_wait_file = - debugfs_create_bool("ignore-gfp-wait", mode, dir, - &failslab.ignore_gfp_wait); - - if (!failslab.ignore_gfp_wait_file) { - err = -ENOMEM; - debugfs_remove(failslab.ignore_gfp_wait_file); - cleanup_fault_attr_dentries(&failslab.attr); - } - - return err; -} - 
-late_initcall(failslab_debugfs); - -#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ - -#else /* CONFIG_FAILSLAB */ - -static inline int should_failslab(struct kmem_cache *cachep, gfp_t flags) -{ - return 0; -} - -#endif /* CONFIG_FAILSLAB */ - static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) { void *objp; @@ -3381,7 +3316,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, unsigned long save_flags; void *ptr; - if (should_failslab(cachep, flags)) + if (slab_should_failslab(cachep, flags)) return NULL; cache_alloc_debugcheck_before(cachep, flags); @@ -3457,7 +3392,7 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller) unsigned long save_flags; void *objp; - if (should_failslab(cachep, flags)) + if (slab_should_failslab(cachep, flags)) return NULL; cache_alloc_debugcheck_before(cachep, flags); diff --git a/mm/slub.c b/mm/slub.c index a2cd47d89e0..640fde7e354 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -24,6 +24,7 @@ #include #include #include +#include /* * Lock order: @@ -1591,6 +1592,9 @@ static __always_inline void *slab_alloc(struct kmem_cache *s, unsigned long flags; unsigned int objsize; + if (should_failslab(s->objsize, gfpflags)) + return NULL; + local_irq_save(flags); c = get_cpu_slab(s, smp_processor_id()); objsize = c->objsize; From 89124d706db0aa95daacfa4c0df45a43a44d44f4 Mon Sep 17 00:00:00 2001 From: OGAWA Hirofumi Date: Wed, 19 Nov 2008 21:23:59 +0900 Subject: [PATCH 09/12] slub: Add might_sleep_if() to slab_alloc() Currently SLUB doesn't warn about __GFP_WAIT. Add it into slab_alloc(). Acked-by: Christoph Lameter Signed-off-by: OGAWA Hirofumi Signed-off-by: Pekka Enberg --- mm/slub.c | 1 + 1 file changed, 1 insertion(+) diff --git a/mm/slub.c b/mm/slub.c index a2cd47d89e0..704cfa34f9a 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1591,6 +1591,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s, unsigned long flags; unsigned int objsize; + might_sleep_if(gfpflags & __GFP_WAIT); local_irq_save(flags); c = get_cpu_slab(s, smp_processor_id()); objsize = c->objsize; From 8759ec50a6cad7ca5a6d63e657d25b85ab5ba44a Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Wed, 26 Nov 2008 10:01:31 +0200 Subject: [PATCH 10/12] slab: remove GFP_THISNODE clearing from alloc_slabmgmt() Commit 6cb062296f73e74768cca2f3eaf90deac54de02d ("Categorize GFP flags") left one call-site in alloc_slabmgmt() to clear GFP_THISNODE instead of GFP_CONSTRAINT_MASK. Unfortunately, that ends up clearing __GFP_NOWARN and __GFP_NORETRY as well which is not what we want. As the only caller of alloc_slabmgmt() already clears GFP_CONSTRAINT_MASK before passing local_flags to it, we can just remove the clearing of GFP_THISNODE. This patch should fix spurious page allocation failure warnings on the mempool_alloc() path. See the following URL for the original discussion of the bug: http://lkml.org/lkml/2008/10/27/100 Acked-by: Christoph Lameter Reported-by: Miklos Szeredi Signed-off-by: Pekka Enberg --- mm/slab.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/slab.c b/mm/slab.c index 09187517f9d..d4b87690b27 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2609,7 +2609,7 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp, if (OFF_SLAB(cachep)) { /* Slab management obj is off-slab. 
*/ slabp = kmem_cache_alloc_node(cachep->slabp_cache, - local_flags & ~GFP_THISNODE, nodeid); + local_flags, nodeid); if (!slabp) return NULL; } else { From dfcd3610289132a762b7dc0eaf33998262cd9e20 Mon Sep 17 00:00:00 2001 From: Pascal Terjan Date: Tue, 25 Nov 2008 15:08:19 +0100 Subject: [PATCH 11/12] slab: Fix comment on #endif This #endif in slab.h is described as closing the inner block while it's for the big CONFIG_NUMA one. That makes reading the code a bit harder. This trivial patch fixes the comment. Signed-off-by: Pascal Terjan Signed-off-by: Pekka Enberg --- include/linux/slab.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/slab.h b/include/linux/slab.h index 000da12b5cf..9d8ca14be3c 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -285,7 +285,7 @@ extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *); #define kmalloc_node_track_caller(size, flags, node) \ kmalloc_track_caller(size, flags) -#endif /* DEBUG_SLAB */ +#endif /* CONFIG_NUMA */ /* * Shortcuts From 7b8f3b66d9d7e5f021ae535620b9b52833f4876e Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Wed, 17 Dec 2008 22:09:46 -0800 Subject: [PATCH 12/12] slub: avoid leaking caches or refcounts on sysfs error If a slab cache is mergeable and the sysfs alias cannot be added, the target cache shall have its refcount decremented. kmem_cache_create() will return NULL, so if kmem_cache_destroy() is ever called on the target cache, it will never be freed if the refcount has been leaked. Likewise, if a slab cache is not mergeable and the sysfs link cannot be added, the new cache shall be removed from the slab_caches list. kmem_cache_create() will return NULL, so it will be impossible to call kmem_cache_destroy() on it. Both of these operations require slub_lock since refcount of all slab caches and slab_caches are protected by the lock. In the mergeable case, it would be better to restore objsize and offset back to their original values, but this could race with another merge since slub_lock was dropped. Cc: Christoph Lameter Signed-off-by: David Rientjes Signed-off-by: Pekka Enberg --- mm/slub.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/mm/slub.c b/mm/slub.c index 704cfa34f9a..d057ceb3645 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3124,8 +3124,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *))); up_write(&slub_lock); - if (sysfs_slab_alias(s, name)) + if (sysfs_slab_alias(s, name)) { + down_write(&slub_lock); + s->refcount--; + up_write(&slub_lock); goto err; + } return s; } @@ -3135,8 +3139,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size, align, flags, ctor)) { list_add(&s->list, &slab_caches); up_write(&slub_lock); - if (sysfs_slab_add(s)) + if (sysfs_slab_add(s)) { + down_write(&slub_lock); + list_del(&s->list); + up_write(&slub_lock); + kfree(s); goto err; + } return s; } kfree(s);
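As a footnote to PATCH 01/12: the macros it introduces name an encoding in which the single word of struct kmem_cache_order_objects carries the page order in the bits above OO_SHIFT and the object count in the low OO_SHIFT bits, which is why the count is capped at 65535 (page.objects is a u16). A stand-alone user-space sketch of that packing, assuming a 4096-byte page size:

#include <stdio.h>

#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)
#define PAGE_SIZE	4096UL	/* assumed page size for this sketch */

struct kmem_cache_order_objects {
	unsigned long x;
};

static struct kmem_cache_order_objects oo_make(int order, unsigned long size)
{
	struct kmem_cache_order_objects x = {
		((unsigned long)order << OO_SHIFT) + (PAGE_SIZE << order) / size
	};

	return x;
}

static int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}

static int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;
}

int main(void)
{
	/* an order-1 slab (two assumed 4K pages) of 64-byte objects */
	struct kmem_cache_order_objects oo = oo_make(1, 64);

	printf("order=%d objects=%d\n", oo_order(oo), oo_objects(oo));
	return 0;
}

Compiled with a plain cc, this prints order=1 objects=128, since (4096 << 1) / 64 = 128.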
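Similarly, for PATCH 02/12: _RET_IP_ is defined in include/linux/kernel.h as (unsigned long)__builtin_return_address(0), so the conversion is a type change at the call sites rather than a behavioural one. A user-space sketch (the helper names are made up) of the caller-tracking idiom behind kmalloc_track_caller() and __kmalloc_track_caller():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Mirrors the kernel's definition, with extra parentheses for safety. */
#define _RET_IP_	((unsigned long)__builtin_return_address(0))

/* Stand-in for __kmalloc_track_caller(): record who asked for memory. */
static void *alloc_track_caller(size_t size, unsigned long caller)
{
	printf("%zu bytes requested from 0x%lx\n", size, caller);
	return malloc(size);
}

/*
 * Stand-in for a helper such as kstrdup(): the allocation should be
 * charged to *its* caller, so it forwards _RET_IP_, the address this
 * helper will return to.
 */
static __attribute__((noinline)) char *dup_string(const char *s)
{
	size_t len = strlen(s) + 1;
	char *p = alloc_track_caller(len, _RET_IP_);

	if (p)
		memcpy(p, s, len);
	return p;
}

int main(void)
{
	char *copy = dup_string("slub");	/* charged to main() */

	free(copy);
	return 0;
}

Because _RET_IP_ is expanded in the body of dup_string(), the recorded address is the one dup_string() returns to, i.e. the call site in main() -- the same way kmalloc_track_caller() attributes an allocation made inside a helper to that helper's caller.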