Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 148172
b: refs/heads/master
c: 7e85ee0
h: refs/heads/master
v: v3
  • Loading branch information
Pekka Enberg committed Jun 12, 2009
1 parent 53b95ee commit 8aa0327
Show file tree
Hide file tree
Showing 8 changed files with 48 additions and 1 deletion.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: eb91f1d0a531289e18f5587dc197d12a251c66a3
refs/heads/master: 7e85ee0c1d15ca5f8bff0f514f158eba1742dd87
3 changes: 3 additions & 0 deletions trunk/include/linux/gfp.h
Original file line number Diff line number Diff line change
Expand Up @@ -85,6 +85,9 @@ struct vm_area_struct;
__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
__GFP_NORETRY|__GFP_NOMEMALLOC)

/*
 * Control slab gfp mask during early boot.
 *
 * The slab allocators are set up before interrupts are enabled, so early
 * boot allocations must not use flags (__GFP_WAIT, __GFP_IO, __GFP_FS)
 * that could sleep or re-enable interrupts.
 *
 * The whole expansion is parenthesized so the macro behaves as a single
 * value in any expression context (e.g. ~SLAB_GFP_BOOT_MASK).
 */
#define SLAB_GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))

/* Control allocation constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

Expand Down
2 changes: 2 additions & 0 deletions trunk/include/linux/slab.h
Original file line number Diff line number Diff line change
Expand Up @@ -319,4 +319,6 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
return kmalloc_node(size, flags | __GFP_ZERO, node);
}

void __init kmem_cache_init_late(void);

#endif /* _LINUX_SLAB_H */
5 changes: 5 additions & 0 deletions trunk/include/linux/slob_def.h
Original file line number Diff line number Diff line change
Expand Up @@ -34,4 +34,9 @@ static __always_inline void *__kmalloc(size_t size, gfp_t flags)
return kmalloc(size, flags);
}

/*
 * SLOB sets itself up fully in kmem_cache_init(); there is no late
 * (post-interrupt-enable) initialization step, so this is a stub kept
 * for interface parity with the SLAB and SLUB allocators.
 */
static inline void kmem_cache_init_late(void)
{
}

#endif /* __LINUX_SLOB_DEF_H */
2 changes: 2 additions & 0 deletions trunk/include/linux/slub_def.h
Original file line number Diff line number Diff line change
Expand Up @@ -302,4 +302,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
}
#endif

void __init kmem_cache_init_late(void);

#endif /* _LINUX_SLUB_DEF_H */
1 change: 1 addition & 0 deletions trunk/init/main.c
Original file line number Diff line number Diff line change
Expand Up @@ -640,6 +640,7 @@ asmlinkage void __init start_kernel(void)
"enabled early\n");
early_boot_irqs_on();
local_irq_enable();
kmem_cache_init_late();

/*
* HACK ALERT! This is early. We're enabling the console before
Expand Down
18 changes: 18 additions & 0 deletions trunk/mm/slab.c
Original file line number Diff line number Diff line change
Expand Up @@ -303,6 +303,12 @@ struct kmem_list3 {
int free_touched; /* updated without locking */
};

/*
* The slab allocator is initialized with interrupts disabled. Therefore, make
* sure early boot allocations don't accidentally enable interrupts.
*/
static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;

/*
* Need this for bootstrapping a per node allocator.
*/
Expand Down Expand Up @@ -1654,6 +1660,14 @@ void __init kmem_cache_init(void)
*/
}

/*
 * Called from start_kernel() once local interrupts have been enabled.
 * Lifts the early-boot restriction by widening the allocator's gfp
 * filter from SLAB_GFP_BOOT_MASK to the full __GFP_BITS_MASK, so
 * sleeping allocations (__GFP_WAIT/__GFP_IO/__GFP_FS) become legal.
 */
void __init kmem_cache_init_late(void)
{
	/*
	 * Interrupts are enabled now so all GFP allocations are safe.
	 */
	slab_gfp_mask = __GFP_BITS_MASK;
}

static int __init cpucache_init(void)
{
int cpu;
Expand Down Expand Up @@ -3354,6 +3368,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
unsigned long save_flags;
void *ptr;

flags &= slab_gfp_mask;

lockdep_trace_alloc(flags);

if (slab_should_failslab(cachep, flags))
Expand Down Expand Up @@ -3434,6 +3450,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
unsigned long save_flags;
void *objp;

flags &= slab_gfp_mask;

lockdep_trace_alloc(flags);

if (slab_should_failslab(cachep, flags))
Expand Down
16 changes: 16 additions & 0 deletions trunk/mm/slub.c
Original file line number Diff line number Diff line change
Expand Up @@ -178,6 +178,12 @@ static enum {
SYSFS /* Sysfs up */
} slab_state = DOWN;

/*
* The slab allocator is initialized with interrupts disabled. Therefore, make
* sure early boot allocations don't accidentally enable interrupts.
*/
static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;

/* A list of all slab caches on the system */
static DECLARE_RWSEM(slub_lock);
static LIST_HEAD(slab_caches);
Expand Down Expand Up @@ -1595,6 +1601,8 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
unsigned long flags;
unsigned int objsize;

gfpflags &= slab_gfp_mask;

lockdep_trace_alloc(gfpflags);
might_sleep_if(gfpflags & __GFP_WAIT);

Expand Down Expand Up @@ -3104,6 +3112,14 @@ void __init kmem_cache_init(void)
nr_cpu_ids, nr_node_ids);
}

/*
 * SLUB counterpart of the late init hook, called from start_kernel()
 * after local_irq_enable(): clears the early-boot gfp restriction by
 * restoring the full __GFP_BITS_MASK, permitting allocations that may
 * sleep or perform I/O.
 */
void __init kmem_cache_init_late(void)
{
	/*
	 * Interrupts are enabled now so all GFP allocations are safe.
	 */
	slab_gfp_mask = __GFP_BITS_MASK;
}

/*
* Find a mergeable slab cache
*/
Expand Down

0 comments on commit 8aa0327

Please sign in to comment.