Commit 1b8a471

---
r: 56507
b: refs/heads/master
c: afc0ced
h: refs/heads/master
i:
  56505: f1cc557
  56503: 3836790
v: v3
Nick Piggin authored and Linus Torvalds committed May 17, 2007
1 parent 13774e2 commit 1b8a471
Showing 3 changed files with 48 additions and 13 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: b2cd64153b94473f6bd82448a68b8e8c041676ea
+refs/heads/master: afc0cedbe9138e3e8b38bfa1e4dfd01a2c537d62
7 changes: 2 additions & 5 deletions trunk/init/Kconfig
@@ -577,14 +577,11 @@ config SLUB
and has enhanced diagnostics.

config SLOB
-#
-# SLOB does not support SMP because SLAB_DESTROY_BY_RCU is unsupported
-#
-depends on EMBEDDED && !SMP && !SPARSEMEM
+depends on EMBEDDED && !SPARSEMEM
bool "SLOB (Simple Allocator)"
help
SLOB replaces the SLAB allocator with a drastically simpler
-allocator. SLOB is more space efficient that SLAB but does not
+allocator. SLOB is more space efficient than SLAB but does not
scale well (single lock for all operations) and is also highly
susceptible to fragmentation. SLUB can accomplish a higher object
density. It is usually better to use SLUB instead of SLOB.
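With the !SMP dependency removed, a SLOB kernel can now honour SLAB_DESTROY_BY_RCU caches instead of ruling them out at configuration time. A minimal, hypothetical sketch of creating such a cache against the 2.6.21-era kmem_cache_create() signature follows; the my_entry type, my_cache variable, and init function are illustrative and not part of this commit, and the NULL dtor reflects the BUG_ON(dtor) added in mm/slob.c below.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>

/* hypothetical object type kept in an RCU-protected lookup structure */
struct my_entry {
	int key;
	struct my_entry *next;
};

static struct kmem_cache *my_cache;

static int __init my_cache_init(void)
{
	/* no constructor or destructor; SLAB_DESTROY_BY_RCU forbids a dtor */
	my_cache = kmem_cache_create("my_entry", sizeof(struct my_entry), 0,
				     SLAB_DESTROY_BY_RCU, NULL, NULL);
	return my_cache ? 0 : -ENOMEM;
}
module_init(my_cache_init);

Objects from such a cache are released with kmem_cache_free() as usual; after this patch SLOB defers reuse of their memory until an RCU grace period has passed, as the mm/slob.c hunks below implement.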
52 changes: 45 additions & 7 deletions trunk/mm/slob.c
@@ -35,6 +35,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
+#include <linux/rcupdate.h>

struct slob_block {
int units;
@@ -53,6 +54,16 @@ struct bigblock {
};
typedef struct bigblock bigblock_t;

+/*
+ * struct slob_rcu is inserted at the tail of allocated slob blocks, which
+ * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
+ * the block using call_rcu.
+ */
+struct slob_rcu {
+struct rcu_head head;
+int size;
+};

static slob_t arena = { .next = &arena, .units = 1 };
static slob_t *slobfree = &arena;
static bigblock_t *bigblocks;
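The footer placement that the comment above describes can be pictured as follows; this diagram is an illustration inferred from the patch (the kmem_cache_create() hunk further down grows c->size by sizeof(struct slob_rcu)), not text taken from it.

/*
 * Illustration only: layout of an object b from a SLAB_DESTROY_BY_RCU
 * cache whose caller-visible object size is S, so c->size = S +
 * sizeof(struct slob_rcu).
 *
 *   b                         b + S                       b + c->size
 *   |------ object data ------|------ struct slob_rcu ------|
 *
 * kmem_cache_free() writes the footer at b + c->size - sizeof(struct slob_rcu),
 * and kmem_rcu_free() recovers b by subtracting that same distance, S, from
 * the rcu_head address once the grace period has elapsed.
 */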
@@ -266,6 +277,7 @@ size_t ksize(const void *block)

struct kmem_cache {
unsigned int size, align;
+unsigned long flags;
const char *name;
void (*ctor)(void *, struct kmem_cache *, unsigned long);
void (*dtor)(void *, struct kmem_cache *, unsigned long);
@@ -283,6 +295,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
if (c) {
c->name = name;
c->size = size;
+if (flags & SLAB_DESTROY_BY_RCU) {
+BUG_ON(dtor);
+/* leave room for rcu footer at the end of object */
+c->size += sizeof(struct slob_rcu);
+}
+c->flags = flags;
c->ctor = ctor;
c->dtor = dtor;
/* ignore alignment unless it's forced */
@@ -328,15 +346,35 @@ void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags)
}
EXPORT_SYMBOL(kmem_cache_zalloc);

-void kmem_cache_free(struct kmem_cache *c, void *b)
+static void __kmem_cache_free(void *b, int size)
{
-if (c->dtor)
-c->dtor(b, c, 0);
-
-if (c->size < PAGE_SIZE)
-slob_free(b, c->size);
+if (size < PAGE_SIZE)
+slob_free(b, size);
else
-free_pages((unsigned long)b, get_order(c->size));
+free_pages((unsigned long)b, get_order(size));
+}
+
+static void kmem_rcu_free(struct rcu_head *head)
+{
+struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
+void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
+
+__kmem_cache_free(b, slob_rcu->size);
+}
+
+void kmem_cache_free(struct kmem_cache *c, void *b)
+{
+if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
+struct slob_rcu *slob_rcu;
+slob_rcu = b + (c->size - sizeof(struct slob_rcu));
+INIT_RCU_HEAD(&slob_rcu->head);
+slob_rcu->size = c->size;
+call_rcu(&slob_rcu->head, kmem_rcu_free);
+} else {
+if (c->dtor)
+c->dtor(b, c, 0);
+__kmem_cache_free(b, c->size);
+}
}
EXPORT_SYMBOL(kmem_cache_free);

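For context on why kmem_cache_free() now defers to call_rcu(): SLAB_DESTROY_BY_RCU only guarantees that the memory remains a valid object of the cache's type until a grace period ends, so lock-free readers may dereference a pointer they found earlier but must revalidate it. A minimal, hypothetical reader of the kind this enables is sketched below; the slots array, hash value, key, and refcount are illustrative and not part of this commit (the entry type extends the my_entry sketch above with a refcount).

#include <linux/rcupdate.h>
#include <asm/atomic.h>

/* hypothetical entry type, allocated from a SLAB_DESTROY_BY_RCU cache */
struct my_entry {
	int key;
	atomic_t refcount;
	struct my_entry *next;
};

static struct my_entry *my_lookup(struct my_entry **slots, unsigned int hash,
				  int key)
{
	struct my_entry *e;

	rcu_read_lock();
	e = rcu_dereference(slots[hash]);
	/*
	 * Even if e was freed and reallocated as another my_entry meanwhile,
	 * the memory stays a struct my_entry until the grace period ends, so
	 * the dereference is safe; key and refcount revalidate the object.
	 */
	if (e && (e->key != key || !atomic_inc_not_zero(&e->refcount)))
		e = NULL;
	rcu_read_unlock();
	return e;
}

If the revalidation fails the reader simply treats the slot as empty; the writer side needs nothing beyond kmem_cache_free(), since the grace period is now handled inside the allocator.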
