Skip to content

Commit

Permalink
[PATCH] Manage jbd allocations from its own slabs
Browse files Browse the repository at this point in the history
JBD currently allocates commit and frozen buffers from slabs.  With
CONFIG_SLAB_DEBUG, it's possible for an allocation to cross the page
boundary causing IO problems.

https://bugzilla.redhat.com/bugzilla/show_bug.cgi?id=200127

So, instead of allocating these from regular slabs, manage the allocations
from JBD's own slabs and disable slab debugging for those slabs.

[akpm@osdl.org: cleanups]
Signed-off-by: Badari Pulavarty <pbadari@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
  • Loading branch information
Badari Pulavarty authored and Linus Torvalds committed Aug 27, 2006
1 parent 4c4d50f commit ea81739
Show file tree
Hide file tree
Showing 4 changed files with 97 additions and 13 deletions.
6 changes: 3 additions & 3 deletions fs/jbd/commit.c
Original file line number Diff line number Diff line change
Expand Up @@ -261,7 +261,7 @@ void journal_commit_transaction(journal_t *journal)
struct buffer_head *bh = jh2bh(jh);

jbd_lock_bh_state(bh);
kfree(jh->b_committed_data);
jbd_slab_free(jh->b_committed_data, bh->b_size);
jh->b_committed_data = NULL;
jbd_unlock_bh_state(bh);
}
Expand Down Expand Up @@ -745,14 +745,14 @@ void journal_commit_transaction(journal_t *journal)
* Otherwise, we can just throw away the frozen data now.
*/
if (jh->b_committed_data) {
kfree(jh->b_committed_data);
jbd_slab_free(jh->b_committed_data, bh->b_size);
jh->b_committed_data = NULL;
if (jh->b_frozen_data) {
jh->b_committed_data = jh->b_frozen_data;
jh->b_frozen_data = NULL;
}
} else if (jh->b_frozen_data) {
kfree(jh->b_frozen_data);
jbd_slab_free(jh->b_frozen_data, bh->b_size);
jh->b_frozen_data = NULL;
}

Expand Down
92 changes: 86 additions & 6 deletions fs/jbd/journal.c
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,7 @@ EXPORT_SYMBOL(journal_force_commit);

static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
static void __journal_abort_soft (journal_t *journal, int errno);
static int journal_create_jbd_slab(size_t slab_size);

/*
* Helper function used to manage commit timeouts
Expand Down Expand Up @@ -328,10 +329,10 @@ int journal_write_metadata_buffer(transaction_t *transaction,
char *tmp;

jbd_unlock_bh_state(bh_in);
tmp = jbd_rep_kmalloc(bh_in->b_size, GFP_NOFS);
tmp = jbd_slab_alloc(bh_in->b_size, GFP_NOFS);
jbd_lock_bh_state(bh_in);
if (jh_in->b_frozen_data) {
kfree(tmp);
jbd_slab_free(tmp, bh_in->b_size);
goto repeat;
}

Expand Down Expand Up @@ -1069,17 +1070,17 @@ static int load_superblock(journal_t *journal)
int journal_load(journal_t *journal)
{
int err;
journal_superblock_t *sb;

err = load_superblock(journal);
if (err)
return err;

sb = journal->j_superblock;
/* If this is a V2 superblock, then we have to check the
* features flags on it. */

if (journal->j_format_version >= 2) {
journal_superblock_t *sb = journal->j_superblock;

if ((sb->s_feature_ro_compat &
~cpu_to_be32(JFS_KNOWN_ROCOMPAT_FEATURES)) ||
(sb->s_feature_incompat &
Expand All @@ -1090,6 +1091,13 @@ int journal_load(journal_t *journal)
}
}

/*
* Create a slab for this blocksize
*/
err = journal_create_jbd_slab(cpu_to_be32(sb->s_blocksize));
if (err)
return err;

/* Let the recovery code check whether it needs to recover any
* data from the journal. */
if (journal_recover(journal))
Expand Down Expand Up @@ -1611,6 +1619,77 @@ void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
return kmalloc(size, flags | (retry ? __GFP_NOFAIL : 0));
}

/*
* jbd slab management: create 1k, 2k, 4k, 8k slabs as needed
* and allocate frozen and commit buffers from these slabs.
*
* Reason for doing this is to avoid SLAB_DEBUG, since it could
* cause a bh to cross the page boundary.
*/

/* Number of slab-cache slots; indexed by JBD_SLAB_INDEX(blocksize). */
#define JBD_MAX_SLABS 5
/*
 * Map a block size to its slab slot: 1k->0, 2k->1, 4k->2, 8k->4.
 * Argument is parenthesized so the macro expands correctly for any
 * expression (e.g. JBD_SLAB_INDEX(a + b)).
 */
#define JBD_SLAB_INDEX(size)  ((size) >> 11)

static kmem_cache_t *jbd_slab[JBD_MAX_SLABS];
/* Slot 3 (6k) is unreachable for power-of-two block sizes, hence NULL. */
static const char *jbd_slab_names[JBD_MAX_SLABS] = {
	"jbd_1k", "jbd_2k", "jbd_4k", NULL, "jbd_8k"
};

static void journal_destroy_jbd_slabs(void)
{
int i;

for (i = 0; i < JBD_MAX_SLABS; i++) {
if (jbd_slab[i])
kmem_cache_destroy(jbd_slab[i]);
jbd_slab[i] = NULL;
}
}

/*
 * Lazily create the slab cache for @slab_size (the journal blocksize).
 * Alignment is forced to the slab size itself so that no object can
 * straddle a page boundary, which is the whole point of these caches.
 *
 * Returns 0 on success (including when the cache already exists),
 * -ENOMEM if the cache could not be created.
 */
static int journal_create_jbd_slab(size_t slab_size)
{
	int idx = JBD_SLAB_INDEX(slab_size);

	BUG_ON(idx >= JBD_MAX_SLABS);

	/* Fast path: a cache for this size was set up by an earlier mount. */
	if (jbd_slab[idx] != NULL)
		return 0;

	/*
	 * Object size == alignment guarantees allocations never cross a
	 * page boundary, even with slab debugging enabled elsewhere.
	 */
	jbd_slab[idx] = kmem_cache_create(jbd_slab_names[idx],
				slab_size, slab_size, 0, NULL, NULL);
	if (jbd_slab[idx] == NULL) {
		printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n");
		return -ENOMEM;
	}
	return 0;
}

/*
 * Allocate a @size-byte buffer from the matching jbd slab cache.
 * The cache must already exist (created at journal_load() time);
 * __GFP_NOFAIL means this never returns NULL.
 */
void * jbd_slab_alloc(size_t size, gfp_t flags)
{
	kmem_cache_t *cachep = jbd_slab[JBD_SLAB_INDEX(size)];

	BUG_ON(cachep == NULL);
	return kmem_cache_alloc(cachep, flags | __GFP_NOFAIL);
}

/*
 * Return @ptr, previously obtained via jbd_slab_alloc(@size, ...),
 * to its slab cache.  @size must match the allocation size so the
 * correct cache is selected.
 */
void jbd_slab_free(void *ptr, size_t size)
{
	kmem_cache_t *cachep = jbd_slab[JBD_SLAB_INDEX(size)];

	BUG_ON(cachep == NULL);
	kmem_cache_free(cachep, ptr);
}

/*
* Journal_head storage management
*/
Expand Down Expand Up @@ -1799,13 +1878,13 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
printk(KERN_WARNING "%s: freeing "
"b_frozen_data\n",
__FUNCTION__);
kfree(jh->b_frozen_data);
jbd_slab_free(jh->b_frozen_data, bh->b_size);
}
if (jh->b_committed_data) {
printk(KERN_WARNING "%s: freeing "
"b_committed_data\n",
__FUNCTION__);
kfree(jh->b_committed_data);
jbd_slab_free(jh->b_committed_data, bh->b_size);
}
bh->b_private = NULL;
jh->b_bh = NULL; /* debug, really */
Expand Down Expand Up @@ -1961,6 +2040,7 @@ static void journal_destroy_caches(void)
journal_destroy_revoke_caches();
journal_destroy_journal_head_cache();
journal_destroy_handle_cache();
journal_destroy_jbd_slabs();
}

static int __init journal_init(void)
Expand Down
9 changes: 5 additions & 4 deletions fs/jbd/transaction.c
Original file line number Diff line number Diff line change
Expand Up @@ -666,8 +666,9 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
if (!frozen_buffer) {
JBUFFER_TRACE(jh, "allocate memory for buffer");
jbd_unlock_bh_state(bh);
frozen_buffer = jbd_kmalloc(jh2bh(jh)->b_size,
GFP_NOFS);
frozen_buffer =
jbd_slab_alloc(jh2bh(jh)->b_size,
GFP_NOFS);
if (!frozen_buffer) {
printk(KERN_EMERG
"%s: OOM for frozen_buffer\n",
Expand Down Expand Up @@ -879,7 +880,7 @@ int journal_get_undo_access(handle_t *handle, struct buffer_head *bh)

repeat:
if (!jh->b_committed_data) {
committed_data = jbd_kmalloc(jh2bh(jh)->b_size, GFP_NOFS);
committed_data = jbd_slab_alloc(jh2bh(jh)->b_size, GFP_NOFS);
if (!committed_data) {
printk(KERN_EMERG "%s: No memory for committed data\n",
__FUNCTION__);
Expand All @@ -906,7 +907,7 @@ int journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
out:
journal_put_journal_head(jh);
if (unlikely(committed_data))
kfree(committed_data);
jbd_slab_free(committed_data, bh->b_size);
return err;
}

Expand Down
3 changes: 3 additions & 0 deletions include/linux/jbd.h
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,9 @@ extern int journal_enable_debug;
#endif

extern void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry);
extern void * jbd_slab_alloc(size_t size, gfp_t flags);
extern void jbd_slab_free(void *ptr, size_t size);

#define jbd_kmalloc(size, flags) \
__jbd_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry)
#define jbd_rep_kmalloc(size, flags) \
Expand Down

0 comments on commit ea81739

Please sign in to comment.