Skip to content

Commit

Permalink
md/raid5: avoid oops when number of devices is reduced then increased.
Browse files Browse the repository at this point in the history
The entries in the stripe_cache maintained by raid5 are enlarged
when we increased the number of devices in the array, but not
shrunk when we reduce the number of devices.
So if entries are added after reducing the number of devices, we
must ensure to initialise the whole entry, not just the part that
is currently relevant.  Otherwise if we enlarge the array again,
we will reference uninitialised values.

As grow_buffers/shrink_buffers now want to use a count that is stored
explicitly in the raid_conf, they should get it from there rather than
being passed it as a parameter.

Signed-off-by: NeilBrown <neilb@suse.de>
  • Loading branch information
NeilBrown committed Jun 24, 2010
1 parent 049d6c1 commit e4e11e3
Showing 1 changed file with 10 additions and 9 deletions.
19 changes: 10 additions & 9 deletions drivers/md/raid5.c
Original file line number Diff line number Diff line change
Expand Up @@ -277,12 +277,13 @@ static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
return sh;
}

static void shrink_buffers(struct stripe_head *sh, int num)
static void shrink_buffers(struct stripe_head *sh)
{
struct page *p;
int i;
int num = sh->raid_conf->pool_size;

for (i=0; i<num ; i++) {
for (i = 0; i < num ; i++) {
p = sh->dev[i].page;
if (!p)
continue;
Expand All @@ -291,11 +292,12 @@ static void shrink_buffers(struct stripe_head *sh, int num)
}
}

static int grow_buffers(struct stripe_head *sh, int num)
static int grow_buffers(struct stripe_head *sh)
{
int i;
int num = sh->raid_conf->pool_size;

for (i=0; i<num; i++) {
for (i = 0; i < num; i++) {
struct page *page;

if (!(page = alloc_page(GFP_KERNEL))) {
Expand Down Expand Up @@ -1240,19 +1242,18 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
static int grow_one_stripe(raid5_conf_t *conf)
{
struct stripe_head *sh;
int disks = max(conf->raid_disks, conf->previous_raid_disks);
sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
if (!sh)
return 0;
memset(sh, 0, sizeof(*sh) + (disks-1)*sizeof(struct r5dev));
memset(sh, 0, sizeof(*sh) + (conf->pool_size-1)*sizeof(struct r5dev));
sh->raid_conf = conf;
spin_lock_init(&sh->lock);
#ifdef CONFIG_MULTICORE_RAID456
init_waitqueue_head(&sh->ops.wait_for_ops);
#endif

if (grow_buffers(sh, disks)) {
shrink_buffers(sh, disks);
if (grow_buffers(sh)) {
shrink_buffers(sh);
kmem_cache_free(conf->slab_cache, sh);
return 0;
}
Expand Down Expand Up @@ -1468,7 +1469,7 @@ static int drop_one_stripe(raid5_conf_t *conf)
if (!sh)
return 0;
BUG_ON(atomic_read(&sh->count));
shrink_buffers(sh, conf->pool_size);
shrink_buffers(sh);
kmem_cache_free(conf->slab_cache, sh);
atomic_dec(&conf->active_stripes);
return 1;
Expand Down

0 comments on commit e4e11e3

Please sign in to comment.