
Commit

---
yaml
---
r: 320892
b: refs/heads/master
c: b17459c
h: refs/heads/master
v: v3
Shaohua Li authored and NeilBrown committed Jul 19, 2012
1 parent 5d669cc commit c56a059
Showing 3 changed files with 21 additions and 17 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 7eaf7e8eb31747e4259d60288b44b194fb3d56c7
+refs/heads/master: b17459c05000fdbe8d10946570a26510f86ec0f6
35 changes: 19 additions & 16 deletions trunk/drivers/md/raid5.c
@@ -762,14 +762,12 @@ static void ops_complete_biofill(void *stripe_head_ref)
 {
 	struct stripe_head *sh = stripe_head_ref;
 	struct bio *return_bi = NULL;
-	struct r5conf *conf = sh->raid_conf;
 	int i;
 
 	pr_debug("%s: stripe %llu\n", __func__,
 		(unsigned long long)sh->sector);
 
 	/* clear completed biofills */
-	spin_lock_irq(&conf->device_lock);
 	for (i = sh->disks; i--; ) {
 		struct r5dev *dev = &sh->dev[i];
 
@@ -795,7 +793,6 @@ static void ops_complete_biofill(void *stripe_head_ref)
 			}
 		}
 	}
-	spin_unlock_irq(&conf->device_lock);
 	clear_bit(STRIPE_BIOFILL_RUN, &sh->state);
 
 	return_io(return_bi);
@@ -807,7 +804,6 @@ static void ops_complete_biofill(void *stripe_head_ref)
 static void ops_run_biofill(struct stripe_head *sh)
 {
 	struct dma_async_tx_descriptor *tx = NULL;
-	struct r5conf *conf = sh->raid_conf;
 	struct async_submit_ctl submit;
 	int i;
 
@@ -818,10 +814,10 @@ static void ops_run_biofill(struct stripe_head *sh)
 		struct r5dev *dev = &sh->dev[i];
 		if (test_bit(R5_Wantfill, &dev->flags)) {
 			struct bio *rbi;
-			spin_lock_irq(&conf->device_lock);
+			spin_lock_irq(&sh->stripe_lock);
 			dev->read = rbi = dev->toread;
 			dev->toread = NULL;
-			spin_unlock_irq(&conf->device_lock);
+			spin_unlock_irq(&sh->stripe_lock);
 			while (rbi && rbi->bi_sector <
 				dev->sector + STRIPE_SECTORS) {
 				tx = async_copy_data(0, rbi, dev->page,
@@ -1157,12 +1153,12 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 		if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
 			struct bio *wbi;
 
-			spin_lock_irq(&sh->raid_conf->device_lock);
+			spin_lock_irq(&sh->stripe_lock);
 			chosen = dev->towrite;
 			dev->towrite = NULL;
 			BUG_ON(dev->written);
 			wbi = dev->written = chosen;
-			spin_unlock_irq(&sh->raid_conf->device_lock);
+			spin_unlock_irq(&sh->stripe_lock);
 
 			while (wbi && wbi->bi_sector <
 				dev->sector + STRIPE_SECTORS) {
@@ -1467,6 +1463,8 @@ static int grow_one_stripe(struct r5conf *conf)
 	init_waitqueue_head(&sh->ops.wait_for_ops);
 #endif
 
+	spin_lock_init(&sh->stripe_lock);
+
 	if (grow_buffers(sh)) {
 		shrink_buffers(sh);
 		kmem_cache_free(conf->slab_cache, sh);
@@ -2353,8 +2351,15 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 		(unsigned long long)bi->bi_sector,
 		(unsigned long long)sh->sector);
 
-
-	spin_lock_irq(&conf->device_lock);
+	/*
+	 * If several bios share a stripe, bi_phys_segments acts as a
+	 * reference count to avoid races. The reference count should already
+	 * be increased before this function is called (for example, in
+	 * make_request()), so other bios sharing this stripe will not free
+	 * it. When a stripe is owned by a single bio, the stripe lock
+	 * protects it.
+	 */
+	spin_lock_irq(&sh->stripe_lock);
 	if (forwrite) {
 		bip = &sh->dev[dd_idx].towrite;
 		if (*bip == NULL)
@@ -2388,7 +2393,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 		if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
 			set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
 	}
-	spin_unlock_irq(&conf->device_lock);
+	spin_unlock_irq(&sh->stripe_lock);
 
 	pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
 		(unsigned long long)(*bip)->bi_sector,
@@ -2404,7 +2409,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 
  overlap:
 	set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
-	spin_unlock_irq(&conf->device_lock);
+	spin_unlock_irq(&sh->stripe_lock);
 	return 0;
 }

@@ -2454,11 +2459,11 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 				rdev_dec_pending(rdev, conf->mddev);
 			}
 		}
-		spin_lock_irq(&conf->device_lock);
+		spin_lock_irq(&sh->stripe_lock);
 		/* fail all writes first */
 		bi = sh->dev[i].towrite;
 		sh->dev[i].towrite = NULL;
-		spin_unlock_irq(&conf->device_lock);
+		spin_unlock_irq(&sh->stripe_lock);
 		if (bi) {
 			s->to_write--;
 			bitmap_end = 1;
@@ -3192,7 +3197,6 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
 
 	/* Now to look around and see what can be done */
 	rcu_read_lock();
-	spin_lock_irq(&conf->device_lock);
 	for (i=disks; i--; ) {
 		struct md_rdev *rdev;
 		sector_t first_bad;
@@ -3338,7 +3342,6 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
 				do_recovery = 1;
 		}
 	}
-	spin_unlock_irq(&conf->device_lock);
 	if (test_bit(STRIPE_SYNCING, &sh->state)) {
 		/* If there is a failed device being replaced,
 		 * we must be recovering.
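To make the locking change easier to follow outside the kernel tree, here is a minimal userspace sketch of the pattern this diff introduces: each stripe carries its own spinlock guarding its pending-bio lists, so callers queuing I/O against different stripes no longer contend on a single array-wide lock. The names (struct stripe, struct bio, stripe_init, stripe_add_bio) and the pthread-based locking are illustrative assumptions for the sketch only, not the kernel's APIs.

/* Sketch only: per-stripe locking, userspace C with pthread spinlocks. */
#include <pthread.h>
#include <stddef.h>

struct bio {                           /* stand-in for the kernel's struct bio */
	struct bio *bi_next;
};

struct stripe {
	pthread_spinlock_t stripe_lock;    /* guards toread/towrite, like sh->stripe_lock */
	struct bio *toread;
	struct bio *towrite;
};

void stripe_init(struct stripe *sh)
{
	pthread_spin_init(&sh->stripe_lock, PTHREAD_PROCESS_PRIVATE);
	sh->toread = NULL;
	sh->towrite = NULL;
}

/* Queue a bio on one stripe. Only that stripe's lock is taken, so
 * requests aimed at other stripes proceed without contention. */
void stripe_add_bio(struct stripe *sh, struct bio *bi, int forwrite)
{
	struct bio **list;

	pthread_spin_lock(&sh->stripe_lock);
	list = forwrite ? &sh->towrite : &sh->toread;
	bi->bi_next = *list;
	*list = bi;
	pthread_spin_unlock(&sh->stripe_lock);
}

In the patch itself the same per-stripe lock (sh->stripe_lock) guards each sh->dev[i].toread/towrite list in add_stripe_bio(), ops_run_biofill(), ops_run_biodrain() and handle_failed_stripe(), replacing conf->device_lock on those paths.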
1 change: 1 addition & 0 deletions trunk/drivers/md/raid5.h
@@ -210,6 +210,7 @@ struct stripe_head {
 	int			disks;		/* disks in stripe */
 	enum check_states	check_state;
 	enum reconstruct_states reconstruct_state;
+	spinlock_t		stripe_lock;
 	/**
 	 * struct stripe_operations
 	 * @target - STRIPE_OP_COMPUTE_BLK target
