Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 176080
b: refs/heads/master
c: a2826aa
h: refs/heads/master
v: v3
  • Loading branch information
NeilBrown committed Dec 14, 2009
1 parent 8161e60 commit b442552
Show file tree
Hide file tree
Showing 8 changed files with 127 additions and 8 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: efa593390e70b0e3c39f6b2dca8876b6b1461e41
refs/heads/master: a2826aa92e2e14db372eda01d333267258944033
2 changes: 1 addition & 1 deletion trunk/drivers/md/linear.c
Original file line number Diff line number Diff line change
Expand Up @@ -292,7 +292,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
int cpu;

if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
bio_endio(bio, -EOPNOTSUPP);
md_barrier_request(mddev, bio);
return 0;
}

Expand Down
105 changes: 103 additions & 2 deletions trunk/drivers/md/md.c
Original file line number Diff line number Diff line change
Expand Up @@ -213,12 +213,12 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
return 0;
}
rcu_read_lock();
if (mddev->suspended) {
if (mddev->suspended || mddev->barrier) {
DEFINE_WAIT(__wait);
for (;;) {
prepare_to_wait(&mddev->sb_wait, &__wait,
TASK_UNINTERRUPTIBLE);
if (!mddev->suspended)
if (!mddev->suspended && !mddev->barrier)
break;
rcu_read_unlock();
schedule();
Expand Down Expand Up @@ -260,10 +260,110 @@ static void mddev_resume(mddev_t *mddev)

/*
 * Report write congestion for this array: an in-flight barrier or a
 * suspended array means new requests would block, so claim congestion.
 * The @bits argument is accepted for interface compatibility but unused.
 */
int mddev_congested(mddev_t *mddev, int bits)
{
	return mddev->barrier ? 1 : mddev->suspended;
}
EXPORT_SYMBOL(mddev_congested);

/*
* Generic barrier handling for md
*/

#define POST_REQUEST_BARRIER ((void*)1)

/*
 * Completion callback for the zero-length barrier bios sent to each
 * component device by submit_barriers().  Called once per device bio,
 * in interrupt-like context (bio end_io), so it only flips state and
 * defers real work to the barrier_work workqueue item.
 */
static void md_end_barrier(struct bio *bio, int err)
{
	mdk_rdev_t *rdev = bio->bi_private;
	mddev_t *mddev = rdev->mddev;
	/* A device that cannot do barriers reports -EOPNOTSUPP; remember
	 * that on the original request's flags so md_submit_barrier() can
	 * fail it.  The trailing (post-request) round has no user bio to
	 * flag, hence the POST_REQUEST_BARRIER sentinel check. */
	if (err == -EOPNOTSUPP && mddev->barrier != POST_REQUEST_BARRIER)
		set_bit(BIO_EOPNOTSUPP, &mddev->barrier->bi_flags);

	/* Drop the per-request reference taken in submit_barriers(). */
	rdev_dec_pending(rdev, mddev);

	/* Last flush bio to complete decides what happens next. */
	if (atomic_dec_and_test(&mddev->flush_pending)) {
		if (mddev->barrier == POST_REQUEST_BARRIER) {
			/* This was a post-request barrier: the whole
			 * sequence is done; clear ->barrier so writers
			 * blocked in md_make_request() can proceed. */
			mddev->barrier = NULL;
			wake_up(&mddev->sb_wait);
		} else
			/* The pre-request barrier has finished: hand off
			 * to md_submit_barrier() to issue the payload. */
			schedule_work(&mddev->barrier_work);
	}
	bio_put(bio);
}

/*
 * Send an empty WRITE_BARRIER bio to every in-use, non-faulty component
 * device of @mddev, bumping ->flush_pending once per bio issued.
 * Completion is handled by md_end_barrier().
 *
 * bio_alloc(GFP_KERNEL) may sleep, so the RCU read lock must be dropped
 * around it; the rdev is kept alive across that window via nr_pending.
 */
static void submit_barriers(mddev_t *mddev)
{
	mdk_rdev_t *rdev;

	rcu_read_lock();
	list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* Take two references: one is dropped when the
			 * barrier bio completes (md_end_barrier), the
			 * other below, after we retake rcu_read_lock,
			 * so the rdev stays valid while unlocked. */
			struct bio *bi;
			atomic_inc(&rdev->nr_pending);
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			/* Zero-payload bio: a pure flush/barrier. */
			bi = bio_alloc(GFP_KERNEL, 0);
			bi->bi_end_io = md_end_barrier;
			bi->bi_private = rdev;
			bi->bi_bdev = rdev->bdev;
			atomic_inc(&mddev->flush_pending);
			submit_bio(WRITE_BARRIER, bi);
			rcu_read_lock();
			/* Drop the temporary reference; list traversal
			 * resumes safely from this entry. */
			rdev_dec_pending(rdev, mddev);
		}
	rcu_read_unlock();
}

/*
 * Workqueue handler, scheduled by md_end_barrier() once the pre-request
 * flush of all devices has completed.  Submits the original barrier
 * request's payload (with the barrier flag stripped), then issues a
 * second, post-request flush round via the POST_REQUEST_BARRIER state.
 *
 * The initial flush_pending count of 1 is a bias that keeps the
 * completion path from firing until all post-barriers are submitted.
 */
static void md_submit_barrier(struct work_struct *ws)
{
	mddev_t *mddev = container_of(ws, mddev_t, barrier_work);
	struct bio *bio = mddev->barrier;

	atomic_set(&mddev->flush_pending, 1);

	if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
		/* Some device rejected barriers during the pre-flush. */
		bio_endio(bio, -EOPNOTSUPP);
	else if (bio->bi_size == 0)
		/* an empty barrier - all done */
		bio_endio(bio, 0);
	else {
		/* Submit the payload as a plain write: ordering is
		 * already guaranteed by the explicit flush rounds. */
		bio->bi_rw &= ~(1<<BIO_RW_BARRIER);
		/* Personality may defer the bio (returns nonzero to ask
		 * for generic resubmission) — NOTE(review): presumably
		 * matches the make_request return convention; verify. */
		if (mddev->pers->make_request(mddev->queue, bio))
			generic_make_request(bio);
		/* Switch to sentinel state before the trailing flush so
		 * md_end_barrier() knows there is no user bio to flag. */
		mddev->barrier = POST_REQUEST_BARRIER;
		submit_barriers(mddev);
	}
	/* Drop the bias; if no post-barriers were issued (or they all
	 * already finished) the sequence completes here. */
	if (atomic_dec_and_test(&mddev->flush_pending)) {
		mddev->barrier = NULL;
		wake_up(&mddev->sb_wait);
	}
}

/*
 * Entry point for generic md barrier handling, called by personalities
 * from their make_request path when they see a BIO_RW_BARRIER bio.
 *
 * Serializes barriers: waits (under write_lock) until any previous
 * barrier sequence has cleared ->barrier, installs @bio as the current
 * barrier, pre-flushes all component devices, and arranges for
 * md_submit_barrier() to run once the pre-flush completes.  The bio is
 * completed asynchronously; the caller must not touch it afterwards.
 */
void md_barrier_request(mddev_t *mddev, struct bio *bio)
{
	spin_lock_irq(&mddev->write_lock);
	/* Only one barrier sequence at a time; sleep until ours can start. */
	wait_event_lock_irq(mddev->sb_wait,
			    !mddev->barrier,
			    mddev->write_lock, /*nothing*/);
	/* Publishing ->barrier also blocks new writes in md_make_request(). */
	mddev->barrier = bio;
	spin_unlock_irq(&mddev->write_lock);

	/* Bias the counter so completion cannot trigger mid-submission. */
	atomic_set(&mddev->flush_pending, 1);
	INIT_WORK(&mddev->barrier_work, md_submit_barrier);

	submit_barriers(mddev);

	/* Drop the bias; if every pre-flush already finished (or none was
	 * issued), schedule the payload submission ourselves. */
	if (atomic_dec_and_test(&mddev->flush_pending))
		schedule_work(&mddev->barrier_work);
}
EXPORT_SYMBOL(md_barrier_request);

static inline mddev_t *mddev_get(mddev_t *mddev)
{
Expand Down Expand Up @@ -371,6 +471,7 @@ static mddev_t * mddev_find(dev_t unit)
atomic_set(&new->openers, 0);
atomic_set(&new->active_io, 0);
spin_lock_init(&new->write_lock);
atomic_set(&new->flush_pending, 0);
init_waitqueue_head(&new->sb_wait);
init_waitqueue_head(&new->recovery_wait);
new->reshape_position = MaxSector;
Expand Down
12 changes: 12 additions & 0 deletions trunk/drivers/md/md.h
Original file line number Diff line number Diff line change
Expand Up @@ -292,6 +292,17 @@ struct mddev_s
struct mutex bitmap_mutex;

struct list_head all_mddevs;

/* Generic barrier handling.
* If there is a pending barrier request, all other
* writes are blocked while the devices are flushed.
* The last to finish a flush schedules a worker to
* submit the barrier request (without the barrier flag),
* then submit more flush requests.
*/
struct bio *barrier;
atomic_t flush_pending;
struct work_struct barrier_work;
};


Expand Down Expand Up @@ -432,6 +443,7 @@ extern void md_done_sync(mddev_t *mddev, int blocks, int ok);
extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev);

extern int mddev_congested(mddev_t *mddev, int bits);
extern void md_barrier_request(mddev_t *mddev, struct bio *bio);
extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
sector_t sector, int size, struct page *page);
extern void md_super_wait(mddev_t *mddev);
Expand Down
2 changes: 1 addition & 1 deletion trunk/drivers/md/multipath.c
Original file line number Diff line number Diff line change
Expand Up @@ -145,7 +145,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
int cpu;

if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
bio_endio(bio, -EOPNOTSUPP);
md_barrier_request(mddev, bio);
return 0;
}

Expand Down
2 changes: 1 addition & 1 deletion trunk/drivers/md/raid0.c
Original file line number Diff line number Diff line change
Expand Up @@ -453,7 +453,7 @@ static int raid0_make_request(struct request_queue *q, struct bio *bio)
int cpu;

if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
bio_endio(bio, -EOPNOTSUPP);
md_barrier_request(mddev, bio);
return 0;
}

Expand Down
2 changes: 1 addition & 1 deletion trunk/drivers/md/raid10.c
Original file line number Diff line number Diff line change
Expand Up @@ -804,7 +804,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
mdk_rdev_t *blocked_rdev;

if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
bio_endio(bio, -EOPNOTSUPP);
md_barrier_request(mddev, bio);
return 0;
}

Expand Down
8 changes: 7 additions & 1 deletion trunk/drivers/md/raid5.c
Original file line number Diff line number Diff line change
Expand Up @@ -3865,7 +3865,13 @@ static int make_request(struct request_queue *q, struct bio * bi)
int cpu, remaining;

if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) {
bio_endio(bi, -EOPNOTSUPP);
/* Drain all pending writes. We only really need
* to ensure they have been submitted, but this is
* easier.
*/
mddev->pers->quiesce(mddev, 1);
mddev->pers->quiesce(mddev, 0);
md_barrier_request(mddev, bi);
return 0;
}

Expand Down

0 comments on commit b442552

Please sign in to comment.