Commit 33d85dc

---
r: 273802
b: refs/heads/master
c: c9a929d
h: refs/heads/master
v: v3
Tejun Heo authored and Jens Axboe committed Oct 19, 2011
1 parent 44cb8bf commit 33d85dc
Showing 6 changed files with 88 additions and 30 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: bd87b5898a72b1aef6acf3705c61c9f6372adf0c
+refs/heads/master: c9a929dde3913780b5c416f4bb9d9ed804f509ce
57 changes: 36 additions & 21 deletions trunk/block/blk-core.c
@@ -349,21 +349,29 @@ EXPORT_SYMBOL(blk_put_queue);
 /**
  * blk_drain_queue - drain requests from request_queue
  * @q: queue to drain
+ * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
  *
- * Drain ELV_PRIV requests from @q. The caller is responsible for ensuring
- * that no new requests which need to be drained are queued.
+ * Drain requests from @q. If @drain_all is set, all requests are drained.
+ * If not, only ELVPRIV requests are drained. The caller is responsible
+ * for ensuring that no new requests which need to be drained are queued.
  */
-void blk_drain_queue(struct request_queue *q)
+void blk_drain_queue(struct request_queue *q, bool drain_all)
 {
 	while (true) {
 		int nr_rqs;
 
 		spin_lock_irq(q->queue_lock);
 
 		elv_drain_elevator(q);
+		if (drain_all)
+			blk_throtl_drain(q);
 
 		__blk_run_queue(q);
-		nr_rqs = q->rq.elvpriv;
+
+		if (drain_all)
+			nr_rqs = q->rq.count[0] + q->rq.count[1];
+		else
+			nr_rqs = q->rq.elvpriv;
 
 		spin_unlock_irq(q->queue_lock);
 
@@ -373,30 +381,40 @@ void blk_drain_queue(struct request_queue *q)
 	}
 }
 
-/*
- * Note: If a driver supplied the queue lock, it is disconnected
- * by this function. The actual state of the lock doesn't matter
- * here as the request_queue isn't accessible after this point
- * (QUEUE_FLAG_DEAD is set) and no other requests will be queued.
+/**
+ * blk_cleanup_queue - shutdown a request queue
+ * @q: request queue to shutdown
+ *
+ * Mark @q DEAD, drain all pending requests, destroy and put it. All
+ * future requests will be failed immediately with -ENODEV.
  */
 void blk_cleanup_queue(struct request_queue *q)
 {
-	/*
-	 * We know we have process context here, so we can be a little
-	 * cautious and ensure that pending block actions on this device
-	 * are done before moving on. Going into this function, we should
-	 * not have processes doing IO to this device.
-	 */
-	blk_sync_queue(q);
+	spinlock_t *lock = q->queue_lock;
 
-	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
+	/* mark @q DEAD, no new request or merges will be allowed afterwards */
 	mutex_lock(&q->sysfs_lock);
 	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
-	mutex_unlock(&q->sysfs_lock);
+
+	spin_lock_irq(lock);
+	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
+	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
+	queue_flag_set(QUEUE_FLAG_DEAD, q);
 
 	if (q->queue_lock != &q->__queue_lock)
 		q->queue_lock = &q->__queue_lock;
 
+	spin_unlock_irq(lock);
+	mutex_unlock(&q->sysfs_lock);
+
+	/* drain all requests queued before DEAD marking */
+	blk_drain_queue(q, true);
+
+	/* @q won't process any more request, flush async actions */
+	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
+	blk_sync_queue(q);
+
+	/* @q is and will stay empty, shutdown and put */
 	blk_put_queue(q);
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
@@ -1509,9 +1527,6 @@ generic_make_request_checks(struct bio *bio)
 		goto end_io;
 	}
 
-	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
-		goto end_io;
-
 	part = bio->bi_bdev->bd_part;
 	if (should_fail_request(part, bio->bi_size) ||
 	    should_fail_request(&part_to_disk(part)->part0,
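
[Note] The reordered blk_cleanup_queue() above is the heart of this commit: mark the queue DEAD under both the sysfs mutex and the queue lock, drain everything already queued, flush async actions, then drop the reference. A minimal sketch of how a driver of this era consumes the API at device removal; the "mydrv" structure and function are hypothetical, not part of this commit:

	struct mydrv {
		struct gendisk *disk;
		struct request_queue *queue;
	};

	static void mydrv_remove(struct mydrv *dev)
	{
		del_gendisk(dev->disk);         /* stop new I/O from above */
		blk_cleanup_queue(dev->queue);  /* mark DEAD, drain, put the queue */
		put_disk(dev->disk);
	}

With the DEAD flags set before the drain, a request that races in during teardown now fails cleanly with -ENODEV instead of touching a half-destroyed queue.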
1 change: 1 addition & 0 deletions trunk/block/blk-sysfs.c
@@ -490,6 +490,7 @@ static void blk_release_queue(struct kobject *kobj)
 	if (q->queue_tags)
 		__blk_queue_free_tags(q);
 
+	blk_throtl_release(q);
 	blk_trace_shutdown(q);
 
 	bdi_destroy(&q->backing_dev_info);
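
[Note] blk_release_queue() is the kobject release method, which runs only when the last reference to the queue is dropped, while blk_throtl_exit() runs earlier, on the blk_cleanup_queue() path. A rough sketch of the two-phase teardown this split creates; the phase function names are illustrative only, and it assumes blk_throtl_exit() is already invoked during cleanup:

	/* phase 1: queue is shutting down, but others may still hold references */
	static void teardown_quiesce(struct request_queue *q)
	{
		blk_throtl_exit(q);     /* stop throttling activity */
	}

	/* phase 2: last reference gone, nobody can reach q->td anymore */
	static void teardown_free(struct request_queue *q)
	{
		blk_throtl_release(q);  /* only now is kfree(q->td) safe */
	}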
50 changes: 44 additions & 6 deletions trunk/block/blk-throttle.c
@@ -309,6 +309,10 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	struct blkio_cgroup *blkcg;
 	struct request_queue *q = td->queue;
 
+	/* no throttling for dead queue */
+	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+		return NULL;
+
 	rcu_read_lock();
 	blkcg = task_blkio_cgroup(current);
 	tg = throtl_find_tg(td, blkcg);
@@ -1001,11 +1005,6 @@ static void throtl_release_tgs(struct throtl_data *td)
 	}
 }
 
-static void throtl_td_free(struct throtl_data *td)
-{
-	kfree(td);
-}
-
 /*
  * Blk cgroup controller notification saying that blkio_group object is being
  * delinked as associated cgroup object is going away. That also means that
@@ -1204,6 +1203,41 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 	return throttled;
 }
 
+/**
+ * blk_throtl_drain - drain throttled bios
+ * @q: request_queue to drain throttled bios for
+ *
+ * Dispatch all currently throttled bios on @q through ->make_request_fn().
+ */
+void blk_throtl_drain(struct request_queue *q)
+	__releases(q->queue_lock) __acquires(q->queue_lock)
+{
+	struct throtl_data *td = q->td;
+	struct throtl_rb_root *st = &td->tg_service_tree;
+	struct throtl_grp *tg;
+	struct bio_list bl;
+	struct bio *bio;
+
+	lockdep_is_held(q->queue_lock);
+
+	bio_list_init(&bl);
+
+	while ((tg = throtl_rb_first(st))) {
+		throtl_dequeue_tg(td, tg);
+
+		while ((bio = bio_list_peek(&tg->bio_lists[READ])))
+			tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
+		while ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
+			tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
+	}
+	spin_unlock_irq(q->queue_lock);
+
+	while ((bio = bio_list_pop(&bl)))
+		generic_make_request(bio);
+
+	spin_lock_irq(q->queue_lock);
+}
+
 int blk_throtl_init(struct request_queue *q)
 {
 	struct throtl_data *td;
@@ -1276,7 +1310,11 @@ void blk_throtl_exit(struct request_queue *q)
 	 * it.
 	 */
 	throtl_shutdown_wq(q);
-	throtl_td_free(td);
+}
+
+void blk_throtl_release(struct request_queue *q)
+{
+	kfree(q->td);
 }
 
 static int __init throtl_init(void)
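
[Note] blk_throtl_drain() follows a classic locking pattern: detach all throttled bios onto a private bio_list while holding queue_lock, then drop the lock before resubmitting, because generic_make_request() may sleep or recurse back into the queue. A self-contained userspace illustration of the same shape (plain C, not kernel code; the kernel version additionally preserves dispatch order via tail appends):

	#include <pthread.h>
	#include <stdio.h>

	struct node { struct node *next; int value; };

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static struct node *throttled;          /* shared, lock-protected */

	static void dispatch(struct node *n)    /* stand-in for generic_make_request() */
	{
		printf("dispatching %d\n", n->value);
	}

	static void drain(void)
	{
		struct node *pending = NULL, *n;

		pthread_mutex_lock(&lock);
		while ((n = throttled)) {       /* detach everything under the lock */
			throttled = n->next;
			n->next = pending;
			pending = n;
		}
		pthread_mutex_unlock(&lock);    /* dispatch() may block; drop the lock first */

		while ((n = pending)) {
			pending = n->next;
			dispatch(n);
		}
	}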
6 changes: 5 additions & 1 deletion trunk/block/blk.h
@@ -15,7 +15,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 			struct bio *bio);
 int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 		      struct bio *bio);
-void blk_drain_queue(struct request_queue *q);
+void blk_drain_queue(struct request_queue *q, bool drain_all);
 void blk_dequeue_request(struct request *rq);
 void __blk_queue_free_tags(struct request_queue *q);
 bool __blk_end_bidi_request(struct request *rq, int error,
@@ -191,15 +191,19 @@ static inline int blk_do_io_stat(struct request *rq)
 
 #ifdef CONFIG_BLK_DEV_THROTTLING
 extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
+extern void blk_throtl_drain(struct request_queue *q);
 extern int blk_throtl_init(struct request_queue *q);
 extern void blk_throtl_exit(struct request_queue *q);
+extern void blk_throtl_release(struct request_queue *q);
 #else /* CONFIG_BLK_DEV_THROTTLING */
 static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 {
 	return false;
 }
+static inline void blk_throtl_drain(struct request_queue *q) { }
 static inline int blk_throtl_init(struct request_queue *q) { return 0; }
 static inline void blk_throtl_exit(struct request_queue *q) { }
+static inline void blk_throtl_release(struct request_queue *q) { }
 #endif /* CONFIG_BLK_DEV_THROTTLING */
 
 #endif /* BLK_INTERNAL_H */
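
[Note] The #else branch keeps non-throttling builds working: the empty static inlines compile away, so common code never needs #ifdef guards around the hooks. The new call site in blk_drain_queue() (blk-core.c above) relies on exactly this:

	if (drain_all)
		blk_throtl_drain(q);    /* empty inline stub when CONFIG_BLK_DEV_THROTTLING=n */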
2 changes: 1 addition & 1 deletion trunk/block/elevator.c
@@ -626,7 +626,7 @@ void elv_quiesce_start(struct request_queue *q)
 	queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
 	spin_unlock_irq(q->queue_lock);
 
-	blk_drain_queue(q);
+	blk_drain_queue(q, false);
 }
 
 void elv_quiesce_end(struct request_queue *q)
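
[Note] After this commit there are exactly two drain call sites, with deliberately different scopes:

	blk_drain_queue(q, true);   /* blk_cleanup_queue(): queue is dying, drain
	                             * every request, throttled bios included */
	blk_drain_queue(q, false);  /* elv_quiesce_start(): only elevator-private
	                             * (ELVPRIV) requests must finish before an
	                             * elevator switch */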
