Commit 5a5cfa4

---
r: 345666
b: refs/heads/master
c: 3f3299d
h: refs/heads/master
v: v3
Bart Van Assche authored and Jens Axboe committed Dec 6, 2012
1 parent d371da9 commit 5a5cfa4
Showing 9 changed files with 23 additions and 23 deletions.
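
The rename is mechanical: QUEUE_FLAG_DEAD becomes QUEUE_FLAG_DYING and blk_queue_dead() becomes blk_queue_dying(), with the bit value and meaning ("queue being torn down") unchanged. Every call site below follows the same pattern: test the flag, usually under q->queue_lock, before queuing new work. A minimal sketch of that pattern, modeled on the blk_insert_cloned_request() hunk further down (submit_if_alive is an invented name, not part of this commit):

#include <linux/blkdev.h>
#include <linux/elevator.h>

/* Hypothetical caller sketch, not part of this commit. */
static int submit_if_alive(struct request_queue *q, struct request *rq)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (unlikely(blk_queue_dying(q))) {	/* was blk_queue_dead(q) */
		spin_unlock_irqrestore(q->queue_lock, flags);
		return -ENODEV;			/* queue is being torn down */
	}
	__elv_add_request(q, rq, ELEVATOR_INSERT_BACK);
	spin_unlock_irqrestore(q->queue_lock, flags);
	return 0;
}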
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 8fa72d234da9b6b473bbb1f74d533663e4996e6b
+refs/heads/master: 3f3299d5c0268d6cc3f47b446e8aca436e4a5651
2 changes: 1 addition & 1 deletion trunk/block/blk-cgroup.c
@@ -231,7 +231,7 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
-		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
+		return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);
	return __blkg_lookup_create(blkcg, q, NULL);
 }
 EXPORT_SYMBOL_GPL(blkg_lookup_create);
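
A hedged sketch of how a hypothetical caller would tell the two error codes apart (mydrv_get_blkg is an invented name; the real callers live in blk-cgroup.c and blk-throttle.c, and the locking context they hold is omitted here for brevity):

/* Hypothetical caller sketch; queue_lock and RCU context assumed held. */
static int mydrv_get_blkg(struct blkcg *blkcg, struct request_queue *q)
{
	struct blkcg_gq *blkg = blkg_lookup_create(blkcg, q);

	if (IS_ERR(blkg))
		/* -EINVAL: queue dying, give up; -EBUSY: only bypassing, may retry */
		return PTR_ERR(blkg);
	return 0;
}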
26 changes: 13 additions & 13 deletions trunk/block/blk-core.c
@@ -473,20 +473,20 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
  * blk_cleanup_queue - shutdown a request queue
  * @q: request queue to shutdown
  *
- * Mark @q DEAD, drain all pending requests, destroy and put it. All
+ * Mark @q DYING, drain all pending requests, destroy and put it. All
  * future requests will be failed immediately with -ENODEV.
  */
 void blk_cleanup_queue(struct request_queue *q)
 {
	spinlock_t *lock = q->queue_lock;
 
-	/* mark @q DEAD, no new request or merges will be allowed afterwards */
+	/* mark @q DYING, no new request or merges will be allowed afterwards */
	mutex_lock(&q->sysfs_lock);
-	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
+	queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
	spin_lock_irq(lock);
 
	/*
-	 * Dead queue is permanently in bypass mode till released. Note
+	 * A dying queue is permanently in bypass mode till released. Note
	 * that, unlike blk_queue_bypass_start(), we aren't performing
	 * synchronize_rcu() after entering bypass mode to avoid the delay
	 * as some drivers create and destroy a lot of queues while
@@ -499,11 +499,11 @@ void blk_cleanup_queue(struct request_queue *q)
 
	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
-	queue_flag_set(QUEUE_FLAG_DEAD, q);
+	queue_flag_set(QUEUE_FLAG_DYING, q);
	spin_unlock_irq(lock);
	mutex_unlock(&q->sysfs_lock);
 
-	/* drain all requests queued before DEAD marking */
+	/* drain all requests queued before DYING marking */
	blk_drain_queue(q, true);
 
	/* @q won't process any more request, flush async actions */
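
As the comment block above says, blk_cleanup_queue() is the teardown entry point: it marks the queue DYING, drains it, and drops the reference. A hedged sketch of the usual driver remove path it is called from (mydrv_remove is an invented name):

#include <linux/blkdev.h>
#include <linux/genhd.h>

/* Hypothetical driver teardown; not part of this commit. */
static void mydrv_remove(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	del_gendisk(disk);	/* stop new openers from the block layer side */
	blk_cleanup_queue(q);	/* mark DYING, drain, and drop the queue ref */
	put_disk(disk);
}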
@@ -716,7 +716,7 @@ EXPORT_SYMBOL(blk_init_allocated_queue);
 
 bool blk_get_queue(struct request_queue *q)
 {
-	if (likely(!blk_queue_dead(q))) {
+	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}
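
blk_get_queue() refuses to hand out new references once the queue is dying, so a successful get means teardown had not yet begun. A short hypothetical usage sketch (grab_queue is an invented name):

/* Hypothetical: take a queue reference only while the queue is still alive. */
static struct request_queue *grab_queue(struct request_queue *q)
{
	if (!blk_get_queue(q))	/* fails once QUEUE_FLAG_DYING is set */
		return NULL;
	return q;		/* balance with blk_put_queue(q) when done */
}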
@@ -870,7 +870,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
	const bool is_sync = rw_is_sync(rw_flags) != 0;
	int may_queue;
 
-	if (unlikely(blk_queue_dead(q)))
+	if (unlikely(blk_queue_dying(q)))
		return NULL;
 
	may_queue = elv_may_queue(q, rw_flags);
@@ -1050,7 +1050,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
	if (rq)
		return rq;
 
-	if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dead(q))) {
+	if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) {
		blk_put_rl(rl);
		return NULL;
	}
@@ -1910,7 +1910,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
		return -EIO;
 
	spin_lock_irqsave(q->queue_lock, flags);
-	if (unlikely(blk_queue_dead(q))) {
+	if (unlikely(blk_queue_dying(q))) {
		spin_unlock_irqrestore(q->queue_lock, flags);
		return -ENODEV;
	}
@@ -2885,9 +2885,9 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
	trace_block_unplug(q, depth, !from_schedule);
 
	/*
-	 * Don't mess with dead queue.
+	 * Don't mess with a dying queue.
	 */
-	if (unlikely(blk_queue_dead(q))) {
+	if (unlikely(blk_queue_dying(q))) {
		spin_unlock(q->queue_lock);
		return;
	}
@@ -2996,7 +2996,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
		/*
		 * Short-circuit if @q is dead
		 */
-		if (unlikely(blk_queue_dead(q))) {
+		if (unlikely(blk_queue_dying(q))) {
			__blk_end_request_all(rq, -ENODEV);
			continue;
		}
2 changes: 1 addition & 1 deletion trunk/block/blk-exec.c
@@ -60,7 +60,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 
	spin_lock_irq(q->queue_lock);
 
-	if (unlikely(blk_queue_dead(q))) {
+	if (unlikely(blk_queue_dying(q))) {
		rq->errors = -ENXIO;
		if (rq->end_io)
			rq->end_io(rq, rq->errors);
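
On a dying queue, blk_execute_rq_nowait() fails the request immediately with -ENXIO through the end_io callback rather than queueing it. A hedged sketch of a hypothetical submitter (my_end_io and my_submit are invented names):

#include <linux/blkdev.h>

/* Called with queue_lock held; error is -ENXIO if the queue was dying. */
static void my_end_io(struct request *rq, int error)
{
	__blk_put_request(rq->q, rq);
}

/* Hypothetical asynchronous submission path. */
static int my_submit(struct request_queue *q, struct gendisk *disk)
{
	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);

	if (!rq)	/* also NULL when the queue is dying, per __get_request() */
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_SPECIAL;
	blk_execute_rq_nowait(q, disk, rq, 0, my_end_io);
	return 0;
}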
4 changes: 2 additions & 2 deletions trunk/block/blk-sysfs.c
@@ -466,7 +466,7 @@ queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
-	if (blk_queue_dead(q)) {
+	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
@@ -488,7 +488,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 
	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
-	if (blk_queue_dead(q)) {
+	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
2 changes: 1 addition & 1 deletion trunk/block/blk-throttle.c
@@ -302,7 +302,7 @@ static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
		/* if %NULL and @q is alive, fall back to root_tg */
		if (!IS_ERR(blkg))
			tg = blkg_to_tg(blkg);
-		else if (!blk_queue_dead(q))
+		else if (!blk_queue_dying(q))
			tg = td_root_tg(td);
	}
 
2 changes: 1 addition & 1 deletion trunk/block/blk.h
@@ -96,7 +96,7 @@ static inline struct request *__elv_next_request(struct request_queue *q)
			q->flush_queue_delayed = 1;
			return NULL;
		}
-		if (unlikely(blk_queue_dead(q)) ||
+		if (unlikely(blk_queue_dying(q)) ||
		    !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
			return NULL;
	}
2 changes: 1 addition & 1 deletion trunk/drivers/scsi/scsi_lib.c
@@ -1406,7 +1406,7 @@ static int scsi_lld_busy(struct request_queue *q)
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
 
-	if (blk_queue_dead(q))
+	if (blk_queue_dying(q))
		return 0;
 
	shost = sdev->host;
4 changes: 2 additions & 2 deletions trunk/include/linux/blkdev.h
@@ -437,7 +437,7 @@ struct request_queue {
 #define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
 #define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
 #define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
-#define QUEUE_FLAG_DEAD		5	/* queue being torn down */
+#define QUEUE_FLAG_DYING	5	/* queue being torn down */
 #define QUEUE_FLAG_BYPASS	6	/* act as dumb FIFO queue */
 #define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
 #define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
@@ -521,7 +521,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
-#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
+#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
 #define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_noxmerges(q)	\
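
The flag keeps bit value 5, so the change is purely textual; both setter variants used in the blk_cleanup_queue() hunk above operate on the same bit in q->queue_flags. A hedged sketch mirroring that sequence (mark_queue_dying is an invented name):

#include <linux/blkdev.h>

/* Sketch mirroring blk_cleanup_queue(): mark @q dying, then verify. */
static void mark_queue_dying(struct request_queue *q)
{
	queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);	/* no queue_lock required */

	spin_lock_irq(q->queue_lock);
	queue_flag_set(QUEUE_FLAG_DYING, q);	/* variant that expects queue_lock held */
	spin_unlock_irq(q->queue_lock);

	WARN_ON(!blk_queue_dying(q));	/* test_bit(QUEUE_FLAG_DYING, ...) */
}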
