diff --git a/[refs] b/[refs]
index dedbf605e74f..f5096c7fe3e2 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 75eb6c372d41d6d140b893873f6687d78c987a44
+refs/heads/master: 315fceee81155ef2aeed9316ca72aeea9347db5c
diff --git a/trunk/block/blk-throttle.c b/trunk/block/blk-throttle.c
index f3f495ea4eeb..ecba5fcef201 100644
--- a/trunk/block/blk-throttle.c
+++ b/trunk/block/blk-throttle.c
@@ -324,12 +324,8 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	/*
 	 * Need to allocate a group. Allocation of group also needs allocation
 	 * of per cpu stats which in-turn takes a mutex() and can block. Hence
-	 * we need to drop rcu lock and queue_lock before we call alloc
-	 *
-	 * Take the request queue reference to make sure queue does not
-	 * go away once we return from allocation.
+	 * we need to drop rcu lock and queue_lock before we call alloc.
 	 */
-	blk_get_queue(q);
 	rcu_read_unlock();
 	spin_unlock_irq(q->queue_lock);
 
@@ -339,13 +335,11 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	 * dead
 	 */
 	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
-		blk_put_queue(q);
 		if (tg)
 			kfree(tg);
 		return ERR_PTR(-ENODEV);
 	}
-	blk_put_queue(q);
 
 	/* Group allocated and queue is still alive. take the lock */
 	spin_lock_irq(q->queue_lock);
 
diff --git a/trunk/block/scsi_ioctl.c b/trunk/block/scsi_ioctl.c
index 4f4230b79bb6..fbdf0d802ec4 100644
--- a/trunk/block/scsi_ioctl.c
+++ b/trunk/block/scsi_ioctl.c
@@ -565,7 +565,7 @@ int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mod
 {
 	int err;
 
-	if (!q || blk_get_queue(q))
+	if (!q)
 		return -ENXIO;
 
 	switch (cmd) {
@@ -686,7 +686,6 @@ int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mod
 		err = -ENOTTY;
 	}
 
-	blk_put_queue(q);
 	return err;
 }
 EXPORT_SYMBOL(scsi_cmd_ioctl);
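
Note on the blk-throttle hunks above: throtl_alloc_tg() can sleep (its per-cpu stats allocation takes a mutex), so throtl_get_tg() drops both the RCU read lock and queue_lock around the allocation, then re-checks QUEUE_FLAG_DEAD before retaking the lock. The deleted blk_get_queue()/blk_put_queue() pair pinned the queue across that unlocked window; the patch removes it and relies on the QUEUE_FLAG_DEAD re-check instead. Below is a minimal sketch of the resulting control flow, assembled from the post-patch context lines of the diff. It is illustrative kernel-style C, not a compilable excerpt; the lookup and linking steps are elided, and the td->queue field and throtl_alloc_tg() helper are assumed from the surrounding code.

static struct throtl_grp *throtl_get_tg(struct throtl_data *td)
{
	struct throtl_grp *tg;
	struct request_queue *q = td->queue;	/* assumed field name */

	/* ... fast path: find an existing group under rcu + queue_lock ... */

	/*
	 * Allocation can block, so drop both locks first. After this
	 * patch, no extra queue reference is taken for the window.
	 */
	rcu_read_unlock();
	spin_unlock_irq(q->queue_lock);

	tg = throtl_alloc_tg(td);

	/* We may have slept; the queue may have been marked dead meanwhile. */
	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
		if (tg)
			kfree(tg);
		return ERR_PTR(-ENODEV);
	}

	/* Group allocated and queue is still alive. take the lock */
	spin_lock_irq(q->queue_lock);

	/* ... link the new group into td and return it ... */
}

The scsi_ioctl.c hunks are the same kind of cleanup: the blk_get_queue()/blk_put_queue() pair around the body of scsi_cmd_ioctl() is dropped, which is safe only if every caller already holds a queue reference (for example via the open block device that the ioctl was issued against), making the extra get/put redundant.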