Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 309199
b: refs/heads/master
c: 0a5a7d0
h: refs/heads/master
i:
  309197: b098035
  309195: 64f0a47
  309191: 6b432ed
  309183: ba1b136
v: v3
  • Loading branch information
Tejun Heo authored and Jens Axboe committed Mar 6, 2012
1 parent db728b7 commit 841e6bb
Show file tree
Hide file tree
Showing 3 changed files with 20 additions and 18 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 2a7f124414b35645049e9c1b125a6f0b470aa5ae
refs/heads/master: 0a5a7d0e32be6643b881f0e7cd9d0d06fadde27a
16 changes: 7 additions & 9 deletions trunk/block/blk-throttle.c
Original file line number Diff line number Diff line change
Expand Up @@ -303,21 +303,23 @@ throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
return tg;
}

static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
static struct throtl_grp *throtl_get_tg(struct throtl_data *td,
struct blkio_cgroup *blkcg)
{
struct throtl_grp *tg = NULL, *__tg = NULL;
struct blkio_cgroup *blkcg;
struct request_queue *q = td->queue;

/* no throttling for dead queue */
if (unlikely(blk_queue_bypass(q)))
return NULL;

blkcg = task_blkio_cgroup(current);
tg = throtl_find_tg(td, blkcg);
if (tg)
return tg;

if (!css_tryget(&blkcg->css))
return NULL;

/*
* Need to allocate a group. Allocation of group also needs allocation
* of per cpu stats which in-turn takes a mutex() and can block. Hence
Expand All @@ -331,18 +333,14 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
/* Group allocated and queue is still alive. take the lock */
rcu_read_lock();
spin_lock_irq(q->queue_lock);
css_put(&blkcg->css);

/* Make sure @q is still alive */
if (unlikely(blk_queue_bypass(q))) {
kfree(tg);
return NULL;
}

/*
* Initialize the new group. After sleeping, read the blkcg again.
*/
blkcg = task_blkio_cgroup(current);

/*
* If some other thread already allocated the group while we were
* not holding queue lock, free up the group
Expand Down Expand Up @@ -1163,7 +1161,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
* IO group
*/
spin_lock_irq(q->queue_lock);
tg = throtl_get_tg(td);
tg = throtl_get_tg(td, blkcg);
if (unlikely(!tg))
goto out_unlock;

Expand Down
20 changes: 12 additions & 8 deletions trunk/block/cfq-iosched.c
Original file line number Diff line number Diff line change
Expand Up @@ -1122,17 +1122,19 @@ cfq_find_cfqg(struct cfq_data *cfqd, struct blkio_cgroup *blkcg)
* Search for the cfq group current task belongs to. request_queue lock must
* be held.
*/
static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd,
struct blkio_cgroup *blkcg)
{
struct blkio_cgroup *blkcg;
struct cfq_group *cfqg = NULL, *__cfqg = NULL;
struct request_queue *q = cfqd->queue;

blkcg = task_blkio_cgroup(current);
cfqg = cfq_find_cfqg(cfqd, blkcg);
if (cfqg)
return cfqg;

if (!css_tryget(&blkcg->css))
return NULL;

/*
* Need to allocate a group. Allocation of group also needs allocation
* of per cpu stats which in-turn takes a mutex() and can block. Hence
Expand All @@ -1142,16 +1144,14 @@ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
* around by the time we return. CFQ queue allocation code does
* the same. It might be racy though.
*/

rcu_read_unlock();
spin_unlock_irq(q->queue_lock);

cfqg = cfq_alloc_cfqg(cfqd);

spin_lock_irq(q->queue_lock);

rcu_read_lock();
blkcg = task_blkio_cgroup(current);
css_put(&blkcg->css);

/*
* If some other thread already allocated the group while we were
Expand Down Expand Up @@ -1278,7 +1278,8 @@ static bool cfq_clear_queue(struct request_queue *q)
}

#else /* GROUP_IOSCHED */
static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
/*
 * Stub used when group I/O scheduling is compiled out (this is the
 * #else /* GROUP_IOSCHED */ branch): there is only ever one group,
 * so @blkcg is accepted for signature parity with the GROUP_IOSCHED
 * variant but deliberately ignored.
 */
static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd,
struct blkio_cgroup *blkcg)
{
/* Every request maps to the embedded root group. */
return &cfqd->root_group;
}
Expand Down Expand Up @@ -2860,14 +2861,17 @@ static struct cfq_queue *
cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
struct io_context *ioc, gfp_t gfp_mask)
{
struct blkio_cgroup *blkcg;
struct cfq_queue *cfqq, *new_cfqq = NULL;
struct cfq_io_cq *cic;
struct cfq_group *cfqg;

retry:
rcu_read_lock();

cfqg = cfq_get_cfqg(cfqd);
blkcg = task_blkio_cgroup(current);

cfqg = cfq_get_cfqg(cfqd, blkcg);
cic = cfq_cic_lookup(cfqd, ioc);
/* cic always exists here */
cfqq = cic_to_cfqq(cic, is_sync);
Expand Down

0 comments on commit 841e6bb

Please sign in to comment.