
Commit bb5af53

---
r: 285118
b: refs/heads/master
c: f1f8cc9
h: refs/heads/master
v: v3
Tejun Heo authored and Jens Axboe committed Dec 13, 2011
1 parent e95cc15 commit bb5af53
Showing 7 changed files with 174 additions and 137 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 9b84cacd013996f244d85b3d873287c2a8f88658
+refs/heads/master: f1f8cc94651738b418ba54c039df536303b91704
46 changes: 39 additions & 7 deletions trunk/block/blk-core.c
@@ -640,13 +640,18 @@ EXPORT_SYMBOL(blk_get_queue);
 
 static inline void blk_free_request(struct request_queue *q, struct request *rq)
 {
-	if (rq->cmd_flags & REQ_ELVPRIV)
+	if (rq->cmd_flags & REQ_ELVPRIV) {
 		elv_put_request(q, rq);
+		if (rq->elv.icq)
+			put_io_context(rq->elv.icq->ioc, q);
+	}
+
 	mempool_free(rq, q->rq.rq_pool);
 }
 
 static struct request *
-blk_alloc_request(struct request_queue *q, unsigned int flags, gfp_t gfp_mask)
+blk_alloc_request(struct request_queue *q, struct io_cq *icq,
+		  unsigned int flags, gfp_t gfp_mask)
 {
 	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
 
@@ -657,10 +662,15 @@ blk_alloc_request(struct request_queue *q, unsigned int flags, gfp_t gfp_mask)
 
 	rq->cmd_flags = flags | REQ_ALLOCED;
 
-	if ((flags & REQ_ELVPRIV) &&
-	    unlikely(elv_set_request(q, rq, gfp_mask))) {
-		mempool_free(rq, q->rq.rq_pool);
-		return NULL;
+	if (flags & REQ_ELVPRIV) {
+		rq->elv.icq = icq;
+		if (unlikely(elv_set_request(q, rq, gfp_mask))) {
+			mempool_free(rq, q->rq.rq_pool);
+			return NULL;
+		}
+		/* @rq->elv.icq holds on to io_context until @rq is freed */
+		if (icq)
+			get_io_context(icq->ioc);
 	}
 
 	return rq;
@@ -772,11 +782,14 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 {
 	struct request *rq = NULL;
 	struct request_list *rl = &q->rq;
+	struct elevator_type *et;
 	struct io_context *ioc;
+	struct io_cq *icq = NULL;
 	const bool is_sync = rw_is_sync(rw_flags) != 0;
 	bool retried = false;
 	int may_queue;
 retry:
+	et = q->elevator->type;
 	ioc = current->io_context;
 
 	if (unlikely(blk_queue_dead(q)))
@@ -837,17 +850,36 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	rl->count[is_sync]++;
 	rl->starved[is_sync] = 0;
 
+	/*
+	 * Decide whether the new request will be managed by elevator. If
+	 * so, mark @rw_flags and increment elvpriv. Non-zero elvpriv will
+	 * prevent the current elevator from being destroyed until the new
+	 * request is freed. This guarantees icq's won't be destroyed and
+	 * makes creating new ones safe.
+	 *
+	 * Also, lookup icq while holding queue_lock. If it doesn't exist,
+	 * it will be created after releasing queue_lock.
+	 */
 	if (blk_rq_should_init_elevator(bio) &&
 	    !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags)) {
 		rw_flags |= REQ_ELVPRIV;
 		rl->elvpriv++;
+		if (et->icq_cache && ioc)
+			icq = ioc_lookup_icq(ioc, q);
 	}
 
 	if (blk_queue_io_stat(q))
 		rw_flags |= REQ_IO_STAT;
 	spin_unlock_irq(q->queue_lock);
 
-	rq = blk_alloc_request(q, rw_flags, gfp_mask);
+	/* create icq if missing */
+	if (unlikely(et->icq_cache && !icq))
+		icq = ioc_create_icq(q, gfp_mask);
+
+	/* rqs are guaranteed to have icq on elv_set_request() if requested */
+	if (likely(!et->icq_cache || icq))
+		rq = blk_alloc_request(q, icq, rw_flags, gfp_mask);
+
 	if (unlikely(!rq)) {
 		/*
 		 * Allocation failed presumably due to memory. Undo anything
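The get_request() hunks above split icq handling around queue_lock: the icq is looked up while the lock is held, created (which may sleep) only after the lock is dropped, and then handed to blk_alloc_request(), which pins the io_context for the request's lifetime. A minimal userspace sketch of that ordering, with illustrative stand-ins throughout (a plain array instead of the radix tree, a refcount field instead of get_io_context(), and hypothetical helpers lookup_icq()/create_icq()):

#include <stdio.h>
#include <stdlib.h>

struct icq { int ioc_refcount; };

static struct icq *table[16];		/* stand-in for ioc->icq_tree */

static struct icq *lookup_icq(int qid)	/* cheap; done under the lock */
{
	return table[qid];
}

static struct icq *create_icq(int qid)	/* may sleep; done unlocked */
{
	struct icq *icq = calloc(1, sizeof(*icq));

	if (icq)
		table[qid] = icq;
	return icq;
}

static struct icq *get_request_flow(int qid)
{
	/* under queue_lock: lookup only, never an allocation */
	struct icq *icq = lookup_icq(qid);

	/* ...queue_lock dropped here... */

	/* unlocked: create the missing icq; this step may block */
	if (!icq)
		icq = create_icq(qid);

	/* the request pins the io_context until the request is freed */
	if (icq)
		icq->ioc_refcount++;
	return icq;
}

int main(void)
{
	struct icq *icq = get_request_flow(3);

	printf("icq %p, refcount %d\n", (void *)icq,
	       icq ? icq->ioc_refcount : 0);
	return 0;
}

Keeping the creation outside the lock is the point of the split: the allocation can block, and queue_lock is a spinlock.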
60 changes: 59 additions & 1 deletion trunk/block/blk-ioc.c
@@ -289,7 +289,6 @@ void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
 	kmem_cache_free(iocontext_cachep, ioc);
 	task_unlock(task);
 }
-EXPORT_SYMBOL(create_io_context_slowpath);
 
 /**
  * get_task_io_context - get io_context of a task
@@ -362,6 +361,65 @@ struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
 }
 EXPORT_SYMBOL(ioc_lookup_icq);
 
+/**
+ * ioc_create_icq - create and link io_cq
+ * @q: request_queue of interest
+ * @gfp_mask: allocation mask
+ *
+ * Make sure io_cq linking %current->io_context and @q exists. If either
+ * io_context and/or icq don't exist, they will be created using @gfp_mask.
+ *
+ * The caller is responsible for ensuring @ioc won't go away and @q is
+ * alive and will stay alive until this function returns.
+ */
+struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
+{
+	struct elevator_type *et = q->elevator->type;
+	struct io_context *ioc;
+	struct io_cq *icq;
+
+	/* allocate stuff */
+	ioc = create_io_context(current, gfp_mask, q->node);
+	if (!ioc)
+		return NULL;
+
+	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
+				    q->node);
+	if (!icq)
+		return NULL;
+
+	if (radix_tree_preload(gfp_mask) < 0) {
+		kmem_cache_free(et->icq_cache, icq);
+		return NULL;
+	}
+
+	icq->ioc = ioc;
+	icq->q = q;
+	INIT_LIST_HEAD(&icq->q_node);
+	INIT_HLIST_NODE(&icq->ioc_node);
+
+	/* lock both q and ioc and try to link @icq */
+	spin_lock_irq(q->queue_lock);
+	spin_lock(&ioc->lock);
+
+	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
+		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
+		list_add(&icq->q_node, &q->icq_list);
+		if (et->ops.elevator_init_icq_fn)
+			et->ops.elevator_init_icq_fn(icq);
+	} else {
+		kmem_cache_free(et->icq_cache, icq);
+		icq = ioc_lookup_icq(ioc, q);
+		if (!icq)
+			printk(KERN_ERR "cfq: icq link failed!\n");
+	}
+
+	spin_unlock(&ioc->lock);
+	spin_unlock_irq(q->queue_lock);
+	radix_tree_preload_end();
+	return icq;
+}
+
 void ioc_set_changed(struct io_context *ioc, int which)
 {
 	struct io_cq *icq;
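ioc_create_icq() follows a standard kernel pattern: every allocation that can sleep (the icq itself, radix_tree_preload()) happens before the spinlocks are taken, so the linking step under queue_lock and ioc->lock cannot fail on memory; and if radix_tree_insert() finds the slot already taken because another task won the race, the local icq is freed and the winner is fetched with ioc_lookup_icq(). A userspace sketch of that optimistic-insert pattern, with assumed stand-ins: a toy array for the radix tree, one mutex for the two spinlocks, and a hypothetical link_node() helper:

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

#define NR_QUEUES 16

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *tree[NR_QUEUES];		/* toy stand-in for ioc->icq_tree */

/* returns the linked node: ours, or the one a racing caller linked first */
static void *link_node(int qid, size_t size)
{
	void *node = calloc(1, size);	/* can sleep, so done unlocked */

	if (!node)
		return NULL;

	pthread_mutex_lock(&lock);
	if (!tree[qid]) {		/* insert succeeded */
		tree[qid] = node;
	} else {			/* lost the race: free ours, reuse winner */
		free(node);
		node = tree[qid];
	}
	pthread_mutex_unlock(&lock);
	return node;
}

int main(void)
{
	void *a = link_node(3, 64);
	void *b = link_node(3, 64);	/* second caller gets the same node */

	printf("same node: %s\n", a == b ? "yes" : "no");
	return 0;
}

Either caller ends up holding the same linked node, which is exactly the property ioc_create_icq() needs when two tasks race to create the icq for one (ioc, q) pair.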
1 change: 1 addition & 0 deletions trunk/block/blk.h
@@ -200,6 +200,7 @@ static inline int blk_do_io_stat(struct request *rq)
  */
 void get_io_context(struct io_context *ioc);
 struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
+struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask);
 void ioc_clear_queue(struct request_queue *q);
 
 void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_mask,
135 changes: 11 additions & 124 deletions trunk/block/cfq-iosched.c
@@ -2935,117 +2935,6 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
 	return cfqq;
 }
 
-/**
- * ioc_create_icq - create and link io_cq
- * @q: request_queue of interest
- * @gfp_mask: allocation mask
- *
- * Make sure io_cq linking %current->io_context and @q exists. If either
- * io_context and/or icq don't exist, they will be created using @gfp_mask.
- *
- * The caller is responsible for ensuring @ioc won't go away and @q is
- * alive and will stay alive until this function returns.
- */
-static struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
-{
-	struct elevator_type *et = q->elevator->type;
-	struct io_context *ioc;
-	struct io_cq *icq;
-
-	/* allocate stuff */
-	ioc = create_io_context(current, gfp_mask, q->node);
-	if (!ioc)
-		return NULL;
-
-	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
-				    q->node);
-	if (!icq)
-		return NULL;
-
-	if (radix_tree_preload(gfp_mask) < 0) {
-		kmem_cache_free(et->icq_cache, icq);
-		return NULL;
-	}
-
-	icq->ioc = ioc;
-	icq->q = q;
-	INIT_LIST_HEAD(&icq->q_node);
-	INIT_HLIST_NODE(&icq->ioc_node);
-
-	/* lock both q and ioc and try to link @icq */
-	spin_lock_irq(q->queue_lock);
-	spin_lock(&ioc->lock);
-
-	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
-		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
-		list_add(&icq->q_node, &q->icq_list);
-		if (et->ops.elevator_init_icq_fn)
-			et->ops.elevator_init_icq_fn(icq);
-	} else {
-		kmem_cache_free(et->icq_cache, icq);
-		icq = ioc_lookup_icq(ioc, q);
-		if (!icq)
-			printk(KERN_ERR "cfq: icq link failed!\n");
-	}
-
-	spin_unlock(&ioc->lock);
-	spin_unlock_irq(q->queue_lock);
-	radix_tree_preload_end();
-	return icq;
-}
-
-/**
- * cfq_get_cic - acquire cfq_io_cq and bump refcnt on io_context
- * @cfqd: cfqd to setup cic for
- * @gfp_mask: allocation mask
- *
- * Return cfq_io_cq associating @cfqd and %current->io_context and
- * bump refcnt on io_context. If ioc or cic doesn't exist, they're created
- * using @gfp_mask.
- *
- * Must be called under queue_lock which may be released and re-acquired.
- * This function also may sleep depending on @gfp_mask.
- */
-static struct cfq_io_cq *cfq_get_cic(struct cfq_data *cfqd, gfp_t gfp_mask)
-{
-	struct request_queue *q = cfqd->queue;
-	struct cfq_io_cq *cic = NULL;
-	struct io_context *ioc;
-
-	lockdep_assert_held(q->queue_lock);
-
-	while (true) {
-		/* fast path */
-		ioc = current->io_context;
-		if (likely(ioc)) {
-			cic = cfq_cic_lookup(cfqd, ioc);
-			if (likely(cic))
-				break;
-		}
-
-		/* slow path - unlock, create missing ones and retry */
-		spin_unlock_irq(q->queue_lock);
-		cic = icq_to_cic(ioc_create_icq(q, gfp_mask));
-		spin_lock_irq(q->queue_lock);
-		if (!cic)
-			return NULL;
-	}
-
-	/* bump @ioc's refcnt and handle changed notifications */
-	get_io_context(ioc);
-
-	if (unlikely(cic->icq.changed)) {
-		if (test_and_clear_bit(ICQ_IOPRIO_CHANGED, &cic->icq.changed))
-			changed_ioprio(cic);
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-		if (test_and_clear_bit(ICQ_CGROUP_CHANGED, &cic->icq.changed))
-			changed_cgroup(cic);
-#endif
-	}
-
-	return cic;
-}
-
 static void
 __cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
 {
@@ -3524,8 +3413,6 @@ static void cfq_put_request(struct request *rq)
 	BUG_ON(!cfqq->allocated[rw]);
 	cfqq->allocated[rw]--;
 
-	put_io_context(RQ_CIC(rq)->icq.ioc, cfqq->cfqd->queue);
-
 	/* Put down rq reference on cfqg */
 	cfq_put_cfqg(RQ_CFQG(rq));
 	rq->elv.priv[0] = NULL;
@@ -3574,17 +3461,24 @@ static int
 cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
-	struct cfq_io_cq *cic;
+	struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
 	const int rw = rq_data_dir(rq);
 	const bool is_sync = rq_is_sync(rq);
 	struct cfq_queue *cfqq;
 
 	might_sleep_if(gfp_mask & __GFP_WAIT);
 
 	spin_lock_irq(q->queue_lock);
-	cic = cfq_get_cic(cfqd, gfp_mask);
-	if (!cic)
-		goto queue_fail;
+
+	/* handle changed notifications */
+	if (unlikely(cic->icq.changed)) {
+		if (test_and_clear_bit(ICQ_IOPRIO_CHANGED, &cic->icq.changed))
+			changed_ioprio(cic);
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+		if (test_and_clear_bit(ICQ_CGROUP_CHANGED, &cic->icq.changed))
+			changed_cgroup(cic);
+#endif
+	}
 
 new_queue:
 	cfqq = cic_to_cfqq(cic, is_sync);
@@ -3615,17 +3509,10 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 	cfqq->allocated[rw]++;
 
 	cfqq->ref++;
-	rq->elv.icq = &cic->icq;
 	rq->elv.priv[0] = cfqq;
 	rq->elv.priv[1] = cfq_ref_get_cfqg(cfqq->cfqg);
 	spin_unlock_irq(q->queue_lock);
 	return 0;
-
-queue_fail:
-	cfq_schedule_dispatch(cfqd);
-	spin_unlock_irq(q->queue_lock);
-	cfq_log(cfqd, "set_request fail");
-	return 1;
 }
 
 static void cfq_kick_queue(struct work_struct *work)
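With creation moved to block core, cfq_set_request() now starts from a request whose elv.icq was linked before the elevator callback ran, so the old queue_fail unwinding path disappears. A sketch of the resulting callback contract, under assumed simplified types; icq_to_cic() is a container_of() wrapper in the real code, modeled here by embedding io_cq as the first member:

#include <stdio.h>

struct io_cq { int ioc_id; };

struct cfq_io_cq {
	struct io_cq icq;	/* must stay the first member */
	int ttime;
};

struct request {
	struct io_cq *icq;	/* pre-linked by block core */
};

static struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
{
	/* container_of() in the kernel; a first-member cast here */
	return (struct cfq_io_cq *)icq;
}

static int set_request(struct request *rq)
{
	struct cfq_io_cq *cic = icq_to_cic(rq->icq);

	cic->ttime++;		/* use per-(task, queue) state directly */
	return 0;		/* no icq allocation left to fail */
}

int main(void)
{
	struct cfq_io_cq cic = { .icq = { .ioc_id = 1 } };
	struct request rq = { .icq = &cic.icq };
	int ret = set_request(&rq);

	printf("set_request -> %d, ttime %d\n", ret, cic.ttime);
	return 0;
}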
8 changes: 4 additions & 4 deletions trunk/include/linux/elevator.h
@@ -60,8 +60,8 @@ struct elevator_ops
 	elevator_request_list_fn *elevator_former_req_fn;
 	elevator_request_list_fn *elevator_latter_req_fn;
 
-	elevator_init_icq_fn *elevator_init_icq_fn;
-	elevator_exit_icq_fn *elevator_exit_icq_fn;
+	elevator_init_icq_fn *elevator_init_icq_fn;	/* see iocontext.h */
+	elevator_exit_icq_fn *elevator_exit_icq_fn;	/* ditto */
 
 	elevator_set_req_fn *elevator_set_req_fn;
 	elevator_put_req_fn *elevator_put_req_fn;
@@ -90,8 +90,8 @@ struct elevator_type
 
 	/* fields provided by elevator implementation */
 	struct elevator_ops ops;
-	size_t icq_size;
-	size_t icq_align;
+	size_t icq_size;	/* see iocontext.h */
+	size_t icq_align;	/* ditto */
 	struct elv_fs_entry *elevator_attrs;
 	char elevator_name[ELV_NAME_MAX];
 	struct module *elevator_owner;
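Block core sizes the per-elevator icq kmem cache from icq_size and icq_align. An illustrative sketch, not taken from this commit, of how an elevator might fill these fields; struct my_io_cq and iosched_example are hypothetical, and the structure definitions are pared-down stand-ins so the snippet compiles on its own:

#include <stdio.h>
#include <stddef.h>

struct io_cq { void *ioc; void *q; };	/* pared-down stand-in */

struct my_io_cq {			/* hypothetical per-(task, queue) data */
	struct io_cq icq;
	long ttime_samples;
};

struct elevator_type {			/* stand-in with just the icq fields */
	size_t icq_size;		/* block core sizes the kmem cache... */
	size_t icq_align;		/* ...and aligns it from these two */
	const char *elevator_name;
};

static struct elevator_type iosched_example = {
	.icq_size	= sizeof(struct my_io_cq),
	.icq_align	= __alignof__(struct my_io_cq),
	.elevator_name	= "example",
};

int main(void)
{
	printf("%s: icq_size=%zu icq_align=%zu\n",
	       iosched_example.elevator_name,
	       iosched_example.icq_size, iosched_example.icq_align);
	return 0;
}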
