Commit
---
r: 285109
b: refs/heads/master
c: f2dbd76
h: refs/heads/master
i:
  285107: afecf7d
v: v3
Tejun Heo authored and Jens Axboe committed Dec 13, 2011
1 parent 8858071 commit 1a7bf8c
Showing 5 changed files with 72 additions and 55 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 1238033c79e92e5c315af12e45396f1a78c73dec
+refs/heads/master: f2dbd76a0a994bc1d5a3d0e7c844cc373832e86c
25 changes: 20 additions & 5 deletions trunk/block/blk-core.c
@@ -771,9 +771,12 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 {
         struct request *rq = NULL;
         struct request_list *rl = &q->rq;
-        struct io_context *ioc = NULL;
+        struct io_context *ioc;
         const bool is_sync = rw_is_sync(rw_flags) != 0;
+        bool retried = false;
         int may_queue;
+retry:
+        ioc = current->io_context;
 
         if (unlikely(blk_queue_dead(q)))
                 return NULL;
@@ -784,7 +787,20 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 
         if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
                 if (rl->count[is_sync]+1 >= q->nr_requests) {
-                        ioc = current_io_context(GFP_ATOMIC, q->node);
+                        /*
+                         * We want ioc to record batching state. If it's
+                         * not already there, creating a new one requires
+                         * dropping queue_lock, which in turn requires
+                         * retesting conditions to avoid queue hang.
+                         */
+                        if (!ioc && !retried) {
+                                spin_unlock_irq(q->queue_lock);
+                                create_io_context(current, gfp_mask, q->node);
+                                spin_lock_irq(q->queue_lock);
+                                retried = true;
+                                goto retry;
+                        }
+
                         /*
                          * The queue will fill after this allocation, so set
                          * it as full, and mark this process as "batching".
@@ -892,7 +908,6 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
         rq = get_request(q, rw_flags, bio, GFP_NOIO);
         while (!rq) {
                 DEFINE_WAIT(wait);
-                struct io_context *ioc;
                 struct request_list *rl = &q->rq;
 
                 if (unlikely(blk_queue_dead(q)))
@@ -912,8 +927,8 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
                  * up to a big batch of them for a small period time.
                  * See ioc_batching, ioc_set_batching
                  */
-                ioc = current_io_context(GFP_NOIO, q->node);
-                ioc_set_batching(q, ioc);
+                create_io_context(current, GFP_NOIO, q->node);
+                ioc_set_batching(q, current->io_context);
 
                 spin_lock_irq(q->queue_lock);
                 finish_wait(&rl->wait[is_sync], &wait);
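The comment added to get_request() above captures the pattern behind this change: the io_context is needed to record batching state, but allocating one may sleep, so queue_lock has to be dropped, the allocation attempted, the lock retaken, and the congestion checks redone from the top. Below is a minimal user-space sketch of that drop-lock/allocate/relock/retest shape, using a pthread mutex in place of queue_lock; every name in it (struct queue, thread_ctx, try_get_slot) is invented for illustration, and it deliberately ignores the IRQ-disabling side of spin_lock_irq().

/*
 * Sketch only: drop the lock, allocate, retake the lock, retest.
 * Not kernel code; all names are made up.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct queue {
        pthread_mutex_t lock;
        int count;
        int limit;
};

/* stands in for current->io_context */
static __thread void *thread_ctx;

static bool try_get_slot(struct queue *q)
{
        bool retried = false;

        pthread_mutex_lock(&q->lock);
retry:
        if (q->count + 1 >= q->limit) {
                if (!thread_ctx && !retried) {
                        /* The allocation may block, so it cannot run under the lock. */
                        pthread_mutex_unlock(&q->lock);
                        thread_ctx = malloc(64);        /* may fail; conditions are retested */
                        pthread_mutex_lock(&q->lock);
                        retried = true;
                        /* Queue state may have changed while the lock was dropped. */
                        goto retry;
                }
                pthread_mutex_unlock(&q->lock);
                return false;           /* still congested; caller backs off */
        }

        q->count++;
        pthread_mutex_unlock(&q->lock);
        return true;
}

int main(void)
{
        struct queue q = { .lock = PTHREAD_MUTEX_INITIALIZER, .count = 0, .limit = 2 };

        /* Uncongested queue: the slot is granted without any allocation. */
        return try_get_slot(&q) ? 0 : 1;
}
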
62 changes: 17 additions & 45 deletions trunk/block/blk-ioc.c
@@ -205,16 +205,15 @@ void exit_io_context(struct task_struct *task)
         put_io_context(ioc, NULL);
 }
 
-static struct io_context *create_task_io_context(struct task_struct *task,
-                                                 gfp_t gfp_flags, int node,
-                                                 bool take_ref)
+void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
+                                int node)
 {
         struct io_context *ioc;
 
         ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
                                     node);
         if (unlikely(!ioc))
-                return NULL;
+                return;
 
         /* initialize */
         atomic_long_set(&ioc->refcount, 1);
@@ -226,42 +225,13 @@ static struct io_context *create_task_io_context(struct task_struct *task,
 
         /* try to install, somebody might already have beaten us to it */
         task_lock(task);
-
-        if (!task->io_context && !(task->flags & PF_EXITING)) {
+        if (!task->io_context && !(task->flags & PF_EXITING))
                 task->io_context = ioc;
-        } else {
+        else
                 kmem_cache_free(iocontext_cachep, ioc);
-                ioc = task->io_context;
-        }
-
-        if (ioc && take_ref)
-                get_io_context(ioc);
-
         task_unlock(task);
-        return ioc;
-}
-
-/**
- * current_io_context - get io_context of %current
- * @gfp_flags: allocation flags, used if allocation is necessary
- * @node: allocation node, used if allocation is necessary
- *
- * Return io_context of %current. If it doesn't exist, it is created with
- * @gfp_flags and @node. The returned io_context does NOT have its
- * reference count incremented. Because io_context is exited only on task
- * exit, %current can be sure that the returned io_context is valid and
- * alive as long as it is executing.
- */
-struct io_context *current_io_context(gfp_t gfp_flags, int node)
-{
-        might_sleep_if(gfp_flags & __GFP_WAIT);
-
-        if (current->io_context)
-                return current->io_context;
-
-        return create_task_io_context(current, gfp_flags, node, false);
-}
-EXPORT_SYMBOL(current_io_context);
+EXPORT_SYMBOL(create_io_context_slowpath);
 
 /**
  * get_task_io_context - get io_context of a task
@@ -274,7 +244,7 @@ EXPORT_SYMBOL(current_io_context);
  * incremented.
  *
  * This function always goes through task_lock() and it's better to use
- * current_io_context() + get_io_context() for %current.
+ * %current->io_context + get_io_context() for %current.
  */
 struct io_context *get_task_io_context(struct task_struct *task,
                                        gfp_t gfp_flags, int node)
@@ -283,16 +253,18 @@ struct io_context *get_task_io_context(struct task_struct *task,
 
         might_sleep_if(gfp_flags & __GFP_WAIT);
 
-        task_lock(task);
-        ioc = task->io_context;
-        if (likely(ioc)) {
-                get_io_context(ioc);
-                task_unlock(task);
-                return ioc;
-        }
-        task_unlock(task);
+        do {
+                task_lock(task);
+                ioc = task->io_context;
+                if (likely(ioc)) {
+                        get_io_context(ioc);
+                        task_unlock(task);
+                        return ioc;
+                }
+                task_unlock(task);
+        } while (create_io_context(task, gfp_flags, node));
 
-        return create_task_io_context(task, gfp_flags, node, true);
+        return NULL;
 }
 EXPORT_SYMBOL(get_task_io_context);
 
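create_io_context_slowpath() above uses a common lock-avoidance idiom: allocate the object without holding any lock, then take task_lock() only long enough to either install it or, if another thread (or task exit) got there first, free the spare copy. The reworked get_task_io_context() builds on the same idea with a do/while loop: the reference is taken under task_lock() only when the context is actually present, and the function gives up with NULL once create_io_context() can no longer produce one. A small user-space sketch of the install-or-free half follows; struct task, struct ctx and create_ctx_slowpath() are made-up names, not the kernel API.

/*
 * Sketch only: allocate outside the lock, install-or-free under it.
 * All names are invented; this is not the kernel implementation.
 */
#include <pthread.h>
#include <stdlib.h>

struct ctx {
        long refcount;
};

struct task {
        pthread_mutex_t lock;   /* plays the role of task_lock() */
        struct ctx *ctx;        /* plays the role of task->io_context */
        int exiting;            /* plays the role of PF_EXITING */
};

static void create_ctx_slowpath(struct task *t)
{
        struct ctx *c = calloc(1, sizeof(*c));

        if (!c)
                return;         /* caller copes with t->ctx staying NULL */

        c->refcount = 1;

        /* Try to install; somebody might already have beaten us to it. */
        pthread_mutex_lock(&t->lock);
        if (!t->ctx && !t->exiting)
                t->ctx = c;
        else
                free(c);        /* lost the race: drop our unused copy */
        pthread_mutex_unlock(&t->lock);
}

int main(void)
{
        struct task t = { .lock = PTHREAD_MUTEX_INITIALIZER, .ctx = NULL, .exiting = 0 };

        create_ctx_slowpath(&t);        /* installs a fresh ctx */
        create_ctx_slowpath(&t);        /* loses the "race" and frees its copy */
        free(t.ctx);
        return 0;
}
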
36 changes: 33 additions & 3 deletions trunk/block/blk.h
@@ -127,9 +127,6 @@ static inline int blk_should_fake_timeout(struct request_queue *q)
 }
 #endif
 
-void get_io_context(struct io_context *ioc);
-struct io_context *current_io_context(gfp_t gfp_flags, int node);
-
 int ll_back_merge_fn(struct request_queue *q, struct request *req,
                      struct bio *bio);
 int ll_front_merge_fn(struct request_queue *q, struct request *req,
@@ -198,6 +195,39 @@ static inline int blk_do_io_stat(struct request *rq)
                 (rq->cmd_flags & REQ_DISCARD));
 }
 
+/*
+ * Internal io_context interface
+ */
+void get_io_context(struct io_context *ioc);
+
+void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_mask,
+                                int node);
+
+/**
+ * create_io_context - try to create task->io_context
+ * @task: target task
+ * @gfp_mask: allocation mask
+ * @node: allocation node
+ *
+ * If @task->io_context is %NULL, allocate a new io_context and install it.
+ * Returns the current @task->io_context which may be %NULL if allocation
+ * failed.
+ *
+ * Note that this function can't be called with IRQ disabled because
+ * task_lock which protects @task->io_context is IRQ-unsafe.
+ */
+static inline struct io_context *create_io_context(struct task_struct *task,
+                                                   gfp_t gfp_mask, int node)
+{
+        WARN_ON_ONCE(irqs_disabled());
+        if (unlikely(!task->io_context))
+                create_io_context_slowpath(task, gfp_mask, node);
+        return task->io_context;
+}
+
+/*
+ * Internal throttling interface
+ */
 #ifdef CONFIG_BLK_DEV_THROTTLING
 extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
 extern void blk_throtl_drain(struct request_queue *q);
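The create_io_context() inline documented above is the fast path: it falls back to the slow path only when task->io_context is missing, and it hands back whatever the task ends up with, which can still be NULL if the allocation failed, so callers such as the cfq_create_cic() hunk below keep their NULL checks. Here is a compilable user-space sketch of that fast-path/slow-path split, with invented names (struct task, create_ctx, create_ctx_slowpath):

/*
 * Sketch only: an inline fast path that skips the slow path when the
 * context already exists. Not the kernel code; names are made up.
 */
#include <stdlib.h>

struct ctx {
        long refcount;
};

struct task {
        struct ctx *ctx;
};

static void create_ctx_slowpath(struct task *t)
{
        struct ctx *c = calloc(1, sizeof(*c));

        if (c) {
                c->refcount = 1;
                t->ctx = c;     /* the real code installs this under task_lock() */
        }
}

static inline struct ctx *create_ctx(struct task *t)
{
        if (!t->ctx)            /* common case: already there, no function call */
                create_ctx_slowpath(t);
        return t->ctx;          /* may still be NULL if the allocation failed */
}

int main(void)
{
        struct task t = { .ctx = NULL };

        if (!create_ctx(&t))    /* callers still have to handle NULL */
                return 1;
        free(t.ctx);
        return 0;
}
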
2 changes: 1 addition & 1 deletion trunk/block/cfq-iosched.c
@@ -3012,7 +3012,7 @@ static int cfq_create_cic(struct cfq_data *cfqd, gfp_t gfp_mask)
         might_sleep_if(gfp_mask & __GFP_WAIT);
 
         /* allocate stuff */
-        ioc = current_io_context(gfp_mask, q->node);
+        ioc = create_io_context(current, gfp_mask, q->node);
         if (!ioc)
                 goto out;
 