Commit fcd3817

---
r: 285105
b: refs/heads/master
c: b2efa05
h: refs/heads/master
i:
  285103: ae2da98
v: v3
Tejun Heo authored and Jens Axboe committed Dec 13, 2011
1 parent 8d538b7 commit fcd3817
Showing 8 changed files with 160 additions and 73 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: f1a4f4d35ff30a328d5ea28f6cc826b2083111d2
+refs/heads/master: b2efa05265d62bc29f3a64400fad4b44340eedb8
2 changes: 1 addition & 1 deletion trunk/block/blk-cgroup.c
@@ -1649,7 +1649,7 @@ static void blkiocg_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
	ioc = get_task_io_context(tsk, GFP_ATOMIC, NUMA_NO_NODE);
	if (ioc) {
		ioc_cgroup_changed(ioc);
-		put_io_context(ioc);
+		put_io_context(ioc, NULL);
	}
}

166 changes: 137 additions & 29 deletions trunk/block/blk-ioc.c
@@ -29,55 +29,164 @@ void get_io_context(struct io_context *ioc)
}
EXPORT_SYMBOL(get_io_context);

-static void cfq_dtor(struct io_context *ioc)
+/*
+ * Releasing ioc may nest into another put_io_context() leading to nested
+ * fast path release.  As the ioc's can't be the same, this is okay but
+ * makes lockdep whine.  Keep track of nesting and use it as subclass.
+ */
+#ifdef CONFIG_LOCKDEP
+#define ioc_release_depth(q)		((q) ? (q)->ioc_release_depth : 0)
+#define ioc_release_depth_inc(q)	(q)->ioc_release_depth++
+#define ioc_release_depth_dec(q)	(q)->ioc_release_depth--
+#else
+#define ioc_release_depth(q)		0
+#define ioc_release_depth_inc(q)	do { } while (0)
+#define ioc_release_depth_dec(q)	do { } while (0)
+#endif
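
/*
 * Illustrative sketch (not part of this commit) of how these helpers are
 * meant to be consumed: the current per-queue depth becomes the lockdep
 * subclass for ioc->lock, and the depth is bumped around the callbacks
 * that may recurse into put_io_context() on a *different* ioc.  The real
 * usage is in put_io_context() and ioc_release_fn() below; the function
 * name here is hypothetical.
 */
static void example_release_one_cic(struct io_context *ioc,
				    struct cfq_io_context *cic,
				    struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&ioc->lock, flags, ioc_release_depth(q));
	ioc_release_depth_inc(q);	/* nested puts see subclass + 1 */
	cic->exit(cic);
	cic->release(cic);
	ioc_release_depth_dec(q);
	spin_unlock_irqrestore(&ioc->lock, flags);
}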

+/*
+ * Slow path for ioc release in put_io_context().  Performs double-lock
+ * dancing to unlink all cic's and then frees ioc.
+ */
+static void ioc_release_fn(struct work_struct *work)
{
-	if (!hlist_empty(&ioc->cic_list)) {
-		struct cfq_io_context *cic;
+	struct io_context *ioc = container_of(work, struct io_context,
+					      release_work);
+	struct request_queue *last_q = NULL;

+	spin_lock_irq(&ioc->lock);
+
+	while (!hlist_empty(&ioc->cic_list)) {
+		struct cfq_io_context *cic = hlist_entry(ioc->cic_list.first,
+							 struct cfq_io_context,
+							 cic_list);
+		struct request_queue *this_q = cic->q;
+
+		if (this_q != last_q) {
+			/*
+			 * Need to switch to @this_q.  Once we release
+			 * @ioc->lock, it can go away along with @cic.
+			 * Hold on to it.
+			 */
+			__blk_get_queue(this_q);
+
+			/*
+			 * blk_put_queue() might sleep thanks to kobject
+			 * idiocy.  Always release both locks, put and
+			 * restart.
+			 */
+			if (last_q) {
+				spin_unlock(last_q->queue_lock);
+				spin_unlock_irq(&ioc->lock);
+				blk_put_queue(last_q);
+			} else {
+				spin_unlock_irq(&ioc->lock);
+			}
+
+			last_q = this_q;
+			spin_lock_irq(this_q->queue_lock);
+			spin_lock(&ioc->lock);
+			continue;
+		}
+		ioc_release_depth_inc(this_q);
+		cic->exit(cic);
+		cic->release(cic);
+		ioc_release_depth_dec(this_q);
+	}

-		cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
-								cic_list);
-		cic->dtor(ioc);
-	}
+	if (last_q) {
+		spin_unlock(last_q->queue_lock);
+		spin_unlock_irq(&ioc->lock);
+		blk_put_queue(last_q);
+	} else {
+		spin_unlock_irq(&ioc->lock);
+	}
+
+	kmem_cache_free(iocontext_cachep, ioc);
}

/**
* put_io_context - put a reference of io_context
* @ioc: io_context to put
+ * @locked_q: request_queue the caller is holding queue_lock of (hint)
*
* Decrement reference count of @ioc and release it if the count reaches
- * zero.
+ * zero.  If the caller is holding queue_lock of a queue, it can indicate
+ * that with @locked_q.  This is an optimization hint and the caller is
+ * allowed to pass in %NULL even when it's holding a queue_lock.
*/
-void put_io_context(struct io_context *ioc)
+void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
{
+	struct request_queue *last_q = locked_q;
+	unsigned long flags;
+
	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
+	if (locked_q)
+		lockdep_assert_held(locked_q->queue_lock);

	if (!atomic_long_dec_and_test(&ioc->refcount))
		return;

-	rcu_read_lock();
-	cfq_dtor(ioc);
-	rcu_read_unlock();
-
-	kmem_cache_free(iocontext_cachep, ioc);
-}
-EXPORT_SYMBOL(put_io_context);
+	/*
+	 * Destroy @ioc.  This is a bit messy because cic's are chained
+	 * from both ioc and queue, and ioc->lock nests inside queue_lock.
+	 * The inner ioc->lock should be held to walk our cic_list and then
+	 * for each cic the outer matching queue_lock should be grabbed.
+	 * ie. We need to do reverse-order double lock dancing.
+	 *
+	 * Another twist is that we are often called with one of the
+	 * matching queue_locks held as indicated by @locked_q, which
+	 * prevents performing double-lock dance for other queues.
+	 *
+	 * So, we do it in two stages.  The fast path uses the queue_lock
+	 * the caller is holding and, if other queues need to be accessed,
+	 * uses trylock to avoid introducing locking dependency.  This can
+	 * handle most cases, especially if @ioc was performing IO on only
+	 * a single device.
+	 *
+	 * If trylock doesn't cut it, we defer to @ioc->release_work which
+	 * can do all the double-locking dancing.
+	 */
+	spin_lock_irqsave_nested(&ioc->lock, flags,
+				 ioc_release_depth(locked_q));

+	while (!hlist_empty(&ioc->cic_list)) {
+		struct cfq_io_context *cic = hlist_entry(ioc->cic_list.first,
+							 struct cfq_io_context,
+							 cic_list);
+		struct request_queue *this_q = cic->q;
+
+		if (this_q != last_q) {
+			if (last_q && last_q != locked_q)
+				spin_unlock(last_q->queue_lock);
+			last_q = NULL;
+
+			if (!spin_trylock(this_q->queue_lock))
+				break;
+			last_q = this_q;
+			continue;
+		}
+		ioc_release_depth_inc(this_q);
+		cic->exit(cic);
+		cic->release(cic);
+		ioc_release_depth_dec(this_q);
+	}

-static void cfq_exit(struct io_context *ioc)
-{
-	rcu_read_lock();
+	if (last_q && last_q != locked_q)
+		spin_unlock(last_q->queue_lock);

-	if (!hlist_empty(&ioc->cic_list)) {
-		struct cfq_io_context *cic;
+	spin_unlock_irqrestore(&ioc->lock, flags);

-		cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
-								cic_list);
-		cic->exit(ioc);
-	}
-	rcu_read_unlock();
+	/* if no cic's left, we're done; otherwise, kick release_work */
+	if (hlist_empty(&ioc->cic_list))
+		kmem_cache_free(iocontext_cachep, ioc);
+	else
+		schedule_work(&ioc->release_work);
}
+EXPORT_SYMBOL(put_io_context);
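
/*
 * Illustrative usage sketch (not part of this commit).  Callers that
 * already hold a queue_lock may pass that queue as the hint; NULL is
 * always a correct hint.  Both forms appear in this commit: e.g.
 * __cfq_slice_expired() passes cfqd->queue while set_task_ioprio()
 * passes NULL.  The wrapper names below are hypothetical.
 */
static void example_put_ioc_queue_locked(struct cfq_data *cfqd,
					 struct io_context *ioc)
{
	/* caller holds cfqd->queue->queue_lock */
	put_io_context(ioc, cfqd->queue);
}

static void example_put_ioc_unlocked(struct io_context *ioc)
{
	/* no queue_lock held */
	put_io_context(ioc, NULL);
}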

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
@@ -92,10 +201,8 @@ void exit_io_context(struct task_struct *task)
	task->io_context = NULL;
	task_unlock(task);

-	if (atomic_dec_and_test(&ioc->nr_tasks))
-		cfq_exit(ioc);
-
-	put_io_context(ioc);
+	atomic_dec(&ioc->nr_tasks);
+	put_io_context(ioc, NULL);
}

static struct io_context *create_task_io_context(struct task_struct *task,
@@ -115,6 +222,7 @@ static struct io_context *create_task_io_context(struct task_struct *task,
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->radix_root, GFP_ATOMIC | __GFP_HIGH);
	INIT_HLIST_HEAD(&ioc->cic_list);
+	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/* try to install, somebody might already have beaten us to it */
	task_lock(task);
44 changes: 8 additions & 36 deletions trunk/block/cfq-iosched.c
@@ -1778,7 +1778,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
	cfqd->active_queue = NULL;

	if (cfqd->active_cic) {
-		put_io_context(cfqd->active_cic->ioc);
+		put_io_context(cfqd->active_cic->ioc, cfqd->queue);
		cfqd->active_cic = NULL;
	}
}
@@ -2812,38 +2812,6 @@ static void cfq_exit_cic(struct cfq_io_context *cic)
	}
}

-static void cfq_exit_single_io_context(struct io_context *ioc,
-				       struct cfq_io_context *cic)
-{
-	struct cfq_data *cfqd = cic_to_cfqd(cic);
-
-	if (cfqd) {
-		struct request_queue *q = cfqd->queue;
-		unsigned long flags;
-
-		spin_lock_irqsave(q->queue_lock, flags);
-
-		/*
-		 * Ensure we get a fresh copy of the ->key to prevent
-		 * race between exiting task and queue
-		 */
-		smp_read_barrier_depends();
-		if (cic->key == cfqd)
-			cfq_exit_cic(cic);
-
-		spin_unlock_irqrestore(q->queue_lock, flags);
-	}
-}
-
-/*
- * The process that ioc belongs to has exited, we need to clean up
- * and put the internal structures we have that belongs to that process.
- */
-static void cfq_exit_io_context(struct io_context *ioc)
-{
-	call_for_each_cic(ioc, cfq_exit_single_io_context);
-}

static struct cfq_io_context *
cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
@@ -2855,8 +2823,8 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
		cic->ttime.last_end_request = jiffies;
		INIT_LIST_HEAD(&cic->queue_list);
		INIT_HLIST_NODE(&cic->cic_list);
-		cic->dtor = cfq_free_io_context;
-		cic->exit = cfq_exit_io_context;
+		cic->exit = cfq_exit_cic;
+		cic->release = cfq_release_cic;
		elv_ioc_count_inc(cfq_ioc_count);
	}

@@ -3726,7 +3694,7 @@ static void cfq_put_request(struct request *rq)
	BUG_ON(!cfqq->allocated[rw]);
	cfqq->allocated[rw]--;

-	put_io_context(RQ_CIC(rq)->ioc);
+	put_io_context(RQ_CIC(rq)->ioc, cfqq->cfqd->queue);

	rq->elevator_private[0] = NULL;
	rq->elevator_private[1] = NULL;

@@ -3937,8 +3905,12 @@ static void cfq_exit_queue(struct elevator_queue *e)
		struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
							struct cfq_io_context,
							queue_list);
+		struct io_context *ioc = cic->ioc;

+		spin_lock(&ioc->lock);
		cfq_exit_cic(cic);
+		cfq_release_cic(cic);
+		spin_unlock(&ioc->lock);
	}

	cfq_put_async_queues(cfqd);
2 changes: 1 addition & 1 deletion trunk/fs/ioprio.c
@@ -51,7 +51,7 @@ int set_task_ioprio(struct task_struct *task, int ioprio)
	ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
	if (ioc) {
		ioc_ioprio_changed(ioc, ioprio);
-		put_io_context(ioc);
+		put_io_context(ioc, NULL);
	}

	return err;
3 changes: 3 additions & 0 deletions trunk/include/linux/blkdev.h
@@ -393,6 +393,9 @@ struct request_queue {
	/* Throttle data */
	struct throtl_data *td;
#endif
+#ifdef CONFIG_LOCKDEP
+	int			ioc_release_depth;
+#endif
};

#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
12 changes: 8 additions & 4 deletions trunk/include/linux/iocontext.h
@@ -3,6 +3,7 @@

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
+#include <linux/workqueue.h>

struct cfq_queue;
struct cfq_ttime {
@@ -33,8 +34,8 @@ struct cfq_io_context {

	unsigned long changed;

-	void (*dtor)(struct io_context *); /* destructor */
-	void (*exit)(struct io_context *); /* called on task exit */
+	void (*exit)(struct cfq_io_context *);
+	void (*release)(struct cfq_io_context *);

	struct rcu_head rcu_head;
};
@@ -61,6 +62,8 @@ struct io_context {
	struct radix_tree_root radix_root;
	struct hlist_head cic_list;
	void __rcu *ioc_data;
+
+	struct work_struct release_work;
};

static inline struct io_context *ioc_task_link(struct io_context *ioc)
@@ -79,15 +82,16 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)

struct task_struct;
#ifdef CONFIG_BLOCK
-void put_io_context(struct io_context *ioc);
+void put_io_context(struct io_context *ioc, struct request_queue *locked_q);
void exit_io_context(struct task_struct *task);
struct io_context *get_task_io_context(struct task_struct *task,
gfp_t gfp_flags, int node);
void ioc_ioprio_changed(struct io_context *ioc, int ioprio);
void ioc_cgroup_changed(struct io_context *ioc);
#else
struct io_context;
-static inline void put_io_context(struct io_context *ioc) { }
+static inline void put_io_context(struct io_context *ioc,
+				  struct request_queue *locked_q) { }
static inline void exit_io_context(struct task_struct *task) { }
#endif

2 changes: 1 addition & 1 deletion trunk/kernel/fork.c
@@ -887,7 +887,7 @@ static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
			return -ENOMEM;

		new_ioc->ioprio = ioc->ioprio;
-		put_io_context(new_ioc);
+		put_io_context(new_ioc, NULL);
	}
#endif
	return 0;
