Commit

---
r: 230622
b: refs/heads/master
c: 81c5e2a
h: refs/heads/master
v: v3
Jens Axboe committed Jan 13, 2011
1 parent da8ed1b commit ef9192f
Showing 17 changed files with 136 additions and 96 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: fcc57045d53edc35bcce456e60ac4aa802712934
+refs/heads/master: 81c5e2ae33c4b19e53966b427e33646bf6811830
30 changes: 24 additions & 6 deletions trunk/block/blk-core.c
@@ -64,13 +64,27 @@ static void drive_stat_acct(struct request *rq, int new_io)
 		return;
 
 	cpu = part_stat_lock();
-	part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
 
-	if (!new_io)
+	if (!new_io) {
+		part = rq->part;
 		part_stat_inc(cpu, part, merges[rw]);
-	else {
+	} else {
+		part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
+		if (!hd_struct_try_get(part)) {
+			/*
+			 * The partition is already being removed,
+			 * the request will be accounted on the disk only
+			 *
+			 * We take a reference on disk->part0 although that
+			 * partition will never be deleted, so we can treat
+			 * it as any other partition.
+			 */
+			part = &rq->rq_disk->part0;
+			hd_struct_get(part);
+		}
 		part_round_stats(cpu, part);
 		part_inc_in_flight(part, rw);
+		rq->part = part;
 	}
 
 	part_stat_unlock();
@@ -128,6 +142,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 	rq->ref_count = 1;
 	rq->start_time = jiffies;
 	set_start_time_ns(rq);
+	rq->part = NULL;
 }
 EXPORT_SYMBOL(blk_rq_init);

@@ -1776,7 +1791,7 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
 		int cpu;
 
 		cpu = part_stat_lock();
-		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
+		part = req->part;
 		part_stat_add(cpu, part, sectors[rw], bytes >> 9);
 		part_stat_unlock();
 	}
@@ -1796,13 +1811,14 @@ static void blk_account_io_done(struct request *req)
 		int cpu;
 
 		cpu = part_stat_lock();
-		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
+		part = req->part;
 
 		part_stat_inc(cpu, part, ios[rw]);
 		part_stat_add(cpu, part, ticks[rw], duration);
 		part_round_stats(cpu, part);
 		part_dec_in_flight(part, rw);
 
+		hd_struct_put(part);
 		part_stat_unlock();
 	}
 }
@@ -2606,7 +2622,9 @@ int __init blk_dev_init(void)
 	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
 			sizeof(((struct request *)0)->cmd_flags));
 
-	kblockd_workqueue = create_workqueue("kblockd");
+	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
+	kblockd_workqueue = alloc_workqueue("kblockd",
+					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
 	if (!kblockd_workqueue)
 		panic("Failed to create kblockd\n");

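The blk-core.c hunks above pin the partition a request is accounted against for the request's whole lifetime: the submit path stores it in rq->part and takes a reference (falling back to disk->part0 when the partition is mid-removal), and the completion and merge paths drop that reference. Below is a minimal standalone C sketch of that try-get-or-fall-back shape; the names part_try_get, part0, account_start and account_done are hypothetical stand-ins for the kernel's hd_struct helpers, not kernel API.

/* Standalone sketch (not kernel code): a refcounted object with a try-get
 * that fails once teardown has started, plus a never-deleted default object
 * to fall back to -- the same shape as the rq->part handling above. */
#include <stdbool.h>
#include <stdio.h>

struct part {
	const char *name;
	int ref;		/* protected by a lock in real code; single-threaded here */
	bool removing;
};

static struct part part0 = { "disk0",   1, false };	/* whole-disk stats, never deleted */
static struct part part1 = { "disk0p1", 1, false };

static bool part_try_get(struct part *p)
{
	if (p->removing)	/* partition is being torn down */
		return false;
	p->ref++;
	return true;
}

static void part_get(struct part *p) { p->ref++; }
static void part_put(struct part *p) { p->ref--; }

/* Submit side: pick and pin the object we will account against (cf. rq->part). */
static struct part *account_start(struct part *target)
{
	struct part *p = target;

	if (!part_try_get(p)) {
		p = &part0;	/* fall back to the whole disk */
		part_get(p);
	}
	printf("start I/O, accounted on %s (ref=%d)\n", p->name, p->ref);
	return p;
}

/* Completion side: drop the reference taken at submit time (cf. hd_struct_put). */
static void account_done(struct part *p)
{
	part_put(p);
	printf("done  I/O, accounted on %s (ref=%d)\n", p->name, p->ref);
}

int main(void)
{
	struct part *p = account_start(&part1);
	account_done(p);

	part1.removing = true;			/* deletion races with new I/O */
	p = account_start(&part1);		/* falls back to part0 */
	account_done(p);
	return 0;
}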
5 changes: 2 additions & 3 deletions trunk/block/blk-ioc.c
@@ -64,7 +64,7 @@ static void cfq_exit(struct io_context *ioc)
 	rcu_read_unlock();
 }
 
-/* Called by the exitting task */
+/* Called by the exiting task */
 void exit_io_context(struct task_struct *task)
 {
 	struct io_context *ioc;
@@ -74,10 +74,9 @@ void exit_io_context(struct task_struct *task)
 	task->io_context = NULL;
 	task_unlock(task);
 
-	if (atomic_dec_and_test(&ioc->nr_tasks)) {
+	if (atomic_dec_and_test(&ioc->nr_tasks))
 		cfq_exit(ioc);
 
-	}
 	put_io_context(ioc);
 }

3 changes: 2 additions & 1 deletion trunk/block/blk-merge.c
@@ -351,11 +351,12 @@ static void blk_account_io_merge(struct request *req)
 		int cpu;
 
 		cpu = part_stat_lock();
-		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
+		part = req->part;
 
 		part_round_stats(cpu, part);
 		part_dec_in_flight(part, rq_data_dir(req));
 
+		hd_struct_put(part);
 		part_stat_unlock();
 	}
 }
56 changes: 28 additions & 28 deletions trunk/block/cfq-iosched.c
@@ -96,7 +96,7 @@ struct cfq_rb_root {
  */
 struct cfq_queue {
 	/* reference count */
-	atomic_t ref;
+	int ref;
 	/* various state flags, see below */
 	unsigned int flags;
 	/* parent cfq_data */
@@ -207,7 +207,7 @@ struct cfq_group {
 	struct blkio_group blkg;
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
 	struct hlist_node cfqd_node;
-	atomic_t ref;
+	int ref;
 #endif
 	/* number of requests that are on the dispatch list or inside driver */
 	int dispatched;
@@ -1014,7 +1014,7 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
 	 * elevator which will be dropped by either elevator exit
 	 * or cgroup deletion path depending on who is exiting first.
 	 */
-	atomic_set(&cfqg->ref, 1);
+	cfqg->ref = 1;
 
 	/*
 	 * Add group onto cgroup list. It might happen that bdi->dev is
@@ -1059,7 +1059,7 @@ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
 
 static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
 {
-	atomic_inc(&cfqg->ref);
+	cfqg->ref++;
 	return cfqg;
 }

@@ -1071,16 +1071,17 @@ static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
 
 	cfqq->cfqg = cfqg;
 	/* cfqq reference on cfqg */
-	atomic_inc(&cfqq->cfqg->ref);
+	cfqq->cfqg->ref++;
 }
 
 static void cfq_put_cfqg(struct cfq_group *cfqg)
 {
 	struct cfq_rb_root *st;
 	int i, j;
 
-	BUG_ON(atomic_read(&cfqg->ref) <= 0);
-	if (!atomic_dec_and_test(&cfqg->ref))
+	BUG_ON(cfqg->ref <= 0);
+	cfqg->ref--;
+	if (cfqg->ref)
 		return;
 	for_each_cfqg_st(cfqg, i, j, st)
 		BUG_ON(!RB_EMPTY_ROOT(&st->rb));
@@ -1188,7 +1189,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		cfq_group_service_tree_del(cfqd, cfqq->cfqg);
 		cfqq->orig_cfqg = cfqq->cfqg;
 		cfqq->cfqg = &cfqd->root_group;
-		atomic_inc(&cfqd->root_group.ref);
+		cfqd->root_group.ref++;
 		group_changed = 1;
 	} else if (!cfqd->cfq_group_isolation
 		   && cfqq_type(cfqq) == SYNC_WORKLOAD && cfqq->orig_cfqg) {
@@ -2025,7 +2026,7 @@ static int cfqq_process_refs(struct cfq_queue *cfqq)
 	int process_refs, io_refs;
 
 	io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
-	process_refs = atomic_read(&cfqq->ref) - io_refs;
+	process_refs = cfqq->ref - io_refs;
 	BUG_ON(process_refs < 0);
 	return process_refs;
 }
@@ -2065,10 +2066,10 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
 	 */
 	if (new_process_refs >= process_refs) {
 		cfqq->new_cfqq = new_cfqq;
-		atomic_add(process_refs, &new_cfqq->ref);
+		new_cfqq->ref += process_refs;
 	} else {
 		new_cfqq->new_cfqq = cfqq;
-		atomic_add(new_process_refs, &cfqq->ref);
+		cfqq->ref += new_process_refs;
 	}
 }

@@ -2103,12 +2104,6 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	unsigned group_slice;
 	enum wl_prio_t original_prio = cfqd->serving_prio;
 
-	if (!cfqg) {
-		cfqd->serving_prio = IDLE_WORKLOAD;
-		cfqd->workload_expires = jiffies + 1;
-		return;
-	}
-
 	/* Choose next priority. RT > BE > IDLE */
 	if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
 		cfqd->serving_prio = RT_WORKLOAD;
@@ -2538,9 +2533,10 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 	struct cfq_data *cfqd = cfqq->cfqd;
 	struct cfq_group *cfqg, *orig_cfqg;
 
-	BUG_ON(atomic_read(&cfqq->ref) <= 0);
+	BUG_ON(cfqq->ref <= 0);
 
-	if (!atomic_dec_and_test(&cfqq->ref))
+	cfqq->ref--;
+	if (cfqq->ref)
 		return;
 
 	cfq_log_cfqq(cfqd, cfqq, "put_queue");
@@ -2843,7 +2839,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	RB_CLEAR_NODE(&cfqq->p_node);
 	INIT_LIST_HEAD(&cfqq->fifo);
 
-	atomic_set(&cfqq->ref, 0);
+	cfqq->ref = 0;
 	cfqq->cfqd = cfqd;
 
 	cfq_mark_cfqq_prio_changed(cfqq);
@@ -2979,11 +2975,11 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
 	 * pin the queue now that it's allocated, scheduler exit will prune it
 	 */
 	if (!is_sync && !(*async_cfqq)) {
-		atomic_inc(&cfqq->ref);
+		cfqq->ref++;
 		*async_cfqq = cfqq;
 	}
 
-	atomic_inc(&cfqq->ref);
+	cfqq->ref++;
 	return cfqq;
 }

@@ -3685,13 +3681,13 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 	}
 
 	cfqq->allocated[rw]++;
-	atomic_inc(&cfqq->ref);
-
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
+	cfqq->ref++;
 	rq->elevator_private = cic;
 	rq->elevator_private2 = cfqq;
 	rq->elevator_private3 = cfq_ref_get_cfqg(cfqq->cfqg);
+
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
 	return 0;
 
 queue_fail:
@@ -3866,6 +3862,10 @@ static void *cfq_init_queue(struct request_queue *q)
 	if (!cfqd)
 		return NULL;
 
+	/*
+	 * Don't need take queue_lock in the routine, since we are
+	 * initializing the ioscheduler, and nobody is using cfqd
+	 */
 	cfqd->cic_index = i;
 
 	/* Init root service tree */
@@ -3885,7 +3885,7 @@ static void *cfq_init_queue(struct request_queue *q)
 	 * Take a reference to root group which we never drop. This is just
 	 * to make sure that cfq_put_cfqg() does not try to kfree root group
 	 */
-	atomic_set(&cfqg->ref, 1);
+	cfqg->ref = 1;
 	rcu_read_lock();
 	cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
 					(void *)cfqd, 0);
@@ -3905,7 +3905,7 @@ static void *cfq_init_queue(struct request_queue *q)
 	 * will not attempt to free it.
 	 */
 	cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
-	atomic_inc(&cfqd->oom_cfqq.ref);
+	cfqd->oom_cfqq.ref++;
 	cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
 
 	INIT_LIST_HEAD(&cfqd->cic_list);
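The cfq-iosched hunks share one idea: every cfqq/cfqg reference change already happens under the queue lock (cfq_set_request() is even reordered so the ref++ sits inside the locked region), so the atomic_t counters can become plain ints with an explicit decrement and zero check. Below is a small userspace sketch of that pattern, assuming a pthread mutex in the role of queue_lock; the names obj, obj_get_locked and obj_put_locked are illustrative, not taken from the kernel.

#include <assert.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for q->queue_lock: every ref manipulation happens under it,
 * which is what lets the refcount be a plain int instead of atomic_t. */
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

struct obj {
	int ref;			/* cf. cfqq->ref / cfqg->ref after the patch */
};

static struct obj *obj_alloc(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	o->ref = 1;			/* cf. cfqg->ref = 1 at init */
	return o;
}

/* Callers hold queue_lock, just like the cfq helpers after the change. */
static void obj_get_locked(struct obj *o)
{
	o->ref++;			/* was atomic_inc(&o->ref) */
}

static void obj_put_locked(struct obj *o)
{
	assert(o->ref > 0);		/* cf. BUG_ON(cfqg->ref <= 0) */
	o->ref--;			/* was atomic_dec_and_test() */
	if (o->ref)
		return;
	free(o);
	printf("freed\n");
}

int main(void)
{
	struct obj *o = obj_alloc();

	pthread_mutex_lock(&queue_lock);
	obj_get_locked(o);
	obj_put_locked(o);
	obj_put_locked(o);		/* last reference: frees */
	pthread_mutex_unlock(&queue_lock);
	return 0;
}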
6 changes: 4 additions & 2 deletions trunk/block/genhd.c
@@ -244,7 +244,7 @@ static struct blk_major_name {
 } *major_names[BLKDEV_MAJOR_HASH_SIZE];
 
 /* index in the above - for now: assume no multimajor ranges */
-static inline int major_to_index(int major)
+static inline int major_to_index(unsigned major)
 {
 	return major % BLKDEV_MAJOR_HASH_SIZE;
 }
@@ -828,7 +828,7 @@ static void *show_partition_start(struct seq_file *seqf, loff_t *pos)
 	static void *p;
 
 	p = disk_seqf_start(seqf, pos);
-	if (!IS_ERR(p) && p && !*pos)
+	if (!IS_ERR_OR_NULL(p) && !*pos)
 		seq_puts(seqf, "major minor #blocks name\n\n");
 	return p;
 }
@@ -1264,6 +1264,8 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
 	}
 	disk->part_tbl->part[0] = &disk->part0;
 
+	hd_ref_init(&disk->part0);
+
 	disk->minors = minors;
 	rand_initialize_disk(disk);
 	disk_to_dev(disk)->class = &block_class;
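The major_to_index() change here (and the matching one in fs/char_dev.c below) only widens the parameter to unsigned. One concrete effect, offered as illustration since the commit message is not shown on this page: with a signed major, C's truncating % can return a negative bucket index, while the unsigned version always lands in range. The hash size of 255 in this sketch is a stand-in for BLKDEV_MAJOR_HASH_SIZE.

#include <stdio.h>

#define MAJOR_HASH_SIZE 255	/* illustrative stand-in for BLKDEV_MAJOR_HASH_SIZE */

/* Old shape: a negative major gives a negative bucket (C99 % truncates toward zero). */
static int major_to_index_signed(int major)
{
	return major % MAJOR_HASH_SIZE;
}

/* New shape, as in the patch: the index always falls in [0, MAJOR_HASH_SIZE). */
static int major_to_index(unsigned major)
{
	return major % MAJOR_HASH_SIZE;
}

int main(void)
{
	int bogus = -7;		/* e.g. an error value passed where a major was expected */

	printf("signed:   %d\n", major_to_index_signed(bogus));	/* prints -7 */
	printf("unsigned: %d\n", major_to_index((unsigned)bogus));	/* prints 249 */
	return 0;
}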
6 changes: 1 addition & 5 deletions trunk/drivers/block/loop.c
@@ -395,11 +395,7 @@ lo_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
 	struct loop_device *lo = p->lo;
 	struct page *page = buf->page;
 	sector_t IV;
-	int size, ret;
-
-	ret = buf->ops->confirm(pipe, buf);
-	if (unlikely(ret))
-		return ret;
+	int size;
 
 	IV = ((sector_t) page->index << (PAGE_CACHE_SHIFT - 9)) +
 			(buf->offset >> 9);
2 changes: 1 addition & 1 deletion trunk/drivers/md/dm.c
@@ -630,7 +630,7 @@ static void dec_pending(struct dm_io *io, int error)
 			queue_io(md, bio);
 		} else {
 			/* done with normal IO or empty flush */
-			trace_block_bio_complete(md->queue, bio);
+			trace_block_bio_complete(md->queue, bio, io_error);
 			bio_endio(bio, io_error);
 		}
 	}
7 changes: 6 additions & 1 deletion trunk/fs/bio-integrity.c
@@ -782,7 +782,12 @@ void __init bio_integrity_init(void)
 {
 	unsigned int i;
 
-	kintegrityd_wq = create_workqueue("kintegrityd");
+	/*
+	 * kintegrityd won't block much but may burn a lot of CPU cycles.
+	 * Make it highpri CPU intensive wq with max concurrency of 1.
+	 */
+	kintegrityd_wq = alloc_workqueue("kintegrityd", WQ_MEM_RECLAIM |
+					 WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
 	if (!kintegrityd_wq)
 		panic("Failed to create kintegrityd\n");

2 changes: 1 addition & 1 deletion trunk/fs/char_dev.c
@@ -59,7 +59,7 @@ static struct char_device_struct {
 } *chrdevs[CHRDEV_MAJOR_HASH_SIZE];
 
 /* index in the above */
-static inline int major_to_index(int major)
+static inline int major_to_index(unsigned major)
 {
 	return major % CHRDEV_MAJOR_HASH_SIZE;
 }
5 changes: 0 additions & 5 deletions trunk/fs/nfsd/vfs.c
@@ -845,11 +845,6 @@ nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
 	struct page **pp = rqstp->rq_respages + rqstp->rq_resused;
 	struct page *page = buf->page;
 	size_t size;
-	int ret;
-
-	ret = buf->ops->confirm(pipe, buf);
-	if (unlikely(ret))
-		return ret;
 
 	size = sd->len;

