Commit

---
yaml
---
r: 230612
b: refs/heads/master
c: dddd9dc
h: refs/heads/master
v: v3
Tejun Heo authored and Jens Axboe committed Dec 16, 2010
1 parent 5a2993c commit 1b9c97f
Showing 17 changed files with 96 additions and 162 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 797a455d2c682476c3797dbfecf5bf84c1e3b9d3
+refs/heads/master: dddd9dc340ae1a41d90e084529ca979c77c4ecfe
30 changes: 6 additions & 24 deletions trunk/block/blk-core.c
@@ -64,27 +64,13 @@ static void drive_stat_acct(struct request *rq, int new_io)
 		return;
 
 	cpu = part_stat_lock();
+	part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
 
-	if (!new_io) {
-		part = rq->part;
+	if (!new_io)
 		part_stat_inc(cpu, part, merges[rw]);
-	} else {
-		part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
-		if (!hd_struct_try_get(part)) {
-			/*
-			 * The partition is already being removed,
-			 * the request will be accounted on the disk only
-			 *
-			 * We take a reference on disk->part0 although that
-			 * partition will never be deleted, so we can treat
-			 * it as any other partition.
-			 */
-			part = &rq->rq_disk->part0;
-			hd_struct_get(part);
-		}
+	else {
 		part_round_stats(cpu, part);
 		part_inc_in_flight(part, rw);
-		rq->part = part;
 	}
 
 	part_stat_unlock();
@@ -142,7 +128,6 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 	rq->ref_count = 1;
 	rq->start_time = jiffies;
 	set_start_time_ns(rq);
-	rq->part = NULL;
 }
 EXPORT_SYMBOL(blk_rq_init);
 
@@ -1791,7 +1776,7 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
 		int cpu;
 
 		cpu = part_stat_lock();
-		part = req->part;
+		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
 		part_stat_add(cpu, part, sectors[rw], bytes >> 9);
 		part_stat_unlock();
 	}
@@ -1811,14 +1796,13 @@ static void blk_account_io_done(struct request *req)
 		int cpu;
 
 		cpu = part_stat_lock();
-		part = req->part;
+		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
 
 		part_stat_inc(cpu, part, ios[rw]);
 		part_stat_add(cpu, part, ticks[rw], duration);
 		part_round_stats(cpu, part);
 		part_dec_in_flight(part, rw);
 
-		hd_struct_put(part);
 		part_stat_unlock();
 	}
 }
@@ -2622,9 +2606,7 @@ int __init blk_dev_init(void)
 	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
 			sizeof(((struct request *)0)->cmd_flags));
 
-	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
-	kblockd_workqueue = alloc_workqueue("kblockd",
-					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+	kblockd_workqueue = create_workqueue("kblockd");
 	if (!kblockd_workqueue)
 		panic("Failed to create kblockd\n");
 
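A note on the drive_stat_acct() hunks above: the side being removed caches the partition in rq->part and pins it with hd_struct_try_get(), falling back to disk->part0 when the partition is mid-removal, while the side being kept re-resolves the partition on every I/O with disk_map_sector_rcu(). Below is a minimal userspace C11 sketch of that try-get pattern, assuming nothing beyond the standard library; part, try_get() and put() are illustrative stand-ins for hd_struct and its helpers, not kernel code.

/* Userspace model of the try-get refcount pattern:
 * take a reference only if the object is still live (count > 0). */
#include <stdatomic.h>
#include <stdio.h>

struct part {
	atomic_int ref;		/* 0 means the partition is being removed */
};

/* Increment ref only if it is still nonzero (cf. atomic_inc_not_zero()). */
static int try_get(struct part *p)
{
	int old = atomic_load(&p->ref);
	while (old != 0) {
		/* on failure, old is reloaded with the current value */
		if (atomic_compare_exchange_weak(&p->ref, &old, old + 1))
			return 1;	/* got a reference */
	}
	return 0;			/* already dying; caller must fall back */
}

static void put(struct part *p)
{
	if (atomic_fetch_sub(&p->ref, 1) == 1)
		printf("last ref dropped, object can be freed\n");
}

int main(void)
{
	struct part p = { .ref = 1 };

	if (try_get(&p))		/* succeeds: ref 1 -> 2 */
		put(&p);		/* back to 1 */
	put(&p);			/* 1 -> 0: "freed" */
	if (!try_get(&p))		/* now fails, like the removed-partition path */
		printf("fall back to disk->part0\n");
	return 0;
}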
5 changes: 3 additions & 2 deletions trunk/block/blk-ioc.c
@@ -64,7 +64,7 @@ static void cfq_exit(struct io_context *ioc)
 	rcu_read_unlock();
 }
 
-/* Called by the exiting task */
+/* Called by the exitting task */
 void exit_io_context(struct task_struct *task)
 {
 	struct io_context *ioc;
@@ -74,9 +74,10 @@ void exit_io_context(struct task_struct *task)
 	task->io_context = NULL;
 	task_unlock(task);
 
-	if (atomic_dec_and_test(&ioc->nr_tasks))
+	if (atomic_dec_and_test(&ioc->nr_tasks)) {
 		cfq_exit(ioc);
 
+	}
 	put_io_context(ioc);
 }
 
3 changes: 1 addition & 2 deletions trunk/block/blk-merge.c
@@ -351,12 +351,11 @@ static void blk_account_io_merge(struct request *req)
 		int cpu;
 
 		cpu = part_stat_lock();
-		part = req->part;
+		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
 
 		part_round_stats(cpu, part);
 		part_dec_in_flight(part, rq_data_dir(req));
 
-		hd_struct_put(part);
 		part_stat_unlock();
 	}
 }
56 changes: 28 additions & 28 deletions trunk/block/cfq-iosched.c
@@ -96,7 +96,7 @@ struct cfq_rb_root {
  */
 struct cfq_queue {
 	/* reference count */
-	int ref;
+	atomic_t ref;
 	/* various state flags, see below */
 	unsigned int flags;
 	/* parent cfq_data */
@@ -207,7 +207,7 @@ struct cfq_group {
 	struct blkio_group blkg;
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
 	struct hlist_node cfqd_node;
-	int ref;
+	atomic_t ref;
 #endif
 	/* number of requests that are on the dispatch list or inside driver */
 	int dispatched;
@@ -1014,7 +1014,7 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
 	 * elevator which will be dropped by either elevator exit
 	 * or cgroup deletion path depending on who is exiting first.
 	 */
-	cfqg->ref = 1;
+	atomic_set(&cfqg->ref, 1);
 
 	/*
 	 * Add group onto cgroup list. It might happen that bdi->dev is
@@ -1059,7 +1059,7 @@ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
 
 static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
 {
-	cfqg->ref++;
+	atomic_inc(&cfqg->ref);
 	return cfqg;
 }
 
@@ -1071,17 +1071,16 @@ static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
 
 	cfqq->cfqg = cfqg;
 	/* cfqq reference on cfqg */
-	cfqq->cfqg->ref++;
+	atomic_inc(&cfqq->cfqg->ref);
 }
 
 static void cfq_put_cfqg(struct cfq_group *cfqg)
 {
 	struct cfq_rb_root *st;
 	int i, j;
 
-	BUG_ON(cfqg->ref <= 0);
-	cfqg->ref--;
-	if (cfqg->ref)
+	BUG_ON(atomic_read(&cfqg->ref) <= 0);
+	if (!atomic_dec_and_test(&cfqg->ref))
 		return;
 	for_each_cfqg_st(cfqg, i, j, st)
 		BUG_ON(!RB_EMPTY_ROOT(&st->rb));
@@ -1189,7 +1188,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		cfq_group_service_tree_del(cfqd, cfqq->cfqg);
 		cfqq->orig_cfqg = cfqq->cfqg;
 		cfqq->cfqg = &cfqd->root_group;
-		cfqd->root_group.ref++;
+		atomic_inc(&cfqd->root_group.ref);
 		group_changed = 1;
 	} else if (!cfqd->cfq_group_isolation
 		   && cfqq_type(cfqq) == SYNC_WORKLOAD && cfqq->orig_cfqg) {
@@ -2026,7 +2025,7 @@ static int cfqq_process_refs(struct cfq_queue *cfqq)
 	int process_refs, io_refs;
 
 	io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
-	process_refs = cfqq->ref - io_refs;
+	process_refs = atomic_read(&cfqq->ref) - io_refs;
 	BUG_ON(process_refs < 0);
 	return process_refs;
 }
@@ -2066,10 +2065,10 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
 	 */
 	if (new_process_refs >= process_refs) {
 		cfqq->new_cfqq = new_cfqq;
-		new_cfqq->ref += process_refs;
+		atomic_add(process_refs, &new_cfqq->ref);
 	} else {
 		new_cfqq->new_cfqq = cfqq;
-		cfqq->ref += new_process_refs;
+		atomic_add(new_process_refs, &cfqq->ref);
 	}
 }
 
@@ -2104,6 +2103,12 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	unsigned group_slice;
 	enum wl_prio_t original_prio = cfqd->serving_prio;
 
+	if (!cfqg) {
+		cfqd->serving_prio = IDLE_WORKLOAD;
+		cfqd->workload_expires = jiffies + 1;
+		return;
+	}
+
 	/* Choose next priority. RT > BE > IDLE */
 	if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
 		cfqd->serving_prio = RT_WORKLOAD;
@@ -2533,10 +2538,9 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 	struct cfq_data *cfqd = cfqq->cfqd;
 	struct cfq_group *cfqg, *orig_cfqg;
 
-	BUG_ON(cfqq->ref <= 0);
+	BUG_ON(atomic_read(&cfqq->ref) <= 0);
 
-	cfqq->ref--;
-	if (cfqq->ref)
+	if (!atomic_dec_and_test(&cfqq->ref))
 		return;
 
 	cfq_log_cfqq(cfqd, cfqq, "put_queue");
@@ -2839,7 +2843,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	RB_CLEAR_NODE(&cfqq->p_node);
 	INIT_LIST_HEAD(&cfqq->fifo);
 
-	cfqq->ref = 0;
+	atomic_set(&cfqq->ref, 0);
 	cfqq->cfqd = cfqd;
 
 	cfq_mark_cfqq_prio_changed(cfqq);
@@ -2975,11 +2979,11 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
 	 * pin the queue now that it's allocated, scheduler exit will prune it
 	 */
 	if (!is_sync && !(*async_cfqq)) {
-		cfqq->ref++;
+		atomic_inc(&cfqq->ref);
 		*async_cfqq = cfqq;
 	}
 
-	cfqq->ref++;
+	atomic_inc(&cfqq->ref);
 	return cfqq;
 }
 
@@ -3681,13 +3685,13 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 	}
 
 	cfqq->allocated[rw]++;
-	cfqq->ref++;
-	rq->elevator_private = cic;
-	rq->elevator_private2 = cfqq;
-	rq->elevator_private3 = cfq_ref_get_cfqg(cfqq->cfqg);
+	atomic_inc(&cfqq->ref);
 
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
+	rq->elevator_private = cic;
+	rq->elevator_private2 = cfqq;
+	rq->elevator_private3 = cfq_ref_get_cfqg(cfqq->cfqg);
 	return 0;
 
 queue_fail:
@@ -3862,10 +3866,6 @@ static void *cfq_init_queue(struct request_queue *q)
 	if (!cfqd)
 		return NULL;
 
-	/*
-	 * Don't need take queue_lock in the routine, since we are
-	 * initializing the ioscheduler, and nobody is using cfqd
-	 */
 	cfqd->cic_index = i;
 
 	/* Init root service tree */
@@ -3885,7 +3885,7 @@ static void *cfq_init_queue(struct request_queue *q)
 	 * Take a reference to root group which we never drop. This is just
 	 * to make sure that cfq_put_cfqg() does not try to kfree root group
 	 */
-	cfqg->ref = 1;
+	atomic_set(&cfqg->ref, 1);
 	rcu_read_lock();
 	cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
 					(void *)cfqd, 0);
@@ -3905,7 +3905,7 @@ static void *cfq_init_queue(struct request_queue *q)
 	 * will not attempt to free it.
 	 */
 	cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
-	cfqd->oom_cfqq.ref++;
+	atomic_inc(&cfqd->oom_cfqq.ref);
 	cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
 
 	INIT_LIST_HEAD(&cfqd->cic_list);
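A note on the ref hunks above: the diff trades a plain int refcount, which is only safe because every manipulation happens under the queue lock, for an atomic_t that is safe without it. A compact C11 model of the two styles, with invented cfqq_* names standing in for struct cfq_queue rather than the kernel definitions:

/* Style A (old side of the diff): plain int, valid only when every
 * ref operation runs under the same lock (queue_lock in cfq). */
#include <stdatomic.h>
#include <stdbool.h>

struct cfqq_locked {
	int ref;			/* protected by the queue lock */
};

static bool put_locked(struct cfqq_locked *q)	/* call with lock held */
{
	return --q->ref == 0;		/* true: caller frees the queue */
}

/* Style B (new side of the diff): atomic_t, safe without the lock. */
struct cfqq_atomic {
	atomic_int ref;
};

static bool put_atomic(struct cfqq_atomic *q)
{
	/* mirrors atomic_dec_and_test(&cfqq->ref): true iff this call
	 * dropped the last reference */
	return atomic_fetch_sub(&q->ref, 1) == 1;
}

int main(void)
{
	struct cfqq_locked a = { .ref = 1 };
	struct cfqq_atomic b = { .ref = 1 };
	return put_locked(&a) && put_atomic(&b) ? 0 : 1;
}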
31 changes: 2 additions & 29 deletions trunk/block/genhd.c
@@ -239,7 +239,7 @@ static struct blk_major_name {
 } *major_names[BLKDEV_MAJOR_HASH_SIZE];
 
 /* index in the above - for now: assume no multimajor ranges */
-static inline int major_to_index(unsigned major)
+static inline int major_to_index(int major)
 {
 	return major % BLKDEV_MAJOR_HASH_SIZE;
 }
@@ -735,7 +735,7 @@ static void *show_partition_start(struct seq_file *seqf, loff_t *pos)
 	static void *p;
 
 	p = disk_seqf_start(seqf, pos);
-	if (!IS_ERR_OR_NULL(p) && !*pos)
+	if (!IS_ERR(p) && p && !*pos)
 		seq_puts(seqf, "major minor  #blocks  name\n\n");
 	return p;
 }
@@ -1110,29 +1110,6 @@ static int __init proc_genhd_init(void)
 module_init(proc_genhd_init);
 #endif /* CONFIG_PROC_FS */
 
-static void media_change_notify_thread(struct work_struct *work)
-{
-	struct gendisk *gd = container_of(work, struct gendisk, async_notify);
-	char event[] = "MEDIA_CHANGE=1";
-	char *envp[] = { event, NULL };
-
-	/*
-	 * set enviroment vars to indicate which event this is for
-	 * so that user space will know to go check the media status.
-	 */
-	kobject_uevent_env(&disk_to_dev(gd)->kobj, KOBJ_CHANGE, envp);
-	put_device(gd->driverfs_dev);
-}
-
-#if 0
-void genhd_media_change_notify(struct gendisk *disk)
-{
-	get_device(disk->driverfs_dev);
-	schedule_work(&disk->async_notify);
-}
-EXPORT_SYMBOL_GPL(genhd_media_change_notify);
-#endif /* 0 */
-
 dev_t blk_lookup_devt(const char *name, int partno)
 {
 	dev_t devt = MKDEV(0, 0);
@@ -1193,15 +1170,11 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
 	}
 	disk->part_tbl->part[0] = &disk->part0;
 
-	hd_ref_init(&disk->part0);
-
 	disk->minors = minors;
 	rand_initialize_disk(disk);
 	disk_to_dev(disk)->class = &block_class;
 	disk_to_dev(disk)->type = &disk_type;
 	device_initialize(disk_to_dev(disk));
-	INIT_WORK(&disk->async_notify,
-			media_change_notify_thread);
 	}
 	return disk;
 }
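A note on the show_partition_start() hunk: the two conditions are equivalent, since the kernel defines IS_ERR_OR_NULL(p) as !p || IS_ERR(p). A small userspace model of the error-pointer test follows; MAX_ERRNO matches the kernel's 4095, but the rest is illustrative, not the kernel's err.h:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define MAX_ERRNO 4095

/* error pointers live in the top MAX_ERRNO addresses, like IS_ERR() */
static bool is_err(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static bool is_err_or_null(const void *p)
{
	return !p || is_err(p);
}

int main(void)
{
	void *ok = (void *)0x1000;
	void *err = (void *)(uintptr_t)-12;	/* models ERR_PTR(-ENOMEM) */
	void *null = 0;
	void *samples[] = { ok, err, null };

	for (int i = 0; i < 3; i++) {
		void *p = samples[i];
		/* the two spellings from the hunk accept the same pointers */
		assert((!is_err_or_null(p)) == (!is_err(p) && p));
	}
	return 0;
}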
6 changes: 5 additions & 1 deletion trunk/drivers/block/loop.c
@@ -395,7 +395,11 @@ lo_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
 	struct loop_device *lo = p->lo;
 	struct page *page = buf->page;
 	sector_t IV;
-	int size;
+	int size, ret;
 
+	ret = buf->ops->confirm(pipe, buf);
+	if (unlikely(ret))
+		return ret;
+
 	IV = ((sector_t) page->index << (PAGE_CACHE_SHIFT - 9)) +
 		(buf->offset >> 9);
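A note on the lo_splice_actor() hunk: the added side calls the pipe buffer's confirm() operation so the buffer's contents are known to be valid before they are used, bailing out on failure. A toy C model of that ops-table pattern, with invented buffer/buffer_ops types standing in for the kernel's pipe_buf_operations:

#include <errno.h>
#include <stdio.h>

struct buffer;
struct buffer_ops {
	/* make sure the buffer's data is up to date; 0 on success */
	int (*confirm)(struct buffer *buf);
};
struct buffer {
	const struct buffer_ops *ops;
	int uptodate;
};

static int confirm_simple(struct buffer *buf)
{
	return buf->uptodate ? 0 : -EIO;
}

static const struct buffer_ops simple_ops = { .confirm = confirm_simple };

static int use_buffer(struct buffer *buf)
{
	int ret = buf->ops->confirm(buf);	/* same shape as the hunk */
	if (ret)
		return ret;			/* bail before touching data */
	puts("data is valid, proceed");
	return 0;
}

int main(void)
{
	struct buffer good = { &simple_ops, 1 }, bad = { &simple_ops, 0 };
	use_buffer(&good);
	printf("bad buffer -> %d\n", use_buffer(&bad));
	return 0;
}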
2 changes: 1 addition & 1 deletion trunk/drivers/md/dm.c
@@ -630,7 +630,7 @@ static void dec_pending(struct dm_io *io, int error)
 			queue_io(md, bio);
 		} else {
 			/* done with normal IO or empty flush */
-			trace_block_bio_complete(md->queue, bio, io_error);
+			trace_block_bio_complete(md->queue, bio);
 			bio_endio(bio, io_error);
 		}
 	}
7 changes: 1 addition & 6 deletions trunk/fs/bio-integrity.c
@@ -782,12 +782,7 @@ void __init bio_integrity_init(void)
 {
 	unsigned int i;
 
-	/*
-	 * kintegrityd won't block much but may burn a lot of CPU cycles.
-	 * Make it highpri CPU intensive wq with max concurrency of 1.
-	 */
-	kintegrityd_wq = alloc_workqueue("kintegrityd", WQ_MEM_RECLAIM |
-					 WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
+	kintegrityd_wq = create_workqueue("kintegrityd");
 	if (!kintegrityd_wq)
 		panic("Failed to create kintegrityd\n");
 