Commit
---
r: 41165
b: refs/heads/master
c: 1399ff5
h: refs/heads/master
i:
  41163: 8434db2
v: v3
Linus Torvalds committed Dec 2, 2006
1 parent 0668685 commit 72aca23
Showing 14 changed files with 231 additions and 118 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 6b44d4e69c6144d0df71ab47ec90d2009237d48f
refs/heads/master: 1399ff54741b3aa0aaf5097b8559fa30277ebe61
2 changes: 1 addition & 1 deletion trunk/block/as-iosched.c
@@ -1317,7 +1317,7 @@ static void as_exit_queue(elevator_t *e)
/*
* initialize elevator private data (as_data).
*/
static void *as_init_queue(request_queue_t *q, elevator_t *e)
static void *as_init_queue(request_queue_t *q)
{
struct as_data *ad;

57 changes: 45 additions & 12 deletions trunk/block/blktrace.c
@@ -22,30 +22,61 @@
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/time.h>
#include <asm/uaccess.h>

static DEFINE_PER_CPU(unsigned long long, blk_trace_cpu_offset) = { 0, };
static unsigned int blktrace_seq __read_mostly = 1;

/*
* Send out a notify message.
*/
static inline unsigned int trace_note(struct blk_trace *bt,
pid_t pid, int action,
const void *data, size_t len)
{
struct blk_io_trace *t;
int cpu = smp_processor_id();

t = relay_reserve(bt->rchan, sizeof(*t) + len);
if (t == NULL)
return 0;

t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
t->time = sched_clock() - per_cpu(blk_trace_cpu_offset, cpu);
t->device = bt->dev;
t->action = action;
t->pid = pid;
t->cpu = cpu;
t->pdu_len = len;
memcpy((void *) t + sizeof(*t), data, len);
return blktrace_seq;
}

/*
* Send out a notify for this process, if we haven't done so since a trace
* started
*/
static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
{
struct blk_io_trace *t;
tsk->btrace_seq = trace_note(bt, tsk->pid,
BLK_TN_PROCESS,
tsk->comm, sizeof(tsk->comm));
}

t = relay_reserve(bt->rchan, sizeof(*t) + sizeof(tsk->comm));
if (t) {
t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
t->device = bt->dev;
t->action = BLK_TC_ACT(BLK_TC_NOTIFY);
t->pid = tsk->pid;
t->cpu = smp_processor_id();
t->pdu_len = sizeof(tsk->comm);
memcpy((void *) t + sizeof(*t), tsk->comm, t->pdu_len);
tsk->btrace_seq = blktrace_seq;
}
static void trace_note_time(struct blk_trace *bt)
{
struct timespec now;
unsigned long flags;
u32 words[2];

getnstimeofday(&now);
words[0] = now.tv_sec;
words[1] = now.tv_nsec;

local_irq_save(flags);
trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
local_irq_restore(flags);
}

static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
@@ -394,6 +425,8 @@ static int blk_trace_startstop(request_queue_t *q, int start)
blktrace_seq++;
smp_mb();
bt->trace_state = Blktrace_running;

trace_note_time(bt);
ret = 0;
}
} else {
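The call to trace_note_time() added above is what anchors the per-CPU sched_clock() timestamps in each trace record to wall-clock time: when tracing starts, a BLK_TN_TIMESTAMP note carrying two 32-bit words (seconds, then nanoseconds) is written into the relay stream. As an illustration only, not part of the commit, a consumer could rebuild that anchor from the payload as sketched below; the helper name and the pdu pointer are hypothetical.

/*
 * Sketch (not in this commit): rebuild the wall-clock anchor from the
 * 8-byte BLK_TN_TIMESTAMP payload written by trace_note_time().  "pdu"
 * is assumed to point at the payload that follows the struct
 * blk_io_trace header in the relay buffer.
 */
static void example_decode_timestamp(const u32 *pdu, struct timespec *ts)
{
	ts->tv_sec  = pdu[0];	/* words[0] = now.tv_sec in trace_note_time() */
	ts->tv_nsec = pdu[1];	/* words[1] = now.tv_nsec in trace_note_time() */
}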
9 changes: 4 additions & 5 deletions trunk/block/cfq-iosched.c
@@ -1464,8 +1464,7 @@ cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
}

static void
cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
struct request *rq)
cfq_update_io_seektime(struct cfq_io_context *cic, struct request *rq)
{
sector_t sdist;
u64 total;
@@ -1617,7 +1616,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
}

cfq_update_io_thinktime(cfqd, cic);
cfq_update_io_seektime(cfqd, cic, rq);
cfq_update_io_seektime(cic, rq);
cfq_update_idle_window(cfqd, cfqq, cic);

cic->last_queue = jiffies;
@@ -1770,7 +1769,7 @@ static int cfq_may_queue(request_queue_t *q, int rw)
/*
* queue lock held here
*/
static void cfq_put_request(request_queue_t *q, struct request *rq)
static void cfq_put_request(struct request *rq)
{
struct cfq_queue *cfqq = RQ_CFQQ(rq);

@@ -1951,7 +1950,7 @@ static void cfq_exit_queue(elevator_t *e)
kfree(cfqd);
}

static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
static void *cfq_init_queue(request_queue_t *q)
{
struct cfq_data *cfqd;
int i;
2 changes: 1 addition & 1 deletion trunk/block/deadline-iosched.c
@@ -356,7 +356,7 @@ static void deadline_exit_queue(elevator_t *e)
/*
* initialize elevator private data (deadline_data).
*/
static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
static void *deadline_init_queue(request_queue_t *q)
{
struct deadline_data *dd;

4 changes: 2 additions & 2 deletions trunk/block/elevator.c
@@ -129,7 +129,7 @@ static struct elevator_type *elevator_get(const char *name)

static void *elevator_init_queue(request_queue_t *q, struct elevator_queue *eq)
{
return eq->ops->elevator_init_fn(q, eq);
return eq->ops->elevator_init_fn(q);
}

static void elevator_attach(request_queue_t *q, struct elevator_queue *eq,
@@ -810,7 +810,7 @@ void elv_put_request(request_queue_t *q, struct request *rq)
elevator_t *e = q->elevator;

if (e->ops->elevator_put_req_fn)
e->ops->elevator_put_req_fn(q, rq);
e->ops->elevator_put_req_fn(rq);
}

int elv_may_queue(request_queue_t *q, int rw)
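Together with the as-iosched, cfq-iosched and deadline-iosched hunks above, the elevator.c changes narrow two elevator hooks: elevator_init_fn no longer receives the elevator_t, and elevator_put_req_fn no longer receives the queue. Below is a rough sketch, not taken from the commit, of how an I/O scheduler's registration lines up with the new prototypes; the "example" scheduler and its private data type are hypothetical.

/* Sketch only: method prototypes matching the narrowed elevator hooks. */
static void *example_init_queue(request_queue_t *q)	/* elevator_t argument gone */
{
	struct example_data *ed;	/* hypothetical per-queue private data */

	ed = kmalloc(sizeof(*ed), GFP_KERNEL);
	return ed;	/* NULL on failure, as with the in-tree schedulers */
}

static void example_put_request(struct request *rq)	/* queue argument gone */
{
	/* per-request cleanup; the queue is no longer passed in */
}

static struct elevator_type iosched_example = {
	.ops = {
		.elevator_init_fn	= example_init_queue,
		.elevator_put_req_fn	= example_put_request,
	},
	.elevator_name	= "example",
	.elevator_owner	= THIS_MODULE,
};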
166 changes: 126 additions & 40 deletions trunk/block/ll_rw_blk.c
Expand Up @@ -2322,6 +2322,84 @@ void blk_insert_request(request_queue_t *q, struct request *rq,

EXPORT_SYMBOL(blk_insert_request);

static int __blk_rq_unmap_user(struct bio *bio)
{
int ret = 0;

if (bio) {
if (bio_flagged(bio, BIO_USER_MAPPED))
bio_unmap_user(bio);
else
ret = bio_uncopy_user(bio);
}

return ret;
}

static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
void __user *ubuf, unsigned int len)
{
unsigned long uaddr;
struct bio *bio, *orig_bio;
int reading, ret;

reading = rq_data_dir(rq) == READ;

/*
* if alignment requirement is satisfied, map in user pages for
* direct dma. else, set up kernel bounce buffers
*/
uaddr = (unsigned long) ubuf;
if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
bio = bio_map_user(q, NULL, uaddr, len, reading);
else
bio = bio_copy_user(q, uaddr, len, reading);

if (IS_ERR(bio)) {
return PTR_ERR(bio);
}

orig_bio = bio;
blk_queue_bounce(q, &bio);
/*
* We link the bounce buffer in and could have to traverse it
* later so we have to get a ref to prevent it from being freed
*/
bio_get(bio);

/*
* for most (all? don't know of any) queues we could
* skip grabbing the queue lock here. only drivers with
* funky private ->back_merge_fn() function could be
* problematic.
*/
spin_lock_irq(q->queue_lock);
if (!rq->bio)
blk_rq_bio_prep(q, rq, bio);
else if (!q->back_merge_fn(q, rq, bio)) {
ret = -EINVAL;
spin_unlock_irq(q->queue_lock);
goto unmap_bio;
} else {
rq->biotail->bi_next = bio;
rq->biotail = bio;

rq->nr_sectors += bio_sectors(bio);
rq->hard_nr_sectors = rq->nr_sectors;
rq->data_len += bio->bi_size;
}
spin_unlock_irq(q->queue_lock);

return bio->bi_size;

unmap_bio:
/* if it was bounced we must call the end io function */
bio_endio(bio, bio->bi_size, 0);
__blk_rq_unmap_user(orig_bio);
bio_put(bio);
return ret;
}

/**
* blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
* @q: request queue where request should be inserted
@@ -2343,42 +2421,44 @@ EXPORT_SYMBOL(blk_insert_request);
* unmapping.
*/
int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
unsigned int len)
unsigned long len)
{
unsigned long uaddr;
struct bio *bio;
int reading;
unsigned long bytes_read = 0;
int ret;

if (len > (q->max_hw_sectors << 9))
return -EINVAL;
if (!len || !ubuf)
return -EINVAL;

reading = rq_data_dir(rq) == READ;
while (bytes_read != len) {
unsigned long map_len, end, start;

/*
* if alignment requirement is satisfied, map in user pages for
* direct dma. else, set up kernel bounce buffers
*/
uaddr = (unsigned long) ubuf;
if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
bio = bio_map_user(q, NULL, uaddr, len, reading);
else
bio = bio_copy_user(q, uaddr, len, reading);
map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
>> PAGE_SHIFT;
start = (unsigned long)ubuf >> PAGE_SHIFT;

if (!IS_ERR(bio)) {
rq->bio = rq->biotail = bio;
blk_rq_bio_prep(q, rq, bio);
/*
* A bad offset could cause us to require BIO_MAX_PAGES + 1
* pages. If this happens we just lower the requested
* mapping len by a page so that we can fit
*/
if (end - start > BIO_MAX_PAGES)
map_len -= PAGE_SIZE;

rq->buffer = rq->data = NULL;
rq->data_len = len;
return 0;
ret = __blk_rq_map_user(q, rq, ubuf, map_len);
if (ret < 0)
goto unmap_rq;
bytes_read += ret;
ubuf += ret;
}

/*
* bio is the err-ptr
*/
return PTR_ERR(bio);
rq->buffer = rq->data = NULL;
return 0;
unmap_rq:
blk_rq_unmap_user(rq);
return ret;
}

EXPORT_SYMBOL(blk_rq_map_user);
@@ -2404,7 +2484,7 @@ EXPORT_SYMBOL(blk_rq_map_user);
* unmapping.
*/
int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
struct sg_iovec *iov, int iov_count)
struct sg_iovec *iov, int iov_count, unsigned int len)
{
struct bio *bio;

@@ -2418,34 +2498,42 @@ int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
if (IS_ERR(bio))
return PTR_ERR(bio);

rq->bio = rq->biotail = bio;
if (bio->bi_size != len) {
bio_endio(bio, bio->bi_size, 0);
bio_unmap_user(bio);
return -EINVAL;
}

bio_get(bio);
blk_rq_bio_prep(q, rq, bio);
rq->buffer = rq->data = NULL;
rq->data_len = bio->bi_size;
return 0;
}

EXPORT_SYMBOL(blk_rq_map_user_iov);

/**
* blk_rq_unmap_user - unmap a request with user data
* @bio: bio to be unmapped
* @ulen: length of user buffer
* @rq: rq to be unmapped
*
* Description:
* Unmap a bio previously mapped by blk_rq_map_user().
* Unmap a rq previously mapped by blk_rq_map_user().
* rq->bio must be set to the original head of the request.
*/
int blk_rq_unmap_user(struct bio *bio, unsigned int ulen)
int blk_rq_unmap_user(struct request *rq)
{
int ret = 0;
struct bio *bio, *mapped_bio;

if (bio) {
if (bio_flagged(bio, BIO_USER_MAPPED))
bio_unmap_user(bio);
while ((bio = rq->bio)) {
if (bio_flagged(bio, BIO_BOUNCED))
mapped_bio = bio->bi_private;
else
ret = bio_uncopy_user(bio);
}
mapped_bio = bio;

__blk_rq_unmap_user(mapped_bio);
rq->bio = bio->bi_next;
bio_put(bio);
}
return 0;
}

@@ -2476,11 +2564,8 @@ int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
if (rq_data_dir(rq) == WRITE)
bio->bi_rw |= (1 << BIO_RW);

rq->bio = rq->biotail = bio;
blk_rq_bio_prep(q, rq, bio);

rq->buffer = rq->data = NULL;
rq->data_len = len;
return 0;
}

@@ -3495,6 +3580,7 @@ void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
rq->hard_cur_sectors = rq->current_nr_sectors;
rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
rq->buffer = bio_data(bio);
rq->data_len = bio->bi_size;

rq->bio = rq->biotail = bio;
}
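The net effect of the ll_rw_blk.c changes is most visible at the call sites: blk_rq_map_user() now takes an unsigned long length and internally splits the mapping across several bios (up to BIO_MAX_SIZE each), while blk_rq_unmap_user() now takes the request itself and walks rq->bio to undo every mapping. A hedged caller-side sketch follows, not from the commit; the submission helper and buffer names are placeholders.

/*
 * Sketch only: the map/submit/unmap pairing a REQ_BLOCK_PC user would
 * follow after this commit.  "submit_and_wait" stands in for whatever
 * mechanism the caller uses to execute the request; ubuf/len are
 * caller-supplied.
 */
static int example_pc_command(request_queue_t *q, struct request *rq,
			      void __user *ubuf, unsigned long len)
{
	int err;

	err = blk_rq_map_user(q, rq, ubuf, len);	/* may chain several bios */
	if (err)
		return err;

	submit_and_wait(q, rq);				/* hypothetical */

	return blk_rq_unmap_user(rq);			/* new: takes the request */
}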