Merge branch 'for-2.6.29' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.29' of git://git.kernel.dk/linux-2.6-block: (43 commits)
  bio: get rid of bio_vec clearing
  bounce: don't rely on a zeroed bio_vec list
  cciss: simplify parameters to deregister_disk function
  cfq-iosched: fix race between exiting queue and exiting task
  loop: Do not call loop_unplug for not configured loop device.
  loop: Flush possible running bios when loop device is released.
  alpha: remove dead BIO_VMERGE_BOUNDARY
  Get rid of CONFIG_LSF
  block: make blk_softirq_init() static
  block: use min_not_zero in blk_queue_stack_limits
  block: add one-hit cache for disk partition lookup
  cfq-iosched: remove limit of dispatch depth of max 4 times quantum
  nbd: tell the block layer that it is not a rotational device
  block: get rid of elevator_t typedef
  aio: make the lookup_ioctx() lockless
  bio: add support for inlining a number of bio_vecs inside the bio
  bio: allow individual slabs in the bio_set
  bio: move the slab pointer inside the bio_set
  bio: only mempool back the largest bio_vec slab cache
  block: don't use plugging on SSD devices
  ...
Linus Torvalds committed Dec 31, 2008
2 parents 179475a + d3f7611 commit 1dff81f
Showing 47 changed files with 1,089 additions and 751 deletions.
6 changes: 3 additions & 3 deletions Documentation/block/biodoc.txt
@@ -914,7 +914,7 @@ I/O scheduler, a.k.a. elevator, is implemented in two layers. Generic dispatch
queue and specific I/O schedulers. Unless stated otherwise, elevator is used
to refer to both parts and I/O scheduler to specific I/O schedulers.

Block layer implements generic dispatch queue in ll_rw_blk.c and elevator.c.
Block layer implements generic dispatch queue in block/*.c.
The generic dispatch queue is responsible for properly ordering barrier
requests, requeueing, handling non-fs requests and all other subtleties.

@@ -926,8 +926,8 @@ be built inside the kernel. Each queue can choose different one and can also
change to another one dynamically.

A block layer call to the i/o scheduler follows the convention elv_xxx(). This
calls elevator_xxx_fn in the elevator switch (drivers/block/elevator.c). Oh,
xxx and xxx might not match exactly, but use your imagination. If an elevator
calls elevator_xxx_fn in the elevator switch (block/elevator.c). Oh, xxx
and xxx might not match exactly, but use your imagination. If an elevator
doesn't implement a function, the switch does nothing or some minimal house
keeping work.

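As a side note on the documentation hunk above: the elv_xxx() convention it describes is a thin wrapper that forwards to the scheduler's elevator_xxx_fn hook and does nothing when the hook is unset. A rough, self-contained sketch of that pattern (toy names such as toy_elv_dispatch, not the real kernel API):

```c
#include <stdio.h>

/*
 * Toy, self-contained sketch of the elv_xxx() convention described in
 * biodoc.txt above: the block layer calls a thin elv_xxx() wrapper,
 * which forwards to the active scheduler's elevator_xxx_fn hook and
 * does nothing when the hook is not implemented.  All names here are
 * made up for illustration.
 */
struct toy_request { int sector; };

struct toy_elevator_ops {
	/* stands in for an elevator_xxx_fn hook */
	int (*elevator_dispatch_fn)(struct toy_request *rq);
};

struct toy_elevator_queue {
	const struct toy_elevator_ops *ops;
};

/* block-layer side: the elv_xxx() style wrapper */
static int toy_elv_dispatch(struct toy_elevator_queue *e, struct toy_request *rq)
{
	if (e->ops && e->ops->elevator_dispatch_fn)
		return e->ops->elevator_dispatch_fn(rq);
	return 0;	/* hook unset: minimal housekeeping only */
}

/* scheduler side: one concrete hook implementation */
static int toy_noop_dispatch(struct toy_request *rq)
{
	printf("dispatching request at sector %d\n", rq->sector);
	return 1;
}

static const struct toy_elevator_ops toy_noop_ops = {
	.elevator_dispatch_fn = toy_noop_dispatch,
};

int main(void)
{
	struct toy_elevator_queue e = { .ops = &toy_noop_ops };
	struct toy_request rq = { .sector = 2048 };

	return toy_elv_dispatch(&e, &rq) ? 0 : 1;
}
```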
3 changes: 0 additions & 3 deletions arch/alpha/include/asm/io.h
@@ -96,9 +96,6 @@ static inline dma_addr_t __deprecated isa_page_to_bus(struct page *page)
return page_to_phys(page);
}

/* This depends on working iommu. */
#define BIO_VMERGE_BOUNDARY (alpha_mv.mv_pci_tbi ? PAGE_SIZE : 0)

/* Maximum PIO space address supported? */
#define IO_SPACE_LIMIT 0xffff

4 changes: 2 additions & 2 deletions arch/s390/mm/pgtable.c
@@ -263,7 +263,7 @@ int s390_enable_sie(void)
/* lets check if we are allowed to replace the mm */
task_lock(tsk);
if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
tsk->mm != tsk->active_mm || !hlist_empty(&tsk->mm->ioctx_list)) {
task_unlock(tsk);
return -EINVAL;
}
@@ -279,7 +279,7 @@ int s390_enable_sie(void)
/* Now lets check again if something happened */
task_lock(tsk);
if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
tsk->mm != tsk->active_mm || !hlist_empty(&tsk->mm->ioctx_list)) {
mmput(mm);
task_unlock(tsk);
return -EINVAL;
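For context, this hunk follows from the "aio: make the lookup_ioctx() lockless" change pulled in by this merge, which (as the new test suggests) turns mm->ioctx_list from a plain pointer into an hlist, so the presence of io contexts is now checked with hlist_empty() rather than a NULL test. A toy model of that difference, with assumed names rather than the real kernel structures:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Toy model (assumed names, not the kernel headers) of why the test
 * changes: once ioctx_list is an hlist_head rather than a pointer,
 * "are there any AIO contexts?" is asked with hlist_empty() instead
 * of a NULL check.
 */
struct toy_hlist_node { struct toy_hlist_node *next; };
struct toy_hlist_head { struct toy_hlist_node *first; };

static bool toy_hlist_empty(const struct toy_hlist_head *h)
{
	return h->first == NULL;
}

struct toy_mm {
	struct toy_hlist_head ioctx_list;	/* was: struct kioctx *ioctx_list */
};

int main(void)
{
	struct toy_mm mm = { .ioctx_list = { .first = NULL } };

	/* the old "if (mm->ioctx_list)" style no longer applies to a struct */
	if (!toy_hlist_empty(&mm.ioctx_list))
		printf("mm has active AIO contexts\n");
	else
		printf("no AIO contexts\n");
	return 0;
}
```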
23 changes: 5 additions & 18 deletions block/Kconfig
@@ -24,21 +24,17 @@ menuconfig BLOCK
if BLOCK

config LBD
bool "Support for Large Block Devices"
bool "Support for large block devices and files"
depends on !64BIT
help
Enable block devices of size 2TB and larger.
Enable block devices or files of size 2TB and larger.

This option is required to support the full capacity of large
(2TB+) block devices, including RAID, disk, Network Block Device,
Logical Volume Manager (LVM) and loopback.

For example, RAID devices are frequently bigger than the capacity
of the largest individual hard drive.

This option is not required if you have individual disk drives
which total 2TB+ and you are not aggregating the capacity into
a large block device (e.g. using RAID or LVM).

This option also enables support for single files larger than
2TB.

If unsure, say N.

@@ -58,15 +54,6 @@ config BLK_DEV_IO_TRACE

If unsure, say N.

config LSF
bool "Support for Large Single Files"
depends on !64BIT
help
Say Y here if you want to be able to handle very large files (2TB
and larger), otherwise say N.

If unsure, say Y.

config BLK_DEV_BSG
bool "Block layer SG support v4 (EXPERIMENTAL)"
depends on EXPERIMENTAL
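For context on the LBD/LSF hunks above: on 32-bit builds these options decide how wide the block-layer counters are. A hedged, self-contained sketch modelled loosely on that era's include/linux/types.h (the real header and guard names may differ), showing why folding CONFIG_LSF into CONFIG_LBD is enough:

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Hedged sketch of what CONFIG_LBD gates after this merge, modelled
 * loosely on the era's include/linux/types.h (the real header differs
 * in detail).  On 32-bit builds, enabling LBD widens both the device
 * sector index and the per-file block count to 64 bits, which is why
 * the separate CONFIG_LSF option could be dropped.
 */
#define CONFIG_LBD 1			/* pretend the option is enabled */

#if defined(CONFIG_LBD) || defined(CONFIG_64BIT)
typedef uint64_t toy_sector_t;		/* device offsets, 512-byte units */
typedef uint64_t toy_blkcnt_t;		/* per-file block counts (was LSF) */
#else
typedef unsigned long toy_sector_t;
typedef unsigned long toy_blkcnt_t;
#endif

int main(void)
{
	printf("sector_t: %zu bytes, blkcnt_t: %zu bytes\n",
	       sizeof(toy_sector_t), sizeof(toy_blkcnt_t));
	return 0;
}
```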
10 changes: 5 additions & 5 deletions block/as-iosched.c
@@ -1339,12 +1339,12 @@ static int as_may_queue(struct request_queue *q, int rw)
return ret;
}

static void as_exit_queue(elevator_t *e)
static void as_exit_queue(struct elevator_queue *e)
{
struct as_data *ad = e->elevator_data;

del_timer_sync(&ad->antic_timer);
kblockd_flush_work(&ad->antic_work);
cancel_work_sync(&ad->antic_work);

BUG_ON(!list_empty(&ad->fifo_list[REQ_SYNC]));
BUG_ON(!list_empty(&ad->fifo_list[REQ_ASYNC]));
@@ -1409,7 +1409,7 @@ as_var_store(unsigned long *var, const char *page, size_t count)
return count;
}

static ssize_t est_time_show(elevator_t *e, char *page)
static ssize_t est_time_show(struct elevator_queue *e, char *page)
{
struct as_data *ad = e->elevator_data;
int pos = 0;
@@ -1427,7 +1427,7 @@ static ssize_t est_time_show(elevator_t *e, char *page)
}

#define SHOW_FUNCTION(__FUNC, __VAR) \
static ssize_t __FUNC(elevator_t *e, char *page) \
static ssize_t __FUNC(struct elevator_queue *e, char *page) \
{ \
struct as_data *ad = e->elevator_data; \
return as_var_show(jiffies_to_msecs((__VAR)), (page)); \
@@ -1440,7 +1440,7 @@ SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[REQ_ASYNC]);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{ \
struct as_data *ad = e->elevator_data; \
int ret = as_var_store(__PTR, (page), count); \
120 changes: 76 additions & 44 deletions block/blk-barrier.c
@@ -24,8 +24,8 @@
int blk_queue_ordered(struct request_queue *q, unsigned ordered,
prepare_flush_fn *prepare_flush_fn)
{
if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
prepare_flush_fn == NULL) {
if (!prepare_flush_fn && (ordered & (QUEUE_ORDERED_DO_PREFLUSH |
QUEUE_ORDERED_DO_POSTFLUSH))) {
printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__);
return -EINVAL;
}
@@ -88,7 +88,7 @@ unsigned blk_ordered_req_seq(struct request *rq)
return QUEUE_ORDSEQ_DONE;
}

void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
{
struct request *rq;

@@ -99,7 +99,7 @@ void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
q->ordseq |= seq;

if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
return;
return false;

/*
* Okay, sequence complete.
@@ -109,6 +109,8 @@ void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)

if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
BUG();

return true;
}

static void pre_flush_end_io(struct request *rq, int error)
@@ -134,7 +136,7 @@ static void queue_flush(struct request_queue *q, unsigned which)
struct request *rq;
rq_end_io_fn *end_io;

if (which == QUEUE_ORDERED_PREFLUSH) {
if (which == QUEUE_ORDERED_DO_PREFLUSH) {
rq = &q->pre_flush_rq;
end_io = pre_flush_end_io;
} else {
@@ -151,80 +153,110 @@ static void queue_flush(struct request_queue *q, unsigned which)
elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}

static inline struct request *start_ordered(struct request_queue *q,
struct request *rq)
static inline bool start_ordered(struct request_queue *q, struct request **rqp)
{
struct request *rq = *rqp;
unsigned skip = 0;

q->orderr = 0;
q->ordered = q->next_ordered;
q->ordseq |= QUEUE_ORDSEQ_STARTED;

/*
* Prep proxy barrier request.
* For an empty barrier, there's no actual BAR request, which
* in turn makes POSTFLUSH unnecessary. Mask them off.
*/
if (!rq->hard_nr_sectors) {
q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
QUEUE_ORDERED_DO_POSTFLUSH);
/*
* Empty barrier on a write-through device w/ ordered
* tag has no command to issue and without any command
* to issue, ordering by tag can't be used. Drain
* instead.
*/
if ((q->ordered & QUEUE_ORDERED_BY_TAG) &&
!(q->ordered & QUEUE_ORDERED_DO_PREFLUSH)) {
q->ordered &= ~QUEUE_ORDERED_BY_TAG;
q->ordered |= QUEUE_ORDERED_BY_DRAIN;
}
}

/* stash away the original request */
elv_dequeue_request(q, rq);
q->orig_bar_rq = rq;
rq = &q->bar_rq;
blk_rq_init(q, rq);
if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
rq->cmd_flags |= REQ_RW;
if (q->ordered & QUEUE_ORDERED_FUA)
rq->cmd_flags |= REQ_FUA;
init_request_from_bio(rq, q->orig_bar_rq->bio);
rq->end_io = bar_end_io;
rq = NULL;

/*
* Queue ordered sequence. As we stack them at the head, we
* need to queue in reverse order. Note that we rely on that
* no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
* request gets inbetween ordered sequence. If this request is
* an empty barrier, we don't need to do a postflush ever since
* there will be no data written between the pre and post flush.
* Hence a single flush will suffice.
* request gets inbetween ordered sequence.
*/
if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq))
queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
else
q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
if (q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) {
queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
rq = &q->post_flush_rq;
} else
skip |= QUEUE_ORDSEQ_POSTFLUSH;

elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
if (q->ordered & QUEUE_ORDERED_DO_BAR) {
rq = &q->bar_rq;

/* initialize proxy request and queue it */
blk_rq_init(q, rq);
if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
rq->cmd_flags |= REQ_RW;
if (q->ordered & QUEUE_ORDERED_DO_FUA)
rq->cmd_flags |= REQ_FUA;
init_request_from_bio(rq, q->orig_bar_rq->bio);
rq->end_io = bar_end_io;

if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
queue_flush(q, QUEUE_ORDERED_PREFLUSH);
elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
} else
skip |= QUEUE_ORDSEQ_BAR;

if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
rq = &q->pre_flush_rq;
} else
q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;
skip |= QUEUE_ORDSEQ_PREFLUSH;

if ((q->ordered & QUEUE_ORDERED_TAG) || q->in_flight == 0)
q->ordseq |= QUEUE_ORDSEQ_DRAIN;
else
if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight)
rq = NULL;
else
skip |= QUEUE_ORDSEQ_DRAIN;

*rqp = rq;

return rq;
/*
* Complete skipped sequences. If whole sequence is complete,
* return false to tell elevator that this request is gone.
*/
return !blk_ordered_complete_seq(q, skip, 0);
}

int blk_do_ordered(struct request_queue *q, struct request **rqp)
bool blk_do_ordered(struct request_queue *q, struct request **rqp)
{
struct request *rq = *rqp;
const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);

if (!q->ordseq) {
if (!is_barrier)
return 1;
return true;

if (q->next_ordered != QUEUE_ORDERED_NONE) {
*rqp = start_ordered(q, rq);
return 1;
} else {
if (q->next_ordered != QUEUE_ORDERED_NONE)
return start_ordered(q, rqp);
else {
/*
* This can happen when the queue switches to
* ORDERED_NONE while this request is on it.
* Queue ordering not supported. Terminate
* with prejudice.
*/
elv_dequeue_request(q, rq);
if (__blk_end_request(rq, -EOPNOTSUPP,
blk_rq_bytes(rq)))
BUG();
*rqp = NULL;
return 0;
return false;
}
}

@@ -235,9 +267,9 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
/* Special requests are not subject to ordering rules. */
if (!blk_fs_request(rq) &&
rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
return 1;
return true;

if (q->ordered & QUEUE_ORDERED_TAG) {
if (q->ordered & QUEUE_ORDERED_BY_TAG) {
/* Ordered by tag. Blocking the next barrier is enough. */
if (is_barrier && rq != &q->bar_rq)
*rqp = NULL;
Expand All @@ -248,7 +280,7 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
*rqp = NULL;
}

return 1;
return true;
}

static void bio_end_empty_barrier(struct bio *bio, int err)
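The reworked start_ordered() above gathers the stages that are unnecessary for a given barrier into a skip mask and completes them with a single blk_ordered_complete_seq() call, which now reports whether the whole sequence has already finished. A toy, self-contained model of that skip-mask idea (illustrative names only, not the kernel's):

```c
#include <stdbool.h>
#include <stdio.h>

/*
 * Toy model of the skip-mask pattern used above (not the kernel code):
 * each stage of the ordered sequence is one bit, stages a request does
 * not need are marked complete up front, and the sequence is finished
 * once every bit is set.
 */
enum {
	SEQ_STARTED	= 1 << 0,
	SEQ_PREFLUSH	= 1 << 1,
	SEQ_BAR		= 1 << 2,
	SEQ_POSTFLUSH	= 1 << 3,
	SEQ_DRAIN	= 1 << 4,
	SEQ_DONE	= SEQ_STARTED | SEQ_PREFLUSH | SEQ_BAR |
			  SEQ_POSTFLUSH | SEQ_DRAIN,
};

static unsigned int ordseq;

/* analogue of blk_ordered_complete_seq(): true once everything is done */
static bool complete_seq(unsigned int seq)
{
	ordseq |= seq;
	return ordseq == SEQ_DONE;
}

int main(void)
{
	ordseq = SEQ_STARTED;

	/* an empty barrier needs no BAR or POSTFLUSH stage: skip them now */
	printf("skip bar/postflush -> done=%d\n",
	       complete_seq(SEQ_BAR | SEQ_POSTFLUSH));

	/* the remaining stages complete later as their requests finish */
	printf("preflush done      -> done=%d\n", complete_seq(SEQ_PREFLUSH));
	printf("queue drained      -> done=%d\n", complete_seq(SEQ_DRAIN));
	return 0;
}
```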