Commit 447282a
---
r: 213180
b: refs/heads/master
c: 4fed947
h: refs/heads/master
v: v3
Tejun Heo authored and Jens Axboe committed Sep 10, 2010
1 parent cea167b commit 447282a
Showing 8 changed files with 68 additions and 85 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: dd4c133f387c48f526022860ad70354637a80f4c
+refs/heads/master: 4fed947cb311e5aa51781d316cefca836352f6ce
2 changes: 1 addition & 1 deletion trunk/block/blk-core.c
@@ -1204,7 +1204,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
         spin_lock_irq(q->queue_lock);
 
-        if (bio->bi_rw & REQ_HARDBARRIER) {
+        if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
                 where = ELEVATOR_INSERT_FRONT;
                 goto get_rq;
         }
85 changes: 46 additions & 39 deletions trunk/block/blk-flush.c
@@ -1,5 +1,5 @@
 /*
- * Functions related to barrier IO handling
+ * Functions to sequence FLUSH and FUA writes.
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -9,6 +9,15 @@
 
 #include "blk.h"
 
+/* FLUSH/FUA sequences */
+enum {
+        QUEUE_FSEQ_STARTED   = (1 << 0), /* flushing in progress */
+        QUEUE_FSEQ_PREFLUSH  = (1 << 1), /* pre-flushing in progress */
+        QUEUE_FSEQ_DATA      = (1 << 2), /* data write in progress */
+        QUEUE_FSEQ_POSTFLUSH = (1 << 3), /* post-flushing in progress */
+        QUEUE_FSEQ_DONE      = (1 << 4),
+};
+
 static struct request *queue_next_fseq(struct request_queue *q);
 
 unsigned blk_flush_cur_seq(struct request_queue *q)
@@ -79,6 +88,7 @@ static void queue_flush(struct request_queue *q, struct request *rq,
 
 static struct request *queue_next_fseq(struct request_queue *q)
 {
+        struct request *orig_rq = q->orig_flush_rq;
         struct request *rq = &q->flush_rq;
 
         switch (blk_flush_cur_seq(q)) {
@@ -87,12 +97,11 @@ static struct request *queue_next_fseq(struct request_queue *q)
                 break;
 
         case QUEUE_FSEQ_DATA:
-                /* initialize proxy request and queue it */
+                /* initialize proxy request, inherit FLUSH/FUA and queue it */
                 blk_rq_init(q, rq);
-                init_request_from_bio(rq, q->orig_flush_rq->bio);
-                rq->cmd_flags &= ~REQ_HARDBARRIER;
-                if (q->ordered & QUEUE_ORDERED_DO_FUA)
-                        rq->cmd_flags |= REQ_FUA;
+                init_request_from_bio(rq, orig_rq->bio);
+                rq->cmd_flags &= ~(REQ_FLUSH | REQ_FUA);
+                rq->cmd_flags |= orig_rq->cmd_flags & (REQ_FLUSH | REQ_FUA);
                 rq->end_io = flush_data_end_io;
 
                 elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
@@ -110,60 +119,58 @@ static struct request *queue_next_fseq(struct request_queue *q)
 
 struct request *blk_do_flush(struct request_queue *q, struct request *rq)
 {
+        unsigned int fflags = q->flush_flags; /* may change, cache it */
+        bool has_flush = fflags & REQ_FLUSH, has_fua = fflags & REQ_FUA;
+        bool do_preflush = has_flush && (rq->cmd_flags & REQ_FLUSH);
+        bool do_postflush = has_flush && !has_fua && (rq->cmd_flags & REQ_FUA);
         unsigned skip = 0;
 
-        if (!(rq->cmd_flags & REQ_HARDBARRIER))
+        /*
+         * Special case. If there's data but flush is not necessary,
+         * the request can be issued directly.
+         *
+         * Flush w/o data should be able to be issued directly too but
+         * currently some drivers assume that rq->bio contains
+         * non-zero data if it isn't NULL and empty FLUSH requests
+         * getting here usually have bio's without data.
+         */
+        if (blk_rq_sectors(rq) && !do_preflush && !do_postflush) {
+                rq->cmd_flags &= ~REQ_FLUSH;
+                if (!has_fua)
+                        rq->cmd_flags &= ~REQ_FUA;
                 return rq;
+        }
 
+        /*
+         * Sequenced flushes can't be processed in parallel. If
+         * another one is already in progress, queue for later
+         * processing.
+         */
         if (q->flush_seq) {
-                /*
-                 * Sequenced flush is already in progress and they
-                 * can't be processed in parallel. Queue for later
-                 * processing.
-                 */
                 list_move_tail(&rq->queuelist, &q->pending_flushes);
                 return NULL;
         }
 
-        if (unlikely(q->next_ordered == QUEUE_ORDERED_NONE)) {
-                /*
-                 * Queue ordering not supported. Terminate
-                 * with prejudice.
-                 */
-                blk_dequeue_request(rq);
-                __blk_end_request_all(rq, -EOPNOTSUPP);
-                return NULL;
-        }
-
         /*
          * Start a new flush sequence
          */
         q->flush_err = 0;
-        q->ordered = q->next_ordered;
         q->flush_seq |= QUEUE_FSEQ_STARTED;
 
-        /*
-         * For an empty barrier, there's no actual BAR request, which
-         * in turn makes POSTFLUSH unnecessary. Mask them off.
-         */
-        if (!blk_rq_sectors(rq))
-                q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
-                                QUEUE_ORDERED_DO_POSTFLUSH);
-
-        /* stash away the original request */
+        /* adjust FLUSH/FUA of the original request and stash it away */
+        rq->cmd_flags &= ~REQ_FLUSH;
+        if (!has_fua)
+                rq->cmd_flags &= ~REQ_FUA;
         blk_dequeue_request(rq);
         q->orig_flush_rq = rq;
 
-        if (!(q->ordered & QUEUE_ORDERED_DO_PREFLUSH))
+        /* skip unneded sequences and return the first one */
+        if (!do_preflush)
                 skip |= QUEUE_FSEQ_PREFLUSH;
-
-        if (!(q->ordered & QUEUE_ORDERED_DO_BAR))
+        if (!blk_rq_sectors(rq))
                 skip |= QUEUE_FSEQ_DATA;
-
-        if (!(q->ordered & QUEUE_ORDERED_DO_POSTFLUSH))
+        if (!do_postflush)
                 skip |= QUEUE_FSEQ_POSTFLUSH;
-
-        /* complete skipped sequences and return the first sequence */
         return blk_flush_complete_seq(q, skip, 0);
 }
 
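Note: the rewritten blk_do_flush() above boils each request down to two booleans, do_preflush and do_postflush, derived from the queue's advertised flush_flags, then composes a mask of QUEUE_FSEQ_* steps to skip. A user-space sketch of just that skip-mask computation follows; the flag values and the main() harness are illustrative stand-ins, not kernel definitions.

/* sketch: the skip-mask computation mirroring blk_do_flush() above */
#include <stdbool.h>
#include <stdio.h>

#define REQ_FLUSH (1u << 0)	/* stand-in values, not the kernel's */
#define REQ_FUA   (1u << 1)

enum {
        QUEUE_FSEQ_PREFLUSH  = (1 << 1),
        QUEUE_FSEQ_DATA      = (1 << 2),
        QUEUE_FSEQ_POSTFLUSH = (1 << 3),
};

static unsigned flush_skip_mask(unsigned flush_flags, unsigned cmd_flags,
                                unsigned sectors)
{
        bool has_flush = flush_flags & REQ_FLUSH;
        bool has_fua = flush_flags & REQ_FUA;
        /* preflush: the request asks for FLUSH and the device needs one */
        bool do_preflush = has_flush && (cmd_flags & REQ_FLUSH);
        /* postflush: FUA requested but the device can only emulate it */
        bool do_postflush = has_flush && !has_fua && (cmd_flags & REQ_FUA);
        unsigned skip = 0;

        if (!do_preflush)
                skip |= QUEUE_FSEQ_PREFLUSH;
        if (!sectors)
                skip |= QUEUE_FSEQ_DATA;
        if (!do_postflush)
                skip |= QUEUE_FSEQ_POSTFLUSH;
        return skip;
}

int main(void)
{
        /* FLUSH+FUA write, device does FLUSH but not FUA: run all steps */
        printf("skip=%#x\n", flush_skip_mask(REQ_FLUSH, REQ_FLUSH | REQ_FUA, 8));
        /* empty FLUSH: only the PREFLUSH step is left */
        printf("skip=%#x\n", flush_skip_mask(REQ_FLUSH, REQ_FLUSH, 0));
        return 0;
}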
3 changes: 3 additions & 0 deletions trunk/block/blk.h
@@ -60,6 +60,9 @@ static inline struct request *__elv_next_request(struct request_queue *q)
         while (1) {
                 while (!list_empty(&q->queue_head)) {
                         rq = list_entry_rq(q->queue_head.next);
+                        if (!(rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) ||
+                            rq == &q->flush_rq)
+                                return rq;
                         rq = blk_do_flush(q, rq);
                         if (rq)
                                 return rq;
2 changes: 1 addition & 1 deletion trunk/include/linux/blk_types.h
@@ -167,7 +167,7 @@ enum rq_flag_bits {
         (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
 #define REQ_COMMON_MASK \
         (REQ_WRITE | REQ_FAILFAST_MASK | REQ_HARDBARRIER | REQ_SYNC | \
-         REQ_META| REQ_DISCARD | REQ_NOIDLE)
+         REQ_META | REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
 
 #define REQ_UNPLUG (1 << __REQ_UNPLUG)
 #define REQ_RAHEAD (1 << __REQ_RAHEAD)
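Note: REQ_COMMON_MASK is the set of bio flags that survives the bio-to-request copy (init_request_from_bio() applies it as rq->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK); without this hunk, a bio's REQ_FLUSH/REQ_FUA bits would be silently dropped when the request is built. A tiny stand-alone sketch of that propagation, with made-up flag values:

#include <assert.h>

#define REQ_WRITE (1u << 0)	/* stand-in values, not the kernel's */
#define REQ_SYNC  (1u << 1)
#define REQ_FLUSH (1u << 2)
#define REQ_FUA   (1u << 3)
#define REQ_COMMON_MASK (REQ_WRITE | REQ_SYNC | REQ_FLUSH | REQ_FUA)

int main(void)
{
        unsigned bi_rw = REQ_WRITE | REQ_FLUSH | REQ_FUA; /* bio flags */
        unsigned cmd_flags = 0;                           /* request flags */

        cmd_flags |= bi_rw & REQ_COMMON_MASK; /* as in init_request_from_bio() */
        assert(cmd_flags & (REQ_FLUSH | REQ_FUA)); /* now carried through */
        return 0;
}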
38 changes: 2 additions & 36 deletions trunk/include/linux/blkdev.h
@@ -357,7 +357,6 @@ struct request_queue
         /*
          * for flush operations
          */
-        unsigned int ordered, next_ordered;
         unsigned int flush_flags;
         unsigned int flush_seq;
         int flush_err;
@@ -465,40 +464,6 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
         __clear_bit(flag, &q->queue_flags);
 }
 
-enum {
-        /*
-         * Hardbarrier is supported with one of the following methods.
-         *
-         * NONE         : hardbarrier unsupported
-         * DRAIN        : ordering by draining is enough
-         * DRAIN_FLUSH  : ordering by draining w/ pre and post flushes
-         * DRAIN_FUA    : ordering by draining w/ pre flush and FUA write
-         */
-        QUEUE_ORDERED_DO_PREFLUSH   = 0x10,
-        QUEUE_ORDERED_DO_BAR        = 0x20,
-        QUEUE_ORDERED_DO_POSTFLUSH  = 0x40,
-        QUEUE_ORDERED_DO_FUA        = 0x80,
-
-        QUEUE_ORDERED_NONE          = 0x00,
-
-        QUEUE_ORDERED_DRAIN         = QUEUE_ORDERED_DO_BAR,
-        QUEUE_ORDERED_DRAIN_FLUSH   = QUEUE_ORDERED_DRAIN |
-                                      QUEUE_ORDERED_DO_PREFLUSH |
-                                      QUEUE_ORDERED_DO_POSTFLUSH,
-        QUEUE_ORDERED_DRAIN_FUA     = QUEUE_ORDERED_DRAIN |
-                                      QUEUE_ORDERED_DO_PREFLUSH |
-                                      QUEUE_ORDERED_DO_FUA,
-
-        /*
-         * FLUSH/FUA sequences.
-         */
-        QUEUE_FSEQ_STARTED   = (1 << 0), /* flushing in progress */
-        QUEUE_FSEQ_PREFLUSH  = (1 << 1), /* pre-flushing in progress */
-        QUEUE_FSEQ_DATA      = (1 << 2), /* data write in progress */
-        QUEUE_FSEQ_POSTFLUSH = (1 << 3), /* post-flushing in progress */
-        QUEUE_FSEQ_DONE      = (1 << 4),
-};
-
 #define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
@@ -578,7 +543,8 @@ static inline void blk_clear_queue_full(struct request_queue *q, int sync)
  * it already be started by driver.
  */
 #define RQ_NOMERGE_FLAGS \
-        (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
+        (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER | \
+         REQ_FLUSH | REQ_FUA)
 #define rq_mergeable(rq) \
         (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
          (((rq)->cmd_flags & REQ_DISCARD) || \
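Note: with the QUEUE_ORDERED_* modes gone, a driver no longer selects an ordering strategy; it only advertises what its write cache can do via q->flush_flags, using blk_queue_flush() from earlier in this series. A minimal sketch for a hypothetical driver (names invented):

#include <linux/blkdev.h>

/* hypothetical driver init: the device has a volatile write cache and
 * supports FUA writes, so advertise both capabilities */
static void mydrv_init_queue(struct request_queue *q)
{
        blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
}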
2 changes: 1 addition & 1 deletion trunk/include/linux/buffer_head.h
@@ -32,7 +32,7 @@ enum bh_state_bits {
         BH_Delay,        /* Buffer is not yet allocated on disk */
         BH_Boundary,     /* Block is followed by a discontiguity */
         BH_Write_EIO,    /* I/O error on write */
-        BH_Eopnotsupp,   /* operation not supported (barrier) */
+        BH_Eopnotsupp,   /* DEPRECATED: operation not supported (barrier) */
         BH_Unwritten,    /* Buffer is allocated on disk but not written */
         BH_Quiet,        /* Buffer Error Prinks to be quiet */
 
19 changes: 13 additions & 6 deletions trunk/include/linux/fs.h
@@ -135,12 +135,13 @@ struct inodes_stat_t {
  *                      immediately after submission. The write equivalent
  *                      of READ_SYNC.
  * WRITE_ODIRECT_PLUG   Special case write for O_DIRECT only.
- * WRITE_BARRIER        Like WRITE_SYNC, but tells the block layer that all
- *                      previously submitted writes must be safely on storage
- *                      before this one is started. Also guarantees that when
- *                      this write is complete, it itself is also safely on
- *                      storage. Prevents reordering of writes on both sides
- *                      of this IO.
+ * WRITE_BARRIER        DEPRECATED. Always fails. Use FLUSH/FUA instead.
+ * WRITE_FLUSH          Like WRITE_SYNC but with preceding cache flush.
+ * WRITE_FUA            Like WRITE_SYNC but data is guaranteed to be on
+ *                      non-volatile media on completion.
+ * WRITE_FLUSH_FUA      Combination of WRITE_FLUSH and FUA. The IO is preceded
+ *                      by a cache flush and data is guaranteed to be on
+ *                      non-volatile media on completion.
  *
  */
 #define RW_MASK RE​Q_WRITE
@@ -158,6 +159,12 @@ struct inodes_stat_t {
 #define WRITE_META      (WRITE | REQ_META)
 #define WRITE_BARRIER   (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
                          REQ_HARDBARRIER)
+#define WRITE_FLUSH     (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
+                         REQ_FLUSH)
+#define WRITE_FUA       (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
+                         REQ_FUA)
+#define WRITE_FLUSH_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
+                         REQ_FLUSH | REQ_FUA)
 
 /*
  * These aren't really reads or writes, they pass down information about
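Note: the new macros slot into existing submission paths unchanged. A sketch of how a filesystem might push out a journal commit block with a single FLUSH+FUA submission instead of the old WRITE_BARRIER (hypothetical helper, locking and error handling pared down; jbd2 later adopted a similar pattern):

#include <linux/buffer_head.h>
#include <linux/fs.h>

/* hypothetical helper: write @bh with a preceding cache flush + FUA */
static void write_commit_block(struct buffer_head *bh)
{
        lock_buffer(bh);
        bh->b_end_io = end_buffer_write_sync;  /* drops the ref taken below */
        get_bh(bh);
        submit_bh(WRITE_FLUSH_FUA, bh);
        wait_on_buffer(bh);                    /* data is on stable media here */
}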
