Merge branch 'for-linus' into for-2.6.35
Jens Axboe committed Apr 2, 2010
2 parents: 2eaa9cf + a506aed. Commit: ed6b6dc
Showing 33 changed files with 461 additions and 176 deletions.
13 changes: 13 additions & 0 deletions Documentation/DocBook/tracepoint.tmpl
@@ -16,6 +16,15 @@
</address>
</affiliation>
</author>
+<author>
+<firstname>William</firstname>
+<surname>Cohen</surname>
+<affiliation>
+<address>
+<email>wcohen@redhat.com</email>
+</address>
+</affiliation>
+</author>
</authorgroup>

<legalnotice>
@@ -91,4 +100,8 @@
!Iinclude/trace/events/signal.h
</chapter>

+<chapter id="block">
+<title>Block IO</title>
+!Iinclude/trace/events/block.h
+</chapter>
</book>
4 changes: 2 additions & 2 deletions Documentation/block/biodoc.txt
@@ -1162,8 +1162,8 @@ where a driver received a request ala this before:

As mentioned, there is no virtual mapping of a bio. For DMA, this is
not a problem as the driver probably never will need a virtual mapping.
-Instead it needs a bus mapping (pci_map_page for a single segment or
-use blk_rq_map_sg for scatter gather) to be able to ship it to the driver. For
+Instead it needs a bus mapping (dma_map_page for a single segment or
+use dma_map_sg for scatter gather) to be able to ship it to the driver. For
PIO drivers (or drivers that need to revert to PIO transfer once in a
while (IDE for example)), where the CPU is doing the actual data
transfer a virtual mapping is needed. If the driver supports highmem I/O,
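
For illustration, a minimal sketch of the mapping path the documentation describes, assuming a driver whose scatterlist sgl has been sized to the queue's segment limit (example_map_request and its parameters are hypothetical names, not part of this patch):

	#include <linux/blkdev.h>
	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	/* Build the sg list for a request, then create the bus mapping. */
	static int example_map_request(struct device *dev, struct request *rq,
				       struct scatterlist *sgl)
	{
		int nents = blk_rq_map_sg(rq->q, rq, sgl);
		int count = dma_map_sg(dev, sgl, nents,
				       rq_data_dir(rq) ? DMA_TO_DEVICE
						       : DMA_FROM_DEVICE);
		if (!count)
			return -EIO;	/* mapping failed */
		return count;		/* mapped segments to program into HW */
	}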
3 changes: 2 additions & 1 deletion block/Kconfig
@@ -78,8 +78,9 @@ config BLK_DEV_INTEGRITY
Protection. If in doubt, say N.

config BLK_CGROUP
-tristate
+tristate "Block cgroup support"
depends on CGROUPS
+depends on CFQ_GROUP_IOSCHED
default n
---help---
Generic block IO controller cgroup interface. This is the common
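
A sketch of the .config fragment this now implies, assuming CFQ group scheduling is also wanted (illustrative, not part of the patch):

	CONFIG_CGROUPS=y
	CONFIG_IOSCHED_CFQ=y
	CONFIG_CFQ_GROUP_IOSCHED=y
	CONFIG_BLK_CGROUP=y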
11 changes: 1 addition & 10 deletions block/blk-settings.c
@@ -8,6 +8,7 @@
#include <linux/blkdev.h>
#include <linux/bootmem.h> /* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
+#include <linux/lcm.h>
#include <linux/jiffies.h>

#include "blk.h"
@@ -461,16 +462,6 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
}
EXPORT_SYMBOL(blk_queue_stack_limits);

-static unsigned int lcm(unsigned int a, unsigned int b)
-{
-if (a && b)
-return (a * b) / gcd(a, b);
-else if (b)
-return b;
-
-return a;
-}
-
/**
* blk_stack_limits - adjust queue_limits for stacked devices
* @t: the stacking driver limits (top device)
25 changes: 25 additions & 0 deletions block/blk-sysfs.c
@@ -106,6 +106,19 @@ static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
return queue_var_show(max_sectors_kb, (page));
}

+static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
+{
+return queue_var_show(queue_max_segments(q), (page));
+}
+
+static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
+{
+if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
+return queue_var_show(queue_max_segment_size(q), (page));
+
+return queue_var_show(PAGE_CACHE_SIZE, (page));
+}
+
static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
return queue_var_show(queue_logical_block_size(q), page);
@@ -280,6 +293,16 @@ static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
.show = queue_max_hw_sectors_show,
};

+static struct queue_sysfs_entry queue_max_segments_entry = {
+.attr = {.name = "max_segments", .mode = S_IRUGO },
+.show = queue_max_segments_show,
+};
+
+static struct queue_sysfs_entry queue_max_segment_size_entry = {
+.attr = {.name = "max_segment_size", .mode = S_IRUGO },
+.show = queue_max_segment_size_show,
+};
+
static struct queue_sysfs_entry queue_iosched_entry = {
.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
.show = elv_iosched_show,
@@ -355,6 +378,8 @@ static struct attribute *default_attrs[] = {
&queue_ra_entry.attr,
&queue_max_hw_sectors_entry.attr,
&queue_max_sectors_entry.attr,
+&queue_max_segments_entry.attr,
+&queue_max_segment_size_entry.attr,
&queue_iosched_entry.attr,
&queue_hw_sector_size_entry.attr,
&queue_logical_block_size_entry.attr,
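
A short sketch of how these new read-only attributes surface under /sys/block/<dev>/queue/, assuming a driver that sets its segment limits at queue setup time (values illustrative):

	/* in a driver's queue setup (sketch) */
	blk_queue_max_segments(q, 128);		/* read back via queue/max_segments */
	blk_queue_max_segment_size(q, 65536);	/* read back via queue/max_segment_size */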
29 changes: 21 additions & 8 deletions block/cfq-iosched.c
@@ -47,6 +47,7 @@ static const int cfq_hist_divisor = 4;
#define CFQ_SERVICE_SHIFT 12

#define CFQQ_SEEK_THR (sector_t)(8 * 100)
+#define CFQQ_CLOSE_THR (sector_t)(8 * 1024)
#define CFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
#define CFQQ_SEEKY(cfqq) (hweight32(cfqq->seek_history) > 32/8)

@@ -1517,7 +1518,8 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
struct cfq_queue *cfqq)
{
if (cfqq) {
cfq_log_cfqq(cfqd, cfqq, "set_active");
cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
cfqd->serving_prio, cfqd->serving_type);
cfqq->slice_start = 0;
cfqq->dispatch_start = jiffies;
cfqq->allocated_slice = 0;
@@ -1660,9 +1662,9 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
}

static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-struct request *rq, bool for_preempt)
+struct request *rq)
{
-return cfq_dist_from_last(cfqd, rq) <= CFQQ_SEEK_THR;
+return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
}

static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
@@ -1689,7 +1691,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
* will contain the closest sector.
*/
__cfqq = rb_entry(parent, struct cfq_queue, p_node);
-if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq, false))
+if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
return __cfqq;

if (blk_rq_pos(__cfqq->next_rq) < sector)
@@ -1700,7 +1702,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
return NULL;

__cfqq = rb_entry(node, struct cfq_queue, p_node);
-if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq, false))
+if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
return __cfqq;

return NULL;
@@ -1721,6 +1723,8 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
{
struct cfq_queue *cfqq;

+if (cfq_class_idle(cur_cfqq))
+return NULL;
if (!cfq_cfqq_sync(cur_cfqq))
return NULL;
if (CFQQ_SEEKY(cur_cfqq))
@@ -1787,7 +1791,11 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
* Otherwise, we do only if they are the last ones
* in their service tree.
*/
-return service_tree->count == 1 && cfq_cfqq_sync(cfqq);
+if (service_tree->count == 1 && cfq_cfqq_sync(cfqq))
+return 1;
+cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
+service_tree->count);
+return 0;
}

static void cfq_arm_slice_timer(struct cfq_data *cfqd)
@@ -1832,8 +1840,11 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
* time slice.
*/
if (sample_valid(cic->ttime_samples) &&
-(cfqq->slice_end - jiffies < cic->ttime_mean))
+(cfqq->slice_end - jiffies < cic->ttime_mean)) {
+cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%d",
+cic->ttime_mean);
return;
+}

cfq_mark_cfqq_wait_request(cfqq);

@@ -2041,6 +2052,7 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
slice = max(slice, 2 * cfqd->cfq_slice_idle);

slice = max_t(unsigned, slice, CFQ_MIN_TT);
+cfq_log(cfqd, "workload slice:%d", slice);
cfqd->workload_expires = jiffies + slice;
cfqd->noidle_tree_requires_idle = false;
}
@@ -3103,7 +3115,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
* if this request is as-good as one we would expect from the
* current cfqq, let it preempt
*/
-if (cfq_rq_close(cfqd, cfqq, rq, true))
+if (cfq_rq_close(cfqd, cfqq, rq))
return true;

return false;
@@ -3307,6 +3319,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
if (cfq_should_wait_busy(cfqd, cfqq)) {
cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
cfq_mark_cfqq_wait_busy(cfqq);
cfq_log_cfqq(cfqd, cfqq, "will busy wait");
}

/*
2 changes: 1 addition & 1 deletion block/elevator.c
@@ -154,7 +154,7 @@ static struct elevator_type *elevator_get(const char *name)

spin_unlock(&elv_list_lock);

-sprintf(elv, "%s-iosched", name);
+snprintf(elv, sizeof(elv), "%s-iosched", name);

request_module("%s", elv);
spin_lock(&elv_list_lock);
1 change: 0 additions & 1 deletion drivers/block/DAC960.c
@@ -2533,7 +2533,6 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
Controller->RequestQueue[n] = RequestQueue;
blk_queue_bounce_limit(RequestQueue, Controller->BounceBufferLimit);
RequestQueue->queuedata = Controller;
-blk_queue_max_hw_segments(RequestQueue, Controller->DriverScatterGatherLimit);
blk_queue_max_segments(RequestQueue, Controller->DriverScatterGatherLimit);
blk_queue_max_hw_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
disk->queue = RequestQueue;
1 change: 1 addition & 0 deletions drivers/block/cciss.c
@@ -3341,6 +3341,7 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id)
printk(KERN_WARNING
"cciss: controller cciss%d failed, stopping.\n",
h->ctlr);
+spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
fail_all_cmds(h->ctlr);
return IRQ_HANDLED;
}
19 changes: 14 additions & 5 deletions drivers/block/drbd/drbd_actlog.c
@@ -536,7 +536,9 @@ static void atodb_endio(struct bio *bio, int error)
put_ldev(mdev);
}

+/* sector to word */
#define S2W(s) ((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
+
/* activity log to on disk bitmap -- prepare bio unless that sector
* is already covered by previously prepared bios */
static int atodb_prepare_unless_covered(struct drbd_conf *mdev,
@@ -546,13 +548,20 @@
{
struct bio *bio;
struct page *page;
-sector_t on_disk_sector = enr + mdev->ldev->md.md_offset
-+ mdev->ldev->md.bm_offset;
+sector_t on_disk_sector;
unsigned int page_offset = PAGE_SIZE;
int offset;
int i = 0;
int err = -ENOMEM;

+/* We always write aligned, full 4k blocks,
+* so we can ignore the logical_block_size (for now) */
+enr &= ~7U;
+on_disk_sector = enr + mdev->ldev->md.md_offset
++ mdev->ldev->md.bm_offset;
+
+D_ASSERT(!(on_disk_sector & 7U));
+
/* Check if that enr is already covered by an already created bio.
* Caution, bios[] is not NULL terminated,
* but only initialized to all NULL.
@@ -588,7 +597,7 @@

offset = S2W(enr);
drbd_bm_get_lel(mdev, offset,
-min_t(size_t, S2W(1), drbd_bm_words(mdev) - offset),
+min_t(size_t, S2W(8), drbd_bm_words(mdev) - offset),
kmap(page) + page_offset);
kunmap(page);

@@ -597,7 +606,7 @@
bio->bi_bdev = mdev->ldev->md_bdev;
bio->bi_sector = on_disk_sector;

-if (bio_add_page(bio, page, MD_SECTOR_SIZE, page_offset) != MD_SECTOR_SIZE)
+if (bio_add_page(bio, page, 4096, page_offset) != 4096)
goto out_put_page;

atomic_inc(&wc->count);
@@ -1327,7 +1336,7 @@ int drbd_rs_del_all(struct drbd_conf *mdev)
/* ok, ->resync is there. */
for (i = 0; i < mdev->resync->nr_elements; i++) {
e = lc_element_by_index(mdev->resync, i);
-bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
+bm_ext = lc_entry(e, struct bm_extent, lce);
if (bm_ext->lce.lc_number == LC_FREE)
continue;
if (bm_ext->lce.lc_number == mdev->resync_wenr) {
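
The alignment added in atodb_prepare_unless_covered() is worth unpacking: with 512-byte sectors, eight sectors span one 4k block, so clearing the low three bits of enr rounds it down to a 4k boundary. A small worked sketch (values hypothetical):

	unsigned int enr = 21;	/* 0b10101 */
	enr &= ~7U;		/* 16: 8-sector aligned, i.e. 16 * 512 = 8192 bytes */
	/* the D_ASSERT(!(on_disk_sector & 7U)) above then checks that the
	 * final on-disk sector keeps that 8-sector (4k) alignment */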
10 changes: 5 additions & 5 deletions drivers/block/drbd/drbd_bitmap.c
@@ -66,7 +66,7 @@ struct drbd_bitmap {
size_t bm_words;
size_t bm_number_of_pages;
sector_t bm_dev_capacity;
-struct semaphore bm_change; /* serializes resize operations */
+struct mutex bm_change; /* serializes resize operations */

atomic_t bm_async_io;
wait_queue_head_t bm_io_wait;
@@ -114,7 +114,7 @@ void drbd_bm_lock(struct drbd_conf *mdev, char *why)
return;
}

-trylock_failed = down_trylock(&b->bm_change);
+trylock_failed = !mutex_trylock(&b->bm_change);

if (trylock_failed) {
dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n",
@@ -125,7 +125,7 @@
b->bm_task == mdev->receiver.task ? "receiver" :
b->bm_task == mdev->asender.task ? "asender" :
b->bm_task == mdev->worker.task ? "worker" : "?");
-down(&b->bm_change);
+mutex_lock(&b->bm_change);
}
if (__test_and_set_bit(BM_LOCKED, &b->bm_flags))
dev_err(DEV, "FIXME bitmap already locked in bm_lock\n");
@@ -147,7 +147,7 @@ void drbd_bm_unlock(struct drbd_conf *mdev)

b->bm_why = NULL;
b->bm_task = NULL;
-up(&b->bm_change);
+mutex_unlock(&b->bm_change);
}

/* word offset to long pointer */
@@ -295,7 +295,7 @@ int drbd_bm_init(struct drbd_conf *mdev)
if (!b)
return -ENOMEM;
spin_lock_init(&b->bm_lock);
-init_MUTEX(&b->bm_change);
+mutex_init(&b->bm_change);
init_waitqueue_head(&b->bm_io_wait);

mdev->bitmap = b;
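
One subtlety behind the added negation in drbd_bm_lock(): the two trylock primitives use opposite return conventions, shown side by side below (sketch; sem and mtx are placeholder names):

	/* down_trylock() returns 0 on success, nonzero if already held;
	 * mutex_trylock() returns 1 on success, 0 if already held. Hence: */
	trylock_failed = down_trylock(&sem);	/* semaphore version */
	trylock_failed = !mutex_trylock(&mtx);	/* mutex version */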
12 changes: 11 additions & 1 deletion drivers/block/drbd/drbd_int.h
@@ -261,6 +261,9 @@ static inline const char *cmdname(enum drbd_packets cmd)
[P_OV_REQUEST] = "OVRequest",
[P_OV_REPLY] = "OVReply",
[P_OV_RESULT] = "OVResult",
+[P_CSUM_RS_REQUEST] = "CsumRSRequest",
+[P_RS_IS_IN_SYNC] = "CsumRSIsInSync",
+[P_COMPRESSED_BITMAP] = "CBitmap",
[P_MAX_CMD] = NULL,
};

@@ -443,13 +446,18 @@ struct p_rs_param_89 {
char csums_alg[SHARED_SECRET_MAX];
} __packed;

+enum drbd_conn_flags {
+CF_WANT_LOSE = 1,
+CF_DRY_RUN = 2,
+};
+
struct p_protocol {
struct p_header head;
u32 protocol;
u32 after_sb_0p;
u32 after_sb_1p;
u32 after_sb_2p;
-u32 want_lose;
+u32 conn_flags;
u32 two_primaries;

/* Since protocol version 87 and higher. */
@@ -791,6 +799,8 @@
* while this is set. */
RESIZE_PENDING, /* Size change detected locally, waiting for the response from
* the peer, if it changed there as well. */
+CONN_DRY_RUN, /* Expect disconnect after resync handshake. */
+GOT_PING_ACK, /* set when we receive a ping_ack packet, misc wait gets woken */
};

struct drbd_bitmap; /* opaque for drbd_conf */
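
The new conn_flags word generalizes the old single-purpose want_lose field; both flags travel in one big-endian u32. A sketch of how the send side would pack it, assuming local booleans want_lose and dry_run (names hypothetical):

	u32 cf = 0;
	if (want_lose)
		cf |= CF_WANT_LOSE;
	if (dry_run)
		cf |= CF_DRY_RUN;
	p->conn_flags = cpu_to_be32(cf);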