Merge branch 'for-2.6.30' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.30' of git://git.kernel.dk/linux-2.6-block:
  Get rid of pdflush_operation() in emergency sync and remount
  btrfs: get rid of current_is_pdflush() in btrfs_btree_balance_dirty
  Move the default_backing_dev_info out of readahead.c and into backing-dev.c
  block: Repeated lines in switching-sched.txt
  bsg: Remove bogus check against request_queue->max_sectors
  block: WARN in __blk_put_request() for potential bio leak
  loop: fix circular locking in loop_clr_fd()
  loop: support barrier writes
  bsg: add support for tail queuing
  cpqarray: enable bus mastering
  block: genhd.h cleanup patch
  block: add private bio_set for bio integrity allocations
  block: genhd.h comment needs updating
  block: get rid of unused blkdev_free_rq() define
  block: remove various blk_queue_*() setting functions in blk_init_queue_node()
  cciss: add BUILD_BUG_ON() for catching bad CommandList_struct alignment
  block: don't create bio_vec slabs of less than the inline number
  block: cleanup bio_alloc_bioset()
Linus Torvalds committed Mar 26, 2009 (commit 86d9c07, 2 parents: 413e337 + a2a9537)
Showing 18 changed files with 205 additions and 186 deletions.
6 changes: 0 additions & 6 deletions Documentation/block/switching-sched.txt
@@ -35,9 +35,3 @@ noop anticipatory deadline [cfq]
 # echo anticipatory > /sys/block/hda/queue/scheduler
 # cat /sys/block/hda/queue/scheduler
 noop [anticipatory] deadline cfq
-
-Each io queue has a set of io scheduler tunables associated with it. These
-tunables control how the io scheduler works. You can find these entries
-in:
-
-/sys/block/<device>/queue/iosched
13 changes: 6 additions & 7 deletions block/blk-core.c
@@ -603,13 +603,10 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 	q->queue_flags		= QUEUE_FLAG_DEFAULT;
 	q->queue_lock		= lock;
 
-	blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
-
+	/*
+	 * This also sets hw/phys segments, boundary and size
+	 */
 	blk_queue_make_request(q, __make_request);
-	blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
 
-	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
-	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
-
 	q->sg_reserved_size = INT_MAX;
 
@@ -735,7 +732,6 @@ static void freed_request(struct request_queue *q, int rw, int priv)
 	__freed_request(q, rw ^ 1);
 }
 
-#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
 /*
  * Get a free request, queue_lock must be held.
  * Returns NULL on failure, with queue_lock held.
@@ -1066,6 +1062,9 @@ void __blk_put_request(struct request_queue *q, struct request *req)
 
 	elv_completed_request(q, req);
 
+	/* this is a bio leak */
+	WARN_ON(req->bio != NULL);
+
 	/*
 	 * Request may not have originated from ll_rw_blk. if not,
 	 * it didn't come out of our reserved rq pools
2 changes: 2 additions & 0 deletions block/blk-merge.c
@@ -403,6 +403,8 @@ static int attempt_merge(struct request_queue *q, struct request *req,
 	if (blk_rq_cpu_valid(next))
 		req->cpu = next->cpu;
 
+	/* ownership of bio passed from next to req */
+	next->bio = NULL;
 	__blk_put_request(q, next);
 	return 1;
 }
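
The blk-core.c and blk-merge.c hunks above work as a pair: __blk_put_request() now warns if a request is freed while still pointing at a bio, so attempt_merge() must hand the bio over to the surviving request first. A minimal standalone sketch of that ownership rule, with simplified stand-in types rather than the real kernel structures:

#include <assert.h>
#include <stddef.h>

struct bio { int data; };
struct request { struct bio *bio; };

/* mirrors the new WARN_ON(): freeing a request that still owns a bio
 * would leak that bio */
static void put_request(struct request *rq)
{
	assert(rq->bio == NULL);
	/* real code would return rq to its mempool here */
}

/* req absorbs next's bio before next is freed (the real code splices a
 * whole chain via biotail; one pointer is enough for the sketch) */
static void merge_requests(struct request *req, struct request *next)
{
	req->bio = next->bio;
	next->bio = NULL;	/* ownership passed from next to req */
	put_request(next);
}

int main(void)
{
	struct bio b = { 42 };
	struct request r1 = { NULL }, r2 = { &b };

	merge_requests(&r1, &r2);	/* ok: r2 gave up its bio first */
	assert(r1.bio == &b);
	return 0;
}
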
12 changes: 7 additions & 5 deletions block/bsg.c
@@ -218,9 +218,6 @@ bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
 
 	if (hdr->guard != 'Q')
 		return -EINVAL;
-	if (hdr->dout_xfer_len > (q->max_sectors << 9) ||
-	    hdr->din_xfer_len > (q->max_sectors << 9))
-		return -EIO;
 
 	switch (hdr->protocol) {
 	case BSG_PROTOCOL_SCSI:
@@ -353,6 +350,8 @@ static void bsg_rq_end_io(struct request *rq, int uptodate)
 static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
 			    struct bsg_command *bc, struct request *rq)
 {
+	int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL));
+
 	/*
 	 * add bc command to busy queue and submit rq for io
 	 */
@@ -368,7 +367,7 @@ static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
 	dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);
 
 	rq->end_io_data = bc;
-	blk_execute_rq_nowait(q, NULL, rq, 1, bsg_rq_end_io);
+	blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io);
 }
 
 static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
@@ -924,6 +923,7 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		struct request *rq;
 		struct bio *bio, *bidi_bio = NULL;
 		struct sg_io_v4 hdr;
+		int at_head;
 		u8 sense[SCSI_SENSE_BUFFERSIZE];
 
 		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
@@ -936,7 +936,9 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		bio = rq->bio;
 		if (rq->next_rq)
 			bidi_bio = rq->next_rq->bio;
-		blk_execute_rq(bd->queue, NULL, rq, 0);
+
+		at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL));
+		blk_execute_rq(bd->queue, NULL, rq, at_head);
 		ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);
 
 		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
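
With BSG_FLAG_Q_AT_TAIL honored in both the command and SG_IO paths, userspace can now ask for normal tail queuing instead of the head insertion bsg previously used unconditionally. A hypothetical userspace sketch of the SG_IO path; the device node, CDB, and error handling are illustrative only:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>
#include <linux/bsg.h>

int main(void)
{
	unsigned char cdb[6] = { 0 };	/* TEST UNIT READY */
	unsigned char sense[32];
	struct sg_io_v4 hdr;
	int fd = open("/dev/bsg/0:0:0:0", O_RDWR);	/* illustrative node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&hdr, 0, sizeof(hdr));
	hdr.guard = 'Q';		/* checked by bsg_validate_sgv4_hdr() */
	hdr.protocol = BSG_PROTOCOL_SCSI;
	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
	hdr.request = (__u64)(unsigned long)cdb;
	hdr.request_len = sizeof(cdb);
	hdr.response = (__u64)(unsigned long)sense;
	hdr.max_response_len = sizeof(sense);
	hdr.flags = BSG_FLAG_Q_AT_TAIL;	/* new: queue at tail, not head */

	if (ioctl(fd, SG_IO, &hdr) < 0)
		perror("SG_IO");

	close(fd);
	return 0;
}
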
21 changes: 4 additions & 17 deletions block/scsi_ioctl.c
@@ -214,21 +214,10 @@ static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
 	return 0;
 }
 
-/*
- * unmap a request that was previously mapped to this sg_io_hdr. handles
- * both sg and non-sg sg_io_hdr.
- */
-static int blk_unmap_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr)
-{
-	blk_rq_unmap_user(rq->bio);
-	blk_put_request(rq);
-	return 0;
-}
-
 static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
 				 struct bio *bio)
 {
-	int r, ret = 0;
+	int ret = 0;
 
 	/*
 	 * fill in all the output members
@@ -253,12 +242,10 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
 			ret = -EFAULT;
 	}
 
-	rq->bio = bio;
-	r = blk_unmap_sghdr_rq(rq, hdr);
-	if (ret)
-		r = ret;
+	blk_rq_unmap_user(bio);
+	blk_put_request(rq);
 
-	return r;
+	return ret;
 }
 
 static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
7 changes: 7 additions & 0 deletions drivers/block/cciss.c
@@ -3898,6 +3898,13 @@ static struct pci_driver cciss_pci_driver = {
  */
 static int __init cciss_init(void)
 {
+	/*
+	 * The hardware requires that commands are aligned on a 64-bit
+	 * boundary. Given that we use pci_alloc_consistent() to allocate an
+	 * array of them, the size must be a multiple of 8 bytes.
+	 */
+	BUILD_BUG_ON(sizeof(CommandList_struct) % 8);
+
 	printk(KERN_INFO DRIVER_NAME "\n");
 
 	/* Register for our PCI devices */
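
BUILD_BUG_ON() turns the alignment requirement into a compile-time check: if the condition is true, the macro expands to an invalid construct and the build fails. A minimal standalone sketch of the idea, re-implementing the macro as the kernel defined it in this era; the struct is a stand-in for CommandList_struct:

/* negative array size when the condition is true -> compile error */
#define MY_BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2 * !!(condition)]))

struct cmd {				/* stand-in for CommandList_struct */
	unsigned int cmd_word;
	unsigned long long addr;
} __attribute__((aligned(8)));

int main(void)
{
	/* fails to compile if elements of a DMA'd array of struct cmd
	 * could land on a non-64-bit boundary */
	MY_BUILD_BUG_ON(sizeof(struct cmd) % 8);
	return 0;
}
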
1 change: 1 addition & 0 deletions drivers/block/cpqarray.c
@@ -617,6 +617,7 @@ static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
 	int i;
 
 	c->pci_dev = pdev;
+	pci_set_master(pdev);
 	if (pci_enable_device(pdev)) {
 		printk(KERN_ERR "cpqarray: Unable to Enable PCI device\n");
 		return -1;
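
pci_set_master() sets the Bus Master bit in the device's PCI_COMMAND register; without it the controller cannot initiate DMA. For reference, a sketch of the conventional bring-up order in a PCI driver, where the two function names are the real PCI core API and the surrounding helper is hypothetical:

#include <linux/pci.h>

/* hypothetical probe helper showing the usual enable-then-master order */
static int example_pci_init(struct pci_dev *pdev)
{
	int err = pci_enable_device(pdev);	/* wake the device, set up BARs */
	if (err)
		return err;

	pci_set_master(pdev);	/* allow the device to master the bus (DMA) */
	return 0;
}
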
48 changes: 44 additions & 4 deletions drivers/block/loop.c
@@ -474,10 +474,35 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
 	int ret;
 
 	pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
-	if (bio_rw(bio) == WRITE)
+
+	if (bio_rw(bio) == WRITE) {
+		int barrier = bio_barrier(bio);
+		struct file *file = lo->lo_backing_file;
+
+		if (barrier) {
+			if (unlikely(!file->f_op->fsync)) {
+				ret = -EOPNOTSUPP;
+				goto out;
+			}
+
+			ret = vfs_fsync(file, file->f_path.dentry, 0);
+			if (unlikely(ret)) {
+				ret = -EIO;
+				goto out;
+			}
+		}
+
 		ret = lo_send(lo, bio, pos);
-	else
+
+		if (barrier && !ret) {
+			ret = vfs_fsync(file, file->f_path.dentry, 0);
+			if (unlikely(ret))
+				ret = -EIO;
+		}
+	} else
 		ret = lo_receive(lo, bio, lo->lo_blocksize, pos);
+
+out:
 	return ret;
 }
 
@@ -826,6 +851,9 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 	lo->lo_queue->queuedata = lo;
 	lo->lo_queue->unplug_fn = loop_unplug;
 
+	if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
+		blk_queue_ordered(lo->lo_queue, QUEUE_ORDERED_DRAIN, NULL);
+
 	set_capacity(lo->lo_disk, size);
 	bd_set_size(bdev, size << 9);
 
@@ -941,11 +969,18 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
 	bd_set_size(bdev, 0);
 	mapping_set_gfp_mask(filp->f_mapping, gfp);
 	lo->lo_state = Lo_unbound;
-	fput(filp);
 	/* This is safe: open() is still holding a reference. */
 	module_put(THIS_MODULE);
 	if (max_part > 0)
 		ioctl_by_bdev(bdev, BLKRRPART, 0);
 	mutex_unlock(&lo->lo_ctl_mutex);
+	/*
+	 * Need not hold lo_ctl_mutex to fput backing file.
+	 * Calling fput holding lo_ctl_mutex triggers a circular
+	 * lock dependency possibility warning as fput can take
+	 * bd_mutex which is usually taken before lo_ctl_mutex.
+	 */
+	fput(filp);
 	return 0;
 }
 
@@ -1163,7 +1198,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
 	struct loop_device *lo = bdev->bd_disk->private_data;
 	int err;
 
-	mutex_lock(&lo->lo_ctl_mutex);
+	mutex_lock_nested(&lo->lo_ctl_mutex, 1);
 	switch (cmd) {
 	case LOOP_SET_FD:
 		err = loop_set_fd(lo, mode, bdev, arg);
@@ -1172,7 +1207,10 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
 		err = loop_change_fd(lo, bdev, arg);
 		break;
 	case LOOP_CLR_FD:
+		/* loop_clr_fd would have unlocked lo_ctl_mutex on success */
 		err = loop_clr_fd(lo, bdev);
+		if (!err)
+			goto out_unlocked;
 		break;
 	case LOOP_SET_STATUS:
 		err = loop_set_status_old(lo, (struct loop_info __user *) arg);
@@ -1190,6 +1228,8 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
 		err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
 	}
 	mutex_unlock(&lo->lo_ctl_mutex);
+
+out_unlocked:
 	return err;
 }
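
The lo_ioctl() change matters because loop_clr_fd() now returns with lo_ctl_mutex already dropped, so the ioctl handler must skip its own unlock on that path. From userspace the path is reached by detaching a loop device; a hypothetical sketch equivalent to "losetup -d /dev/loop0", with the device path illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/loop.h>

int main(void)
{
	int fd = open("/dev/loop0", O_RDWR);	/* illustrative device */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Detach the backing file. In the kernel this reaches loop_clr_fd(),
	 * which now drops lo_ctl_mutex itself before the final fput(). */
	if (ioctl(fd, LOOP_CLR_FD, 0) < 0)
		perror("LOOP_CLR_FD");

	close(fd);
	return 0;
}
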