
Commit d707e3f

---
r: 135950
b: refs/heads/master
c: 585d3bc
h: refs/heads/master
v: v3
Nick Piggin authored and Al Viro committed Mar 27, 2009
1 parent 776e7cd commit d707e3f
Showing 5 changed files with 154 additions and 153 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 3ba13d179e8c24c68eac32b93593a6b10fcd1572
refs/heads/master: 585d3bc06f4ca57f975a5a1f698f65a45ea66225
146 changes: 146 additions & 0 deletions trunk/fs/block_dev.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/mount.h>
@@ -174,6 +175,151 @@ blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
                iov, offset, nr_segs, blkdev_get_blocks, NULL);
}

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping. Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
        int ret = 0;

        if (bdev)
                ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
        return ret;
}
EXPORT_SYMBOL(sync_blockdev);

/*
 * Write out and wait upon all dirty data associated with this
 * device. Filesystem data as well as the underlying block
 * device. Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
        struct super_block *sb = get_super(bdev);
        if (sb) {
                int res = fsync_super(sb);
                drop_super(sb);
                return res;
        }
        return sync_blockdev(bdev);
}

/**
 * freeze_bdev -- lock a filesystem and force it into a consistent state
 * @bdev: blockdevice to lock
 *
 * This takes the block device bd_mount_sem to make sure no new mounts
 * happen on bdev until thaw_bdev() is called.
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 * The reference counter (bd_fsfreeze_count) guarantees that only the last
 * unfreeze process can unfreeze the frozen filesystem actually when multiple
 * freeze requests arrive simultaneously. It counts up in freeze_bdev() and
 * count down in thaw_bdev(). When it becomes 0, thaw_bdev() will unfreeze
 * actually.
 */
struct super_block *freeze_bdev(struct block_device *bdev)
{
        struct super_block *sb;
        int error = 0;

        mutex_lock(&bdev->bd_fsfreeze_mutex);
        if (bdev->bd_fsfreeze_count > 0) {
                bdev->bd_fsfreeze_count++;
                sb = get_super(bdev);
                mutex_unlock(&bdev->bd_fsfreeze_mutex);
                return sb;
        }
        bdev->bd_fsfreeze_count++;

        down(&bdev->bd_mount_sem);
        sb = get_super(bdev);
        if (sb && !(sb->s_flags & MS_RDONLY)) {
                sb->s_frozen = SB_FREEZE_WRITE;
                smp_wmb();

                __fsync_super(sb);

                sb->s_frozen = SB_FREEZE_TRANS;
                smp_wmb();

                sync_blockdev(sb->s_bdev);

                if (sb->s_op->freeze_fs) {
                        error = sb->s_op->freeze_fs(sb);
                        if (error) {
                                printk(KERN_ERR
                                        "VFS:Filesystem freeze failed\n");
                                sb->s_frozen = SB_UNFROZEN;
                                drop_super(sb);
                                up(&bdev->bd_mount_sem);
                                bdev->bd_fsfreeze_count--;
                                mutex_unlock(&bdev->bd_fsfreeze_mutex);
                                return ERR_PTR(error);
                        }
                }
        }

        sync_blockdev(bdev);
        mutex_unlock(&bdev->bd_fsfreeze_mutex);

        return sb;      /* thaw_bdev releases s->s_umount and bd_mount_sem */
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev -- unlock filesystem
 * @bdev: blockdevice to unlock
 * @sb: associated superblock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
int thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
        int error = 0;

        mutex_lock(&bdev->bd_fsfreeze_mutex);
        if (!bdev->bd_fsfreeze_count) {
                mutex_unlock(&bdev->bd_fsfreeze_mutex);
                return -EINVAL;
        }

        bdev->bd_fsfreeze_count--;
        if (bdev->bd_fsfreeze_count > 0) {
                if (sb)
                        drop_super(sb);
                mutex_unlock(&bdev->bd_fsfreeze_mutex);
                return 0;
        }

        if (sb) {
                BUG_ON(sb->s_bdev != bdev);
                if (!(sb->s_flags & MS_RDONLY)) {
                        if (sb->s_op->unfreeze_fs) {
                                error = sb->s_op->unfreeze_fs(sb);
                                if (error) {
                                        printk(KERN_ERR
                                                "VFS:Filesystem thaw failed\n");
                                        sb->s_frozen = SB_FREEZE_TRANS;
                                        bdev->bd_fsfreeze_count++;
                                        mutex_unlock(&bdev->bd_fsfreeze_mutex);
                                        return error;
                                }
                        }
                        sb->s_frozen = SB_UNFROZEN;
                        smp_wmb();
                        wake_up(&sb->s_wait_unfrozen);
                }
                drop_super(sb);
        }

        up(&bdev->bd_mount_sem);
        mutex_unlock(&bdev->bd_fsfreeze_mutex);
        return 0;
}
EXPORT_SYMBOL(thaw_bdev);

static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, blkdev_get_block, wbc);
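For context, the freeze/thaw pair that this commit moves into block_dev.c is reference counted: every successful freeze_bdev() must be matched by a thaw_bdev(), and only the final thaw (when bd_fsfreeze_count reaches 0) actually unfreezes the filesystem. A minimal sketch of a caller follows; snapshot_with_freeze() and do_take_snapshot() are hypothetical names used only for illustration and are not part of this commit.

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/err.h>

extern int do_take_snapshot(struct block_device *bdev);        /* assumed snapshot primitive, illustrative only */

/* Hypothetical caller: quiesce the device, take a snapshot, then resume. */
static int snapshot_with_freeze(struct block_device *bdev)
{
        struct super_block *sb;
        int err;

        /* Blocks new mounts, syncs dirty data and calls ->freeze_fs if
         * present; returns NULL when no filesystem is mounted on bdev,
         * or an ERR_PTR() value if ->freeze_fs fails. */
        sb = freeze_bdev(bdev);
        if (IS_ERR(sb))
                return PTR_ERR(sb);

        err = do_take_snapshot(bdev);

        /* Must be called once per successful freeze_bdev(); the last
         * thaw drops bd_fsfreeze_count to 0 and unfreezes for real. */
        thaw_bdev(bdev, sb);
        return err;
}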
145 changes: 0 additions & 145 deletions trunk/fs/buffer.c
@@ -165,151 +165,6 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
        put_bh(bh);
}

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping. Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
        int ret = 0;

        if (bdev)
                ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
        return ret;
}
EXPORT_SYMBOL(sync_blockdev);

/*
 * Write out and wait upon all dirty data associated with this
 * device. Filesystem data as well as the underlying block
 * device. Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
        struct super_block *sb = get_super(bdev);
        if (sb) {
                int res = fsync_super(sb);
                drop_super(sb);
                return res;
        }
        return sync_blockdev(bdev);
}

/**
 * freeze_bdev -- lock a filesystem and force it into a consistent state
 * @bdev: blockdevice to lock
 *
 * This takes the block device bd_mount_sem to make sure no new mounts
 * happen on bdev until thaw_bdev() is called.
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 * The reference counter (bd_fsfreeze_count) guarantees that only the last
 * unfreeze process can unfreeze the frozen filesystem actually when multiple
 * freeze requests arrive simultaneously. It counts up in freeze_bdev() and
 * count down in thaw_bdev(). When it becomes 0, thaw_bdev() will unfreeze
 * actually.
 */
struct super_block *freeze_bdev(struct block_device *bdev)
{
        struct super_block *sb;
        int error = 0;

        mutex_lock(&bdev->bd_fsfreeze_mutex);
        if (bdev->bd_fsfreeze_count > 0) {
                bdev->bd_fsfreeze_count++;
                sb = get_super(bdev);
                mutex_unlock(&bdev->bd_fsfreeze_mutex);
                return sb;
        }
        bdev->bd_fsfreeze_count++;

        down(&bdev->bd_mount_sem);
        sb = get_super(bdev);
        if (sb && !(sb->s_flags & MS_RDONLY)) {
                sb->s_frozen = SB_FREEZE_WRITE;
                smp_wmb();

                __fsync_super(sb);

                sb->s_frozen = SB_FREEZE_TRANS;
                smp_wmb();

                sync_blockdev(sb->s_bdev);

                if (sb->s_op->freeze_fs) {
                        error = sb->s_op->freeze_fs(sb);
                        if (error) {
                                printk(KERN_ERR
                                        "VFS:Filesystem freeze failed\n");
                                sb->s_frozen = SB_UNFROZEN;
                                drop_super(sb);
                                up(&bdev->bd_mount_sem);
                                bdev->bd_fsfreeze_count--;
                                mutex_unlock(&bdev->bd_fsfreeze_mutex);
                                return ERR_PTR(error);
                        }
                }
        }

        sync_blockdev(bdev);
        mutex_unlock(&bdev->bd_fsfreeze_mutex);

        return sb;      /* thaw_bdev releases s->s_umount and bd_mount_sem */
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev -- unlock filesystem
 * @bdev: blockdevice to unlock
 * @sb: associated superblock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
int thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
        int error = 0;

        mutex_lock(&bdev->bd_fsfreeze_mutex);
        if (!bdev->bd_fsfreeze_count) {
                mutex_unlock(&bdev->bd_fsfreeze_mutex);
                return -EINVAL;
        }

        bdev->bd_fsfreeze_count--;
        if (bdev->bd_fsfreeze_count > 0) {
                if (sb)
                        drop_super(sb);
                mutex_unlock(&bdev->bd_fsfreeze_mutex);
                return 0;
        }

        if (sb) {
                BUG_ON(sb->s_bdev != bdev);
                if (!(sb->s_flags & MS_RDONLY)) {
                        if (sb->s_op->unfreeze_fs) {
                                error = sb->s_op->unfreeze_fs(sb);
                                if (error) {
                                        printk(KERN_ERR
                                                "VFS:Filesystem thaw failed\n");
                                        sb->s_frozen = SB_FREEZE_TRANS;
                                        bdev->bd_fsfreeze_count++;
                                        mutex_unlock(&bdev->bd_fsfreeze_mutex);
                                        return error;
                                }
                        }
                        sb->s_frozen = SB_UNFROZEN;
                        smp_wmb();
                        wake_up(&sb->s_wait_unfrozen);
                }
                drop_super(sb);
        }

        up(&bdev->bd_mount_sem);
        mutex_unlock(&bdev->bd_fsfreeze_mutex);
        return 0;
}
EXPORT_SYMBOL(thaw_bdev);

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers. To get around this,
7 changes: 0 additions & 7 deletions trunk/include/linux/buffer_head.h
@@ -165,15 +165,8 @@ int sync_mapping_buffers(struct address_space *mapping);
void unmap_underlying_metadata(struct block_device *bdev, sector_t block);

void mark_buffer_async_write(struct buffer_head *bh);
void invalidate_bdev(struct block_device *);
int sync_blockdev(struct block_device *bdev);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
int fsync_bdev(struct block_device *);
struct super_block *freeze_bdev(struct block_device *);
int thaw_bdev(struct block_device *, struct super_block *);
int fsync_super(struct super_block *);
int fsync_no_super(struct block_device *);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
                        unsigned size);
struct buffer_head *__getblk(struct block_device *bdev, sector_t block,
7 changes: 7 additions & 0 deletions trunk/include/linux/fs.h
@@ -1874,6 +1874,13 @@ extern void bd_set_size(struct block_device *, loff_t size);
extern void bd_forget(struct inode *inode);
extern void bdput(struct block_device *);
extern struct block_device *open_by_devnum(dev_t, fmode_t);
extern void invalidate_bdev(struct block_device *);
extern int sync_blockdev(struct block_device *bdev);
extern struct super_block *freeze_bdev(struct block_device *);
extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
extern int fsync_bdev(struct block_device *);
extern int fsync_super(struct super_block *);
extern int fsync_no_super(struct block_device *);
#else
static inline void bd_forget(struct inode *inode) {}
#endif
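After this change the prototypes for these helpers come from <linux/fs.h> rather than <linux/buffer_head.h>, as the hunks above show. A small, illustrative-only sketch of a caller; flush_bdev() is a hypothetical name, not something added by this commit.

#include <linux/types.h>
#include <linux/fs.h>   /* sync_blockdev(), fsync_bdev(), freeze_bdev(), ... now declared here */

/* Hypothetical helper: flush either just the block device mapping or the
 * whole mounted filesystem on top of it. */
static int flush_bdev(struct block_device *bdev, bool include_fs)
{
        if (include_fs)
                return fsync_bdev(bdev);        /* takes the superblock lock */
        return sync_blockdev(bdev);             /* only bdev->bd_inode pages */
}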
