Commit 49195ff

---
r: 223085
b: refs/heads/master
c: 6229cdb
h: refs/heads/master
i:
  223083: ebf81a0
v: v3
Jean Delvare authored and Jean Delvare committed Dec 8, 2010
1 parent faaea39 commit 49195ff
Showing 53 changed files with 417 additions and 666 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 5111711d3ed8f4f1012cac3ec3f2b463b549fbfd
refs/heads/master: 6229cdb23648d0c2875b3fb102cdaf4bf08fcfa4
7 changes: 1 addition & 6 deletions trunk/Documentation/filesystems/Locking
@@ -173,13 +173,12 @@ prototypes:
sector_t (*bmap)(struct address_space *, sector_t);
int (*invalidatepage) (struct page *, unsigned long);
int (*releasepage) (struct page *, int);
void (*freepage)(struct page *);
int (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
loff_t offset, unsigned long nr_segs);
int (*launder_page) (struct page *);

locking rules:
All except set_page_dirty and freepage may block
All except set_page_dirty may block

		BKL	PageLocked(page)	i_mutex
writepage:	no	yes, unlocks (see below)
Expand All @@ -194,7 +193,6 @@ perform_write: no n/a yes
bmap: no
invalidatepage: no yes
releasepage: no yes
freepage: no yes
direct_IO: no
launder_page: no yes

@@ -290,9 +288,6 @@ buffers from the page in preparation for freeing it. It returns zero to
indicate that the buffers are (or may be) freeable. If ->releasepage is zero,
the kernel assumes that the fs has no private interest in the buffers.

->freepage() is called when the kernel is done dropping the page
from the page cache.

->launder_page() may be called prior to releasing a page if
it is still found to be dirty. It returns zero if the page was successfully
cleaned, or an error value if not. Note that in order to prevent the page
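
For reference, the prototypes above translate into an address_space_operations
table like the hypothetical sketch below (the examplefs names are invented, and
after this change there is no ->freepage hook left to implement):

	/* Hypothetical examplefs: minimal aops matching the prototypes
	 * above. try_to_free_buffers() is the usual ->releasepage body
	 * for a buffer_head based filesystem. */
	static int examplefs_releasepage(struct page *page, int gfp_mask)
	{
		/* Nonzero tells the VM our private data is freeable. */
		if (!page_has_private(page))
			return 1;
		return try_to_free_buffers(page);
	}

	static int examplefs_launder_page(struct page *page)
	{
		/* Called with the page locked and still dirty; return 0
		 * once it has been cleaned, or an error value. */
		return 0;
	}

	static const struct address_space_operations examplefs_aops = {
		.releasepage	= examplefs_releasepage,
		.launder_page	= examplefs_launder_page,
	};
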
7 changes: 0 additions & 7 deletions trunk/Documentation/filesystems/vfs.txt
@@ -534,7 +534,6 @@ struct address_space_operations {
sector_t (*bmap)(struct address_space *, sector_t);
int (*invalidatepage) (struct page *, unsigned long);
int (*releasepage) (struct page *, int);
void (*freepage)(struct page *);
ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
loff_t offset, unsigned long nr_segs);
struct page* (*get_xip_page)(struct address_space *, sector_t,
@@ -679,12 +678,6 @@ struct address_space_operations {
need to ensure this. Possibly it can clear the PageUptodate
bit if it cannot free private data yet.

freepage: freepage is called once the page is no longer visible in
the page cache in order to allow the cleanup of any private
data. Since it may be called by the memory reclaimer, it
should not assume that the original address_space mapping still
exists, and it should not block.

direct_IO: called by the generic read/write routines to perform
direct_IO - that is IO requests which bypass the page cache
and transfer data directly between the storage and the
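
To make the ->direct_IO description above concrete: a filesystem of this era
typically delegated to the generic block-layer helper. This is a hedged sketch
modeled on the ext2 pattern; examplefs_get_block is a hypothetical get_block_t
that is only declared here:

	static int examplefs_get_block(struct inode *inode, sector_t iblock,
				       struct buffer_head *bh_result,
				       int create);

	static ssize_t examplefs_direct_IO(int rw, struct kiocb *iocb,
					   const struct iovec *iov,
					   loff_t offset,
					   unsigned long nr_segs)
	{
		struct file *file = iocb->ki_filp;
		struct inode *inode = file->f_mapping->host;

		/* Bypass the page cache and let the generic direct I/O
		 * code drive the transfer against the block device. */
		return blockdev_direct_IO(rw, iocb, inode,
					  inode->i_sb->s_bdev, iov, offset,
					  nr_segs, examplefs_get_block, NULL);
	}
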
61 changes: 45 additions & 16 deletions trunk/drivers/hwmon/it87.c
@@ -187,6 +187,7 @@ static const u8 IT87_REG_FANX_MIN[] = { 0x1b, 0x1c, 0x1d, 0x85, 0x87 };
#define IT87_REG_FAN_MAIN_CTRL 0x13
#define IT87_REG_FAN_CTL 0x14
#define IT87_REG_PWM(nr) (0x15 + (nr))
#define IT87_REG_PWM_DUTY(nr) (0x63 + (nr) * 8)

#define IT87_REG_VIN(nr) (0x20 + (nr))
#define IT87_REG_TEMP(nr) (0x29 + (nr))
@@ -251,12 +252,16 @@ struct it87_data {
u8 fan_main_ctrl; /* Register value */
u8 fan_ctl; /* Register value */

/* The following 3 arrays correspond to the same registers. The
* meaning of bits 6-0 depends on the value of bit 7, and we want
* to preserve settings on mode changes, so we have to track all
* values separately. */
/* The following 3 arrays correspond to the same registers up to
* the IT8720F. The meaning of bits 6-0 depends on the value of bit
* 7, and we want to preserve settings on mode changes, so we have
* to track all values separately.
* Starting with the IT8721F, the manual PWM duty cycles are stored
* in separate registers (8-bit values), so the separate tracking
* is no longer needed, but it is still done to keep the driver
* simple. */
u8 pwm_ctrl[3]; /* Register value */
u8 pwm_duty[3]; /* Manual PWM value set by user (bit 6-0) */
u8 pwm_duty[3]; /* Manual PWM value set by user */
u8 pwm_temp_map[3]; /* PWM to temp. chan. mapping (bits 1-0) */

/* Automatic fan speed control registers */
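
To make the overloaded register layout described in this comment concrete,
here is an illustrative helper (not part of the driver) showing how pwm_ctrl
is interpreted on chips up to the IT8720F:

	/* Illustration only: pwm_ctrl decoding up to the IT8720F. Bit 7
	 * selects the mode and changes the meaning of the low bits. */
	static void decode_pwm_ctrl(u8 ctrl, int *automatic, u8 *duty,
				    u8 *temp_map)
	{
		*automatic = !!(ctrl & 0x80);	/* bit 7: automatic mode */
		if (*automatic)
			*temp_map = ctrl & 0x03; /* bits 1-0: temp channel */
		else
			*duty = ctrl & 0x7f;	/* bits 6-0: duty cycle */
	}
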
@@ -832,7 +837,9 @@ static ssize_t set_pwm_enable(struct device *dev,
data->fan_main_ctrl);
} else {
if (val == 1) /* Manual mode */
data->pwm_ctrl[nr] = data->pwm_duty[nr];
data->pwm_ctrl[nr] = data->type == it8721 ?
data->pwm_temp_map[nr] :
data->pwm_duty[nr];
else /* Automatic mode */
data->pwm_ctrl[nr] = 0x80 | data->pwm_temp_map[nr];
it87_write_value(data, IT87_REG_PWM(nr), data->pwm_ctrl[nr]);
Expand All @@ -858,12 +865,25 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
return -EINVAL;

mutex_lock(&data->update_lock);
data->pwm_duty[nr] = pwm_to_reg(data, val);
/* If we are in manual mode, write the duty cycle immediately;
* otherwise, just store it for later use. */
if (!(data->pwm_ctrl[nr] & 0x80)) {
data->pwm_ctrl[nr] = data->pwm_duty[nr];
it87_write_value(data, IT87_REG_PWM(nr), data->pwm_ctrl[nr]);
if (data->type == it8721) {
/* If we are in automatic mode, the PWM duty cycle register
* is read-only so we can't write the value */
if (data->pwm_ctrl[nr] & 0x80) {
mutex_unlock(&data->update_lock);
return -EBUSY;
}
data->pwm_duty[nr] = pwm_to_reg(data, val);
it87_write_value(data, IT87_REG_PWM_DUTY(nr),
data->pwm_duty[nr]);
} else {
data->pwm_duty[nr] = pwm_to_reg(data, val);
/* If we are in manual mode, write the duty cycle immediately;
* otherwise, just store it for later use. */
if (!(data->pwm_ctrl[nr] & 0x80)) {
data->pwm_ctrl[nr] = data->pwm_duty[nr];
it87_write_value(data, IT87_REG_PWM(nr),
data->pwm_ctrl[nr]);
}
}
mutex_unlock(&data->update_lock);
return count;
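
The practical effect of the new -EBUSY path shows up in user space when the
pwm sysfs attribute is written while an IT8721F output is in automatic mode.
A hedged user-space sketch (the hwmon path and attribute number are
assumptions and vary by system):

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Path is an assumption; the hwmon index varies. */
		int fd = open("/sys/class/hwmon/hwmon0/pwm1", O_WRONLY);

		if (fd < 0)
			return 1;
		/* On an IT8721F in automatic mode the driver refuses the
		 * write, as the duty cycle register is then read-only. */
		if (write(fd, "128", 3) < 0 && errno == EBUSY)
			fprintf(stderr, "pwm1: automatic mode, read-only\n");
		close(fd);
		return 0;
	}
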
@@ -1958,7 +1978,10 @@ static void __devinit it87_init_device(struct platform_device *pdev)
* channels to use when later switching to automatic mode.
* Use a 1:1 mapping by default (we are clueless.)
* In both cases, the value can (and should) be changed by the user
* prior to switching to a different mode. */
* prior to switching to a different mode.
* Note that this is no longer needed for the IT8721F and later, as
* these have separate registers for the temperature mapping and the
* manual duty cycle. */
for (i = 0; i < 3; i++) {
data->pwm_temp_map[i] = i;
data->pwm_duty[i] = 0x7f; /* Full speed */
@@ -2034,10 +2057,16 @@ static void it87_update_pwm_ctrl(struct it87_data *data, int nr)
static void it87_update_pwm_ctrl(struct it87_data *data, int nr)
{
data->pwm_ctrl[nr] = it87_read_value(data, IT87_REG_PWM(nr));
if (data->pwm_ctrl[nr] & 0x80) /* Automatic mode */
if (data->type == it8721) {
data->pwm_temp_map[nr] = data->pwm_ctrl[nr] & 0x03;
else /* Manual mode */
data->pwm_duty[nr] = data->pwm_ctrl[nr] & 0x7f;
data->pwm_duty[nr] = it87_read_value(data,
IT87_REG_PWM_DUTY(nr));
} else {
if (data->pwm_ctrl[nr] & 0x80) /* Automatic mode */
data->pwm_temp_map[nr] = data->pwm_ctrl[nr] & 0x03;
else /* Manual mode */
data->pwm_duty[nr] = data->pwm_ctrl[nr] & 0x7f;
}

if (has_old_autopwm(data)) {
int i;
11 changes: 5 additions & 6 deletions trunk/fs/btrfs/disk-io.c
@@ -696,7 +696,6 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
__btree_submit_bio_done);
}

#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
struct page *newpage, struct page *page)
{
Expand All @@ -713,9 +712,12 @@ static int btree_migratepage(struct address_space *mapping,
if (page_has_private(page) &&
!try_to_release_page(page, GFP_KERNEL))
return -EAGAIN;
#ifdef CONFIG_MIGRATION
return migrate_page(mapping, newpage, page);
}
#else
return -ENOSYS;
#endif
}

static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
@@ -1007,10 +1009,7 @@ static int find_and_setup_root(struct btrfs_root *tree_root,
blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
blocksize, generation);
if (!root->node || !btrfs_buffer_uptodate(root->node, generation)) {
free_extent_buffer(root->node);
return -EIO;
}
BUG_ON(!root->node);
root->commit_root = btrfs_root_node(root);
return 0;
}
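
The btree_migratepage hunk above shows a common kernel idiom: instead of
compiling the whole function out, only the migrate_page() call is guarded, so
the function always exists and its callers need no #ifdef of their own. A
generic hedged sketch of the same idiom:

	/* One definition; only the config-dependent call is guarded, so
	 * .migratepage can be wired up unconditionally. */
	static int example_migratepage(struct address_space *mapping,
				       struct page *newpage,
				       struct page *page)
	{
	#ifdef CONFIG_MIGRATION
		return migrate_page(mapping, newpage, page);
	#else
		return -ENOSYS;	/* migration not compiled in */
	#endif
	}
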
75 changes: 16 additions & 59 deletions trunk/fs/btrfs/extent-tree.c
@@ -429,7 +429,6 @@ static int caching_kthread(void *data)

static int cache_block_group(struct btrfs_block_group_cache *cache,
struct btrfs_trans_handle *trans,
struct btrfs_root *root,
int load_cache_only)
{
struct btrfs_fs_info *fs_info = cache->fs_info;
Expand All @@ -443,12 +442,9 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,

/*
* We can't do the read from on-disk cache during a commit since we need
* to have the normal tree locking. Also if we are currently trying to
* allocate blocks for the tree root we can't do the fast caching since
* we likely hold important locks.
* to have the normal tree locking.
*/
if (!trans->transaction->in_commit &&
(root && root != root->fs_info->tree_root)) {
if (!trans->transaction->in_commit) {
spin_lock(&cache->lock);
if (cache->cached != BTRFS_CACHE_NO) {
spin_unlock(&cache->lock);
@@ -2745,7 +2741,6 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
struct btrfs_root *root = block_group->fs_info->tree_root;
struct inode *inode = NULL;
u64 alloc_hint = 0;
int dcs = BTRFS_DC_ERROR;
int num_pages = 0;
int retries = 0;
int ret = 0;
@@ -2800,8 +2795,6 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,

spin_lock(&block_group->lock);
if (block_group->cached != BTRFS_CACHE_FINISHED) {
/* We're not cached, don't bother trying to write stuff out */
dcs = BTRFS_DC_WRITTEN;
spin_unlock(&block_group->lock);
goto out_put;
}
Expand All @@ -2828,16 +2821,17 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
num_pages, num_pages,
&alloc_hint);
if (!ret)
dcs = BTRFS_DC_SETUP;
btrfs_free_reserved_data_space(inode, num_pages);
out_put:
iput(inode);
out_free:
btrfs_release_path(root, path);
out:
spin_lock(&block_group->lock);
block_group->disk_cache_state = dcs;
if (ret)
block_group->disk_cache_state = BTRFS_DC_ERROR;
else
block_group->disk_cache_state = BTRFS_DC_SETUP;
spin_unlock(&block_group->lock);

return ret;
@@ -3043,13 +3037,7 @@ static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)

u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
{
/*
* we add in the count of missing devices because we want
* to make sure that any RAID levels on a degraded FS
* continue to be honored.
*/
u64 num_devices = root->fs_info->fs_devices->rw_devices +
root->fs_info->fs_devices->missing_devices;
u64 num_devices = root->fs_info->fs_devices->rw_devices;

if (num_devices == 1)
flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
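
For a concrete sense of the reduction step above, consider a single-device
filesystem that asks for RAID1 data. A self-contained illustration; the bit
values mirror the BTRFS_BLOCK_GROUP_* flags from ctree.h but are defined
locally here:

	#include <stdint.h>
	#include <stdio.h>

	#define BG_RAID0 (1ULL << 3)	/* stand-in for BTRFS_BLOCK_GROUP_RAID0 */
	#define BG_RAID1 (1ULL << 4)	/* stand-in for BTRFS_BLOCK_GROUP_RAID1 */

	int main(void)
	{
		uint64_t flags = BG_RAID1;	/* caller asked for RAID1 */
		uint64_t num_devices = 1;	/* rw_devices only */

		if (num_devices == 1)
			flags &= ~(BG_RAID1 | BG_RAID0);

		/* All RAID bits gone: allocation falls back to a single
		 * profile on one device. */
		printf("reduced flags: %#llx\n", (unsigned long long)flags);
		return 0;
	}
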
@@ -4092,7 +4080,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
* space back to the block group, otherwise we will leak space.
*/
if (!alloc && cache->cached == BTRFS_CACHE_NO)
cache_block_group(cache, trans, NULL, 1);
cache_block_group(cache, trans, 1);

byte_in_group = bytenr - cache->key.objectid;
WARN_ON(byte_in_group > cache->key.offset);
@@ -4942,31 +4930,11 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
btrfs_get_block_group(block_group);
search_start = block_group->key.objectid;

/*
* this can happen if we end up cycling through all the
* raid types, but we want to make sure we only allocate
* for the proper type.
*/
if (!block_group_bits(block_group, data)) {
u64 extra = BTRFS_BLOCK_GROUP_DUP |
BTRFS_BLOCK_GROUP_RAID1 |
BTRFS_BLOCK_GROUP_RAID10;

/*
* if they asked for extra copies and this block group
* doesn't provide them, bail. This does allow us to
* fill raid0 from raid1.
*/
if ((data & extra) && !(block_group->flags & extra))
goto loop;
}

have_block_group:
if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
u64 free_percent;

ret = cache_block_group(block_group, trans,
orig_root, 1);
ret = cache_block_group(block_group, trans, 1);
if (block_group->cached == BTRFS_CACHE_FINISHED)
goto have_block_group;

Expand All @@ -4990,8 +4958,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
if (loop > LOOP_CACHING_NOWAIT ||
(loop > LOOP_FIND_IDEAL &&
atomic_read(&space_info->caching_threads) < 2)) {
ret = cache_block_group(block_group, trans,
orig_root, 0);
ret = cache_block_group(block_group, trans, 0);
BUG_ON(ret);
}
found_uncached_bg = true;
@@ -5548,7 +5515,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
u64 num_bytes = ins->offset;

block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
cache_block_group(block_group, trans, NULL, 0);
cache_block_group(block_group, trans, 0);
caching_ctl = get_caching_control(block_group);

if (!caching_ctl) {
@@ -6333,13 +6300,9 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
NULL, NULL);
BUG_ON(ret < 0);
if (ret > 0) {
/* if we fail to delete the orphan item this time
* around, it'll get picked up the next time.
*
* The most common failure here is just -ENOENT.
*/
btrfs_del_orphan_item(trans, tree_root,
root->root_key.objectid);
ret = btrfs_del_orphan_item(trans, tree_root,
root->root_key.objectid);
BUG_ON(ret);
}
}

@@ -7915,14 +7878,7 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;

/*
* we add in the count of missing devices because we want
* to make sure that any RAID levels on a degraded FS
* continue to be honored.
*/
num_devices = root->fs_info->fs_devices->rw_devices +
root->fs_info->fs_devices->missing_devices;

num_devices = root->fs_info->fs_devices->rw_devices;
if (num_devices == 1) {
stripped |= BTRFS_BLOCK_GROUP_DUP;
stripped = flags & ~stripped;
@@ -8291,6 +8247,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
break;
if (ret != 0)
goto error;

leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
cache = kzalloc(sizeof(*cache), GFP_NOFS);