Commit cc63878

---
yaml
---
r: 254759
b: refs/heads/master
c: b56045d
h: refs/heads/master
i:
  254757: 7e69cab
  254755: 5f6c76e
  254751: 183b2eb
v: v3
Linus Torvalds committed Jul 9, 2011
1 parent 9d0c871 commit cc63878
Showing 15 changed files with 182 additions and 73 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 6ff325a2d85481cbe31dfbb74b4fd2d94c115300
refs/heads/master: b56045d4019a80935ead337e5c3f8e211fa9b471
2 changes: 1 addition & 1 deletion trunk/MAINTAINERS
@@ -4982,7 +4982,7 @@ F: drivers/power/power_supply*

PNP SUPPORT
M: Adam Belay <abelay@mit.edu>
M: Bjorn Helgaas <bjorn.helgaas@hp.com>
M: Bjorn Helgaas <bhelgaas@google.com>
S: Maintained
F: drivers/pnp/

2 changes: 1 addition & 1 deletion trunk/drivers/leds/leds-pca9532.c
@@ -88,7 +88,7 @@ static const struct pca9532_chip_info pca9532_chip_info_tbl[] = {

static struct i2c_driver pca9532_driver = {
.driver = {
.name = "pca953x",
.name = "leds-pca953x",
},
.probe = pca9532_probe,
.remove = pca9532_remove,
1 change: 1 addition & 0 deletions trunk/drivers/mfd/asic3.c
@@ -619,6 +619,7 @@ static void asic3_clk_disable(struct asic3 *asic, struct asic3_clk *clk)
/* MFD cells (SPI, PWM, LED, DS1WM, MMC) */
static struct ds1wm_driver_data ds1wm_pdata = {
.active_high = 1,
.reset_recover_delay = 1,
};

static struct resource ds1wm_resources[] = {
1 change: 1 addition & 0 deletions trunk/drivers/mfd/htc-pasic3.c
@@ -99,6 +99,7 @@ static int ds1wm_disable(struct platform_device *pdev)

static struct ds1wm_driver_data ds1wm_pdata = {
.active_high = 0,
.reset_recover_delay = 1,
};

static struct resource ds1wm_resources[] __initdata = {
5 changes: 5 additions & 0 deletions trunk/drivers/w1/masters/ds1wm.c
@@ -109,6 +109,7 @@ struct ds1wm_data {
/* byte to write that makes all intr disabled, */
/* considering active_state (IAS) (optimization) */
u8 int_en_reg_none;
unsigned int reset_recover_delay; /* see ds1wm.h */
};

static inline void ds1wm_write_register(struct ds1wm_data *ds1wm_data, u32 reg,
@@ -187,6 +188,9 @@ static int ds1wm_reset(struct ds1wm_data *ds1wm_data)
return 1;
}

if (ds1wm_data->reset_recover_delay)
msleep(ds1wm_data->reset_recover_delay);

return 0;
}

@@ -490,6 +494,7 @@ static int ds1wm_probe(struct platform_device *pdev)
}
ds1wm_data->irq = res->start;
ds1wm_data->int_en_reg_none = (plat->active_high ? DS1WM_INTEN_IAS : 0);
ds1wm_data->reset_recover_delay = plat->reset_recover_delay;

if (res->flags & IORESOURCE_IRQ_HIGHEDGE)
irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_RISING);
5 changes: 5 additions & 0 deletions trunk/fs/btrfs/ctree.h
@@ -1335,6 +1335,11 @@ struct btrfs_ioctl_defrag_range_args {
*/
#define BTRFS_STRING_ITEM_KEY 253

/*
* Flags for mount options.
*
* Note: don't forget to add new options to btrfs_show_options()
*/
#define BTRFS_MOUNT_NODATASUM (1 << 0)
#define BTRFS_MOUNT_NODATACOW (1 << 1)
#define BTRFS_MOUNT_NOBARRIER (1 << 2)
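
For illustration only (this block is not part of the commit): the reminder above exists because these mount options are plain bits in one mask, so reporting them back to the user requires an explicit check per flag in btrfs_show_options(). A minimal user-space sketch of that pattern; the names opts, test_opt and show_options are hypothetical, not the real btrfs macros.

#include <stdio.h>

/* bit-flag options, mirroring the BTRFS_MOUNT_* defines above */
#define MOUNT_NODATASUM  (1UL << 0)
#define MOUNT_NODATACOW  (1UL << 1)
#define MOUNT_NOBARRIER  (1UL << 2)

#define test_opt(o, flag)  ((o) & MOUNT_##flag)

static unsigned long opts;

/* every new flag needs a matching line here -- the point of the
 * "don't forget btrfs_show_options()" reminder */
static void show_options(void)
{
	if (test_opt(opts, NOBARRIER))
		printf(",nobarrier");
	if (test_opt(opts, NODATASUM))
		printf(",nodatasum");
}

int main(void)
{
	opts |= MOUNT_NOBARRIER;	/* e.g. parsed from "-o nobarrier" */
	show_options();			/* prints ",nobarrier" */
	return 0;
}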
12 changes: 7 additions & 5 deletions trunk/fs/btrfs/inode.c
@@ -2678,12 +2678,14 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
int ret;

/*
* If root is tree root, it means this inode is used to
* store free space information. And these inodes are updated
* when committing the transaction, so they needn't delaye to
* be updated, or deadlock will occured.
* If the inode is a free space inode, we can deadlock during commit
* if we put it into the delayed code.
*
* The data relocation inode should also be directly updated
* without delay
*/
if (!is_free_space_inode(root, inode)) {
if (!is_free_space_inode(root, inode)
&& root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
ret = btrfs_delayed_update_inode(trans, root, inode);
if (!ret)
btrfs_set_inode_last_trans(trans, inode);
6 changes: 6 additions & 0 deletions trunk/fs/btrfs/super.c
@@ -723,6 +723,12 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
seq_puts(seq, ",clear_cache");
if (btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED))
seq_puts(seq, ",user_subvol_rm_allowed");
if (btrfs_test_opt(root, ENOSPC_DEBUG))
seq_puts(seq, ",enospc_debug");
if (btrfs_test_opt(root, AUTO_DEFRAG))
seq_puts(seq, ",autodefrag");
if (btrfs_test_opt(root, INODE_MAP_CACHE))
seq_puts(seq, ",inode_cache");
return 0;
}

3 changes: 2 additions & 1 deletion trunk/fs/btrfs/volumes.c
@@ -2098,7 +2098,8 @@ int btrfs_balance(struct btrfs_root *dev_root)
chunk_root->root_key.objectid,
found_key.objectid,
found_key.offset);
BUG_ON(ret && ret != -ENOSPC);
if (ret && ret != -ENOSPC)
goto error;
key.offset = found_key.offset - 1;
}
ret = 0;
7 changes: 7 additions & 0 deletions trunk/include/linux/mfd/ds1wm.h
@@ -3,4 +3,11 @@
struct ds1wm_driver_data {
int active_high;
int clock_rate;
/* in milliseconds, the amount of time to */
/* sleep following a reset pulse. Zero */
/* should work if your bus devices recover*/
/* time respects the 1-wire spec since the*/
/* ds1wm implements the precise timings of*/
/* a reset pulse/presence detect sequence.*/
unsigned int reset_recover_delay;
};
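
For illustration only (this block is not part of the commit): read together with the asic3.c, htc-pasic3.c and ds1wm.c hunks above, the flow is that a board file sets reset_recover_delay in its ds1wm_driver_data, and ds1wm_reset() then sleeps for that many milliseconds after the reset pulse. A hedged sketch of the board-file side; example_ds1wm_pdata is a hypothetical name and the platform_device/MFD-cell wiring is omitted.

#include <linux/mfd/ds1wm.h>

/* hypothetical board support code */
static struct ds1wm_driver_data example_ds1wm_pdata = {
	.active_high         = 0,	/* interrupt line is active low */
	.reset_recover_delay = 1,	/* ms to sleep after each reset pulse */
};

/* the ds1wm master driver then honours it, as in the hunk above:
 *
 *	if (ds1wm_data->reset_recover_delay)
 *		msleep(ds1wm_data->reset_recover_delay);
 */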
140 changes: 103 additions & 37 deletions trunk/mm/memcontrol.c
@@ -108,10 +108,12 @@ enum mem_cgroup_events_index {
enum mem_cgroup_events_target {
MEM_CGROUP_TARGET_THRESH,
MEM_CGROUP_TARGET_SOFTLIMIT,
MEM_CGROUP_TARGET_NUMAINFO,
MEM_CGROUP_NTARGETS,
};
#define THRESHOLDS_EVENTS_TARGET (128)
#define SOFTLIMIT_EVENTS_TARGET (1024)
#define NUMAINFO_EVENTS_TARGET (1024)

struct mem_cgroup_stat_cpu {
long count[MEM_CGROUP_STAT_NSTATS];
@@ -237,7 +239,8 @@ struct mem_cgroup {
int last_scanned_node;
#if MAX_NUMNODES > 1
nodemask_t scan_nodes;
unsigned long next_scan_node_update;
atomic_t numainfo_events;
atomic_t numainfo_updating;
#endif
/*
* Should the accounting and control be hierarchical, per subtree?
@@ -577,15 +580,6 @@ static long mem_cgroup_read_stat(struct mem_cgroup *mem,
return val;
}

static long mem_cgroup_local_usage(struct mem_cgroup *mem)
{
long ret;

ret = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
ret += mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
return ret;
}

static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
bool charge)
{
@@ -689,6 +683,9 @@ static void __mem_cgroup_target_update(struct mem_cgroup *mem, int target)
case MEM_CGROUP_TARGET_SOFTLIMIT:
next = val + SOFTLIMIT_EVENTS_TARGET;
break;
case MEM_CGROUP_TARGET_NUMAINFO:
next = val + NUMAINFO_EVENTS_TARGET;
break;
default:
return;
}
@@ -707,11 +704,19 @@ static void memcg_check_events(struct mem_cgroup *mem, struct page *page)
mem_cgroup_threshold(mem);
__mem_cgroup_target_update(mem, MEM_CGROUP_TARGET_THRESH);
if (unlikely(__memcg_event_check(mem,
MEM_CGROUP_TARGET_SOFTLIMIT))){
MEM_CGROUP_TARGET_SOFTLIMIT))) {
mem_cgroup_update_tree(mem, page);
__mem_cgroup_target_update(mem,
MEM_CGROUP_TARGET_SOFTLIMIT);
MEM_CGROUP_TARGET_SOFTLIMIT);
}
#if MAX_NUMNODES > 1
if (unlikely(__memcg_event_check(mem,
MEM_CGROUP_TARGET_NUMAINFO))) {
atomic_inc(&mem->numainfo_events);
__mem_cgroup_target_update(mem,
MEM_CGROUP_TARGET_NUMAINFO);
}
#endif
}
}

@@ -1129,7 +1134,6 @@ unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
return MEM_CGROUP_ZSTAT(mz, lru);
}

#ifdef CONFIG_NUMA
static unsigned long mem_cgroup_node_nr_file_lru_pages(struct mem_cgroup *memcg,
int nid)
{
@@ -1141,6 +1145,17 @@ static unsigned long mem_cgroup_node_nr_file_lru_pages(struct mem_cgroup *memcg,
return ret;
}

static unsigned long mem_cgroup_node_nr_anon_lru_pages(struct mem_cgroup *memcg,
int nid)
{
unsigned long ret;

ret = mem_cgroup_get_zonestat_node(memcg, nid, LRU_INACTIVE_ANON) +
mem_cgroup_get_zonestat_node(memcg, nid, LRU_ACTIVE_ANON);
return ret;
}

#if MAX_NUMNODES > 1
static unsigned long mem_cgroup_nr_file_lru_pages(struct mem_cgroup *memcg)
{
u64 total = 0;
@@ -1152,17 +1167,6 @@ static unsigned long mem_cgroup_nr_file_lru_pages(struct mem_cgroup *memcg)
return total;
}

static unsigned long mem_cgroup_node_nr_anon_lru_pages(struct mem_cgroup *memcg,
int nid)
{
unsigned long ret;

ret = mem_cgroup_get_zonestat_node(memcg, nid, LRU_INACTIVE_ANON) +
mem_cgroup_get_zonestat_node(memcg, nid, LRU_ACTIVE_ANON);

return ret;
}

static unsigned long mem_cgroup_nr_anon_lru_pages(struct mem_cgroup *memcg)
{
u64 total = 0;
@@ -1559,6 +1563,28 @@ mem_cgroup_select_victim(struct mem_cgroup *root_mem)
return ret;
}

/**
* test_mem_cgroup_node_reclaimable
* @mem: the target memcg
* @nid: the node ID to be checked.
* @noswap: specify true here if the user wants file-only information.
*
* This function returns whether the specified memcg contains any
* reclaimable pages on a node. Returns true if there are any reclaimable
* pages in the node.
*/
static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *mem,
int nid, bool noswap)
{
if (mem_cgroup_node_nr_file_lru_pages(mem, nid))
return true;
if (noswap || !total_swap_pages)
return false;
if (mem_cgroup_node_nr_anon_lru_pages(mem, nid))
return true;
return false;

}
#if MAX_NUMNODES > 1

/*
@@ -1570,26 +1596,26 @@ mem_cgroup_select_victim(struct mem_cgroup *root_mem)
static void mem_cgroup_may_update_nodemask(struct mem_cgroup *mem)
{
int nid;

if (time_after(mem->next_scan_node_update, jiffies))
/*
* numainfo_events > 0 means there were at least NUMAINFO_EVENTS_TARGET
* pagein/pageout changes since the last update.
*/
if (!atomic_read(&mem->numainfo_events))
return;
if (atomic_inc_return(&mem->numainfo_updating) > 1)
return;

mem->next_scan_node_update = jiffies + 10*HZ;
/* make a nodemask where this memcg uses memory from */
mem->scan_nodes = node_states[N_HIGH_MEMORY];

for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) {

if (mem_cgroup_get_zonestat_node(mem, nid, LRU_INACTIVE_FILE) ||
mem_cgroup_get_zonestat_node(mem, nid, LRU_ACTIVE_FILE))
continue;

if (total_swap_pages &&
(mem_cgroup_get_zonestat_node(mem, nid, LRU_INACTIVE_ANON) ||
mem_cgroup_get_zonestat_node(mem, nid, LRU_ACTIVE_ANON)))
continue;
node_clear(nid, mem->scan_nodes);
if (!test_mem_cgroup_node_reclaimable(mem, nid, false))
node_clear(nid, mem->scan_nodes);
}

atomic_set(&mem->numainfo_events, 0);
atomic_set(&mem->numainfo_updating, 0);
}
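
For illustration only (this block is not part of the commit): numainfo_updating implements a lock-free "single rebuilder" guard -- only the caller that raises the counter from 0 to 1 rebuilds scan_nodes, concurrent callers bail out, and the winner clears both counters when finished. A stand-alone sketch of that guard using C11 atomics instead of the kernel's atomic_t; try_rebuild and updating are hypothetical names.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int updating;

static bool try_rebuild(void)
{
	/* mirrors: if (atomic_inc_return(&mem->numainfo_updating) > 1) return; */
	if (atomic_fetch_add(&updating, 1) + 1 > 1)
		return false;		/* someone else is already rebuilding */

	/* ... rebuild the per-memcg nodemask here ... */

	atomic_store(&updating, 0);	/* mirrors atomic_set(..., 0) */
	return true;
}

int main(void)
{
	return try_rebuild() ? 0 : 1;	/* single-threaded demo: succeeds */
}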

/*
@@ -1627,11 +1653,51 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
return node;
}

/*
* Check all nodes whether they contain reclaimable pages or not.
* For a quick scan, we make use of scan_nodes. This allows us to skip
* unused nodes. But scan_nodes is lazily updated and may not contain
* enough new information. We need to double check.
*/
bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap)
{
int nid;

/*
* quick check...making use of scan_node.
* We can skip unused nodes.
*/
if (!nodes_empty(mem->scan_nodes)) {
for (nid = first_node(mem->scan_nodes);
nid < MAX_NUMNODES;
nid = next_node(nid, mem->scan_nodes)) {

if (test_mem_cgroup_node_reclaimable(mem, nid, noswap))
return true;
}
}
/*
* Check rest of nodes.
*/
for_each_node_state(nid, N_HIGH_MEMORY) {
if (node_isset(nid, mem->scan_nodes))
continue;
if (test_mem_cgroup_node_reclaimable(mem, nid, noswap))
return true;
}
return false;
}

#else
int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
{
return 0;
}

bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap)
{
return test_mem_cgroup_node_reclaimable(mem, 0, noswap);
}
#endif

/*
Expand Down Expand Up @@ -1702,7 +1768,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
}
}
}
if (!mem_cgroup_local_usage(victim)) {
if (!mem_cgroup_reclaimable(victim, noswap)) {
/* this cgroup's local usage == 0 */
css_put(&victim->css);
continue;
1 change: 1 addition & 0 deletions trunk/mm/memory.c
@@ -305,6 +305,7 @@ int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
if (batch->nr == batch->max) {
if (!tlb_next_batch(tlb))
return 0;
batch = tlb->active;
}
VM_BUG_ON(batch->nr > batch->max);

9 changes: 6 additions & 3 deletions trunk/mm/nommu.c
@@ -1813,10 +1813,13 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
return NULL;
}

int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
unsigned long to, unsigned long size, pgprot_t prot)
int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t prot)
{
vma->vm_start = vma->vm_pgoff << PAGE_SHIFT;
if (addr != (pfn << PAGE_SHIFT))
return -EINVAL;

vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
return 0;
}
EXPORT_SYMBOL(remap_pfn_range);