diff --git a/[refs] b/[refs]
index f5290310bd43..228a083687da 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 1acc9309eb2674533944f48dbaaa53e7750e3947
+refs/heads/master: e2cf3137aaa23faf2b8c1335671f1a0a0d90aab5
diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS
index 4af7ab032660..9820e89c827c 100644
--- a/trunk/MAINTAINERS
+++ b/trunk/MAINTAINERS
@@ -4982,7 +4982,7 @@
 F:	drivers/power/power_supply*
 
 PNP SUPPORT
 M:	Adam Belay
-M:	Bjorn Helgaas
+M:	Bjorn Helgaas
 S:	Maintained
 F:	drivers/pnp/
diff --git a/trunk/drivers/leds/leds-pca9532.c b/trunk/drivers/leds/leds-pca9532.c
index a2c874623e35..d8d3a1e910a1 100644
--- a/trunk/drivers/leds/leds-pca9532.c
+++ b/trunk/drivers/leds/leds-pca9532.c
@@ -88,7 +88,7 @@ static const struct pca9532_chip_info pca9532_chip_info_tbl[] = {
 
 static struct i2c_driver pca9532_driver = {
 	.driver = {
-		.name = "leds-pca953x",
+		.name = "pca953x",
 	},
 	.probe = pca9532_probe,
 	.remove = pca9532_remove,
diff --git a/trunk/drivers/mfd/asic3.c b/trunk/drivers/mfd/asic3.c
index c71ae09430c5..c27fd1fc3b86 100644
--- a/trunk/drivers/mfd/asic3.c
+++ b/trunk/drivers/mfd/asic3.c
@@ -619,7 +619,6 @@ static void asic3_clk_disable(struct asic3 *asic, struct asic3_clk *clk)
 /* MFD cells (SPI, PWM, LED, DS1WM, MMC) */
 static struct ds1wm_driver_data ds1wm_pdata = {
 	.active_high = 1,
-	.reset_recover_delay = 1,
 };
 
 static struct resource ds1wm_resources[] = {
diff --git a/trunk/drivers/mfd/htc-pasic3.c b/trunk/drivers/mfd/htc-pasic3.c
index 04c7093d6499..2808bd125d13 100644
--- a/trunk/drivers/mfd/htc-pasic3.c
+++ b/trunk/drivers/mfd/htc-pasic3.c
@@ -99,7 +99,6 @@ static int ds1wm_disable(struct platform_device *pdev)
 
 static struct ds1wm_driver_data ds1wm_pdata = {
 	.active_high = 0,
-	.reset_recover_delay = 1,
 };
 
 static struct resource ds1wm_resources[] __initdata = {
diff --git a/trunk/drivers/regulator/max8952.c b/trunk/drivers/regulator/max8952.c
index daff7fd0e95c..486ed8141fcd 100644
--- a/trunk/drivers/regulator/max8952.c
+++ b/trunk/drivers/regulator/max8952.c
@@ -139,7 +139,7 @@ static int max8952_set_voltage(struct regulator_dev *rdev,
 	s8 vid = -1, i;
 
 	if (!gpio_is_valid(max8952->pdata->gpio_vid0) ||
-	    !gpio_is_valid(max8952->pdata->gpio_vid0)) {
+	    !gpio_is_valid(max8952->pdata->gpio_vid1)) {
 		/* DVS not supported */
 		return -EPERM;
 	}
diff --git a/trunk/drivers/w1/masters/ds1wm.c b/trunk/drivers/w1/masters/ds1wm.c
index a0c8965c1a79..ad57593d224a 100644
--- a/trunk/drivers/w1/masters/ds1wm.c
+++ b/trunk/drivers/w1/masters/ds1wm.c
@@ -109,7 +109,6 @@ struct ds1wm_data {
 	/* byte to write that makes all intr disabled, */
 	/* considering active_state (IAS) (optimization) */
 	u8  int_en_reg_none;
-	unsigned int reset_recover_delay; /* see ds1wm.h */
 };
 
 static inline void ds1wm_write_register(struct ds1wm_data *ds1wm_data, u32 reg,
@@ -188,9 +187,6 @@ static int ds1wm_reset(struct ds1wm_data *ds1wm_data)
 		return 1;
 	}
 
-	if (ds1wm_data->reset_recover_delay)
-		msleep(ds1wm_data->reset_recover_delay);
-
 	return 0;
 }
 
@@ -494,7 +490,6 @@ static int ds1wm_probe(struct platform_device *pdev)
 	}
 	ds1wm_data->irq = res->start;
 	ds1wm_data->int_en_reg_none = (plat->active_high ? DS1WM_INTEN_IAS : 0);
-	ds1wm_data->reset_recover_delay = plat->reset_recover_delay;
 
 	if (res->flags & IORESOURCE_IRQ_HIGHEDGE)
 		irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_RISING);
diff --git a/trunk/fs/btrfs/ctree.h b/trunk/fs/btrfs/ctree.h
index 3b859a3e6a0e..f30ac05dbda7 100644
--- a/trunk/fs/btrfs/ctree.h
+++ b/trunk/fs/btrfs/ctree.h
@@ -1335,11 +1335,6 @@ struct btrfs_ioctl_defrag_range_args {
  */
 #define BTRFS_STRING_ITEM_KEY	253
 
-/*
- * Flags for mount options.
- *
- * Note: don't forget to add new options to btrfs_show_options()
- */
 #define BTRFS_MOUNT_NODATASUM		(1 << 0)
 #define BTRFS_MOUNT_NODATACOW		(1 << 1)
 #define BTRFS_MOUNT_NOBARRIER		(1 << 2)
diff --git a/trunk/fs/btrfs/inode.c b/trunk/fs/btrfs/inode.c
index 3601f0aebddf..d340f63d8f07 100644
--- a/trunk/fs/btrfs/inode.c
+++ b/trunk/fs/btrfs/inode.c
@@ -2678,14 +2678,12 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
 	int ret;
 
 	/*
-	 * If the inode is a free space inode, we can deadlock during commit
-	 * if we put it into the delayed code.
-	 *
-	 * The data relocation inode should also be directly updated
-	 * without delay
+	 * If root is the tree root, it means this inode is used to
+	 * store free space information. These inodes are updated
+	 * when committing the transaction, so their updates needn't
+	 * be delayed, or a deadlock will occur.
 	 */
-	if (!is_free_space_inode(root, inode)
-	    && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
+	if (!is_free_space_inode(root, inode)) {
 		ret = btrfs_delayed_update_inode(trans, root, inode);
 		if (!ret)
 			btrfs_set_inode_last_trans(trans, inode);
diff --git a/trunk/fs/btrfs/super.c b/trunk/fs/btrfs/super.c
index 15634d4648d7..0bb4ebbb71b7 100644
--- a/trunk/fs/btrfs/super.c
+++ b/trunk/fs/btrfs/super.c
@@ -723,12 +723,6 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
 		seq_puts(seq, ",clear_cache");
 	if (btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED))
 		seq_puts(seq, ",user_subvol_rm_allowed");
-	if (btrfs_test_opt(root, ENOSPC_DEBUG))
-		seq_puts(seq, ",enospc_debug");
-	if (btrfs_test_opt(root, AUTO_DEFRAG))
-		seq_puts(seq, ",autodefrag");
-	if (btrfs_test_opt(root, INODE_MAP_CACHE))
-		seq_puts(seq, ",inode_cache");
 	return 0;
 }
 
diff --git a/trunk/fs/btrfs/volumes.c b/trunk/fs/btrfs/volumes.c
index 19450bc53632..1efa56e18f9b 100644
--- a/trunk/fs/btrfs/volumes.c
+++ b/trunk/fs/btrfs/volumes.c
@@ -2098,8 +2098,7 @@ int btrfs_balance(struct btrfs_root *dev_root)
 				chunk_root->root_key.objectid,
 				found_key.objectid,
 				found_key.offset);
-		if (ret && ret != -ENOSPC)
-			goto error;
+		BUG_ON(ret && ret != -ENOSPC);
 		key.offset = found_key.offset - 1;
 	}
 	ret = 0;
diff --git a/trunk/include/linux/mfd/ds1wm.h b/trunk/include/linux/mfd/ds1wm.h
index 38a372a0e285..be469a357cbb 100644
--- a/trunk/include/linux/mfd/ds1wm.h
+++ b/trunk/include/linux/mfd/ds1wm.h
@@ -3,11 +3,4 @@
 struct ds1wm_driver_data {
 	int active_high;
 	int clock_rate;
-	/* in milliseconds, the amount of time to */
-	/* sleep following a reset pulse. Zero    */
-	/* should work if your bus devices recover*/
-	/* time respects the 1-wire spec since the*/
-	/* ds1wm implements the precise timings of*/
-	/* a reset pulse/presence detect sequence.*/
-	unsigned int reset_recover_delay;
 };
diff --git a/trunk/mm/memcontrol.c b/trunk/mm/memcontrol.c
index e013b8e57d25..ddffc74cdebe 100644
--- a/trunk/mm/memcontrol.c
+++ b/trunk/mm/memcontrol.c
@@ -108,12 +108,10 @@ enum mem_cgroup_events_index {
 enum mem_cgroup_events_target {
 	MEM_CGROUP_TARGET_THRESH,
 	MEM_CGROUP_TARGET_SOFTLIMIT,
-	MEM_CGROUP_TARGET_NUMAINFO,
 	MEM_CGROUP_NTARGETS,
 };
 #define THRESHOLDS_EVENTS_TARGET (128)
 #define SOFTLIMIT_EVENTS_TARGET (1024)
-#define NUMAINFO_EVENTS_TARGET (1024)
 
 struct mem_cgroup_stat_cpu {
 	long count[MEM_CGROUP_STAT_NSTATS];
@@ -239,8 +237,7 @@ struct mem_cgroup {
 	int last_scanned_node;
 #if MAX_NUMNODES > 1
 	nodemask_t	scan_nodes;
-	atomic_t	numainfo_events;
-	atomic_t	numainfo_updating;
+	unsigned long	next_scan_node_update;
 #endif
 	/*
 	 * Should the accounting and control be hierarchical, per subtree?
@@ -580,6 +577,15 @@ static long mem_cgroup_read_stat(struct mem_cgroup *mem,
 	return val;
 }
 
+static long mem_cgroup_local_usage(struct mem_cgroup *mem)
+{
+	long ret;
+
+	ret = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
+	ret += mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
+	return ret;
+}
+
 static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
 					 bool charge)
 {
@@ -683,9 +689,6 @@ static void __mem_cgroup_target_update(struct mem_cgroup *mem, int target)
 	case MEM_CGROUP_TARGET_SOFTLIMIT:
 		next = val + SOFTLIMIT_EVENTS_TARGET;
 		break;
-	case MEM_CGROUP_TARGET_NUMAINFO:
-		next = val + NUMAINFO_EVENTS_TARGET;
-		break;
 	default:
 		return;
 	}
@@ -704,19 +707,11 @@ static void memcg_check_events(struct mem_cgroup *mem, struct page *page)
 		mem_cgroup_threshold(mem);
 		__mem_cgroup_target_update(mem, MEM_CGROUP_TARGET_THRESH);
 		if (unlikely(__memcg_event_check(mem,
-			MEM_CGROUP_TARGET_SOFTLIMIT))) {
+			MEM_CGROUP_TARGET_SOFTLIMIT))){
 			mem_cgroup_update_tree(mem, page);
 			__mem_cgroup_target_update(mem,
-				MEM_CGROUP_TARGET_SOFTLIMIT);
-		}
-#if MAX_NUMNODES > 1
-		if (unlikely(__memcg_event_check(mem,
-			MEM_CGROUP_TARGET_NUMAINFO))) {
-			atomic_inc(&mem->numainfo_events);
-			__mem_cgroup_target_update(mem,
-				MEM_CGROUP_TARGET_NUMAINFO);
+				MEM_CGROUP_TARGET_SOFTLIMIT);
 		}
-#endif
 	}
 }
 
@@ -1134,6 +1129,7 @@ unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
 	return MEM_CGROUP_ZSTAT(mz, lru);
 }
 
+#ifdef CONFIG_NUMA
 static unsigned long mem_cgroup_node_nr_file_lru_pages(struct mem_cgroup *memcg,
 						       int nid)
 {
@@ -1145,17 +1141,6 @@ static unsigned long mem_cgroup_node_nr_file_lru_pages(struct mem_cgroup *memcg,
 	return ret;
 }
 
-static unsigned long mem_cgroup_node_nr_anon_lru_pages(struct mem_cgroup *memcg,
-						       int nid)
-{
-	unsigned long ret;
-
-	ret = mem_cgroup_get_zonestat_node(memcg, nid, LRU_INACTIVE_ANON) +
-		mem_cgroup_get_zonestat_node(memcg, nid, LRU_ACTIVE_ANON);
-	return ret;
-}
-
-#if MAX_NUMNODES > 1
 static unsigned long mem_cgroup_nr_file_lru_pages(struct mem_cgroup *memcg)
 {
 	u64 total = 0;
@@ -1167,6 +1152,17 @@ static unsigned long mem_cgroup_nr_file_lru_pages(struct mem_cgroup *memcg)
 	return total;
 }
 
+static unsigned long mem_cgroup_node_nr_anon_lru_pages(struct mem_cgroup *memcg,
+						       int nid)
+{
+	unsigned long ret;
+
+	ret = mem_cgroup_get_zonestat_node(memcg, nid, LRU_INACTIVE_ANON) +
+		mem_cgroup_get_zonestat_node(memcg, nid, LRU_ACTIVE_ANON);
+
+	return ret;
+}
+
 static unsigned long mem_cgroup_nr_anon_lru_pages(struct mem_cgroup *memcg)
 {
 	u64 total = 0;
@@ -1563,28 +1559,6 @@ mem_cgroup_select_victim(struct mem_cgroup *root_mem)
 	return ret;
 }
 
-/**
- * test_mem_cgroup_node_reclaimable
- * @mem: the target memcg
- * @nid: the node ID to be checked.
- * @noswap : specify true here if the user wants flle only information.
- *
- * This function returns whether the specified memcg contains any
- * reclaimable pages on a node. Returns true if there are any reclaimable
- * pages in the node.
- */
-static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *mem,
-		int nid, bool noswap)
-{
-	if (mem_cgroup_node_nr_file_lru_pages(mem, nid))
-		return true;
-	if (noswap || !total_swap_pages)
-		return false;
-	if (mem_cgroup_node_nr_anon_lru_pages(mem, nid))
-		return true;
-	return false;
-
-}
 #if MAX_NUMNODES > 1
 
 /*
@@ -1596,26 +1570,26 @@ static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *mem,
 static void mem_cgroup_may_update_nodemask(struct mem_cgroup *mem)
 {
 	int nid;
-	/*
-	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
-	 * pagein/pageout changes since the last update.
-	 */
-	if (!atomic_read(&mem->numainfo_events))
-		return;
-	if (atomic_inc_return(&mem->numainfo_updating) > 1)
+
+	if (time_after(mem->next_scan_node_update, jiffies))
 		return;
+	mem->next_scan_node_update = jiffies + 10*HZ;
 
 	/* make a nodemask where this memcg uses memory from */
 	mem->scan_nodes = node_states[N_HIGH_MEMORY];
 
 	for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) {
-		if (!test_mem_cgroup_node_reclaimable(mem, nid, false))
-			node_clear(nid, mem->scan_nodes);
-	}
+		if (mem_cgroup_get_zonestat_node(mem, nid, LRU_INACTIVE_FILE) ||
+		    mem_cgroup_get_zonestat_node(mem, nid, LRU_ACTIVE_FILE))
+			continue;
 
-	atomic_set(&mem->numainfo_events, 0);
-	atomic_set(&mem->numainfo_updating, 0);
+		if (total_swap_pages &&
+		    (mem_cgroup_get_zonestat_node(mem, nid, LRU_INACTIVE_ANON) ||
+		     mem_cgroup_get_zonestat_node(mem, nid, LRU_ACTIVE_ANON)))
+			continue;
+		node_clear(nid, mem->scan_nodes);
+	}
 }
 
 /*
@@ -1653,51 +1627,11 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
 	return node;
 }
 
-/*
- * Check all nodes whether it contains reclaimable pages or not.
- * For quick scan, we make use of scan_nodes. This will allow us to skip
- * unused nodes. But scan_nodes is lazily updated and may not cotain
- * enough new information. We need to do double check.
- */
-bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap)
-{
-	int nid;
-
-	/*
-	 * quick check...making use of scan_node.
-	 * We can skip unused nodes.
-	 */
-	if (!nodes_empty(mem->scan_nodes)) {
-		for (nid = first_node(mem->scan_nodes);
-		     nid < MAX_NUMNODES;
-		     nid = next_node(nid, mem->scan_nodes)) {
-
-			if (test_mem_cgroup_node_reclaimable(mem, nid, noswap))
-				return true;
-		}
-	}
-	/*
-	 * Check rest of nodes.
-	 */
-	for_each_node_state(nid, N_HIGH_MEMORY) {
-		if (node_isset(nid, mem->scan_nodes))
-			continue;
-		if (test_mem_cgroup_node_reclaimable(mem, nid, noswap))
-			return true;
-	}
-	return false;
-}
-
 #else
 int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
 {
 	return 0;
 }
-
-bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap)
-{
-	return test_mem_cgroup_node_reclaimable(mem, 0, noswap);
-}
 #endif
 
 /*
@@ -1768,7 +1702,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
 				}
 			}
 		}
-		if (!mem_cgroup_reclaimable(victim, noswap)) {
+		if (!mem_cgroup_local_usage(victim)) {
 			/* this cgroup's local usage == 0 */
 			css_put(&victim->css);
 			continue;
diff --git a/trunk/mm/memory.c b/trunk/mm/memory.c
index 9b8a01d941cb..40b7531ee8ba 100644
--- a/trunk/mm/memory.c
+++ b/trunk/mm/memory.c
@@ -305,7 +305,6 @@ int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 	if (batch->nr == batch->max) {
 		if (!tlb_next_batch(tlb))
 			return 0;
-		batch = tlb->active;
 	}
 	VM_BUG_ON(batch->nr > batch->max);
 
diff --git a/trunk/mm/nommu.c b/trunk/mm/nommu.c
index 9edc897a3970..1fd0c51b10a6 100644
--- a/trunk/mm/nommu.c
+++ b/trunk/mm/nommu.c
@@ -1813,13 +1813,10 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 	return NULL;
 }
 
-int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
-		unsigned long pfn, unsigned long size, pgprot_t prot)
+int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
+		unsigned long to, unsigned long size, pgprot_t prot)
 {
-	if (addr != (pfn << PAGE_SHIFT))
-		return -EINVAL;
-
-	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
+	vma->vm_start = vma->vm_pgoff << PAGE_SHIFT;
 	return 0;
 }
 EXPORT_SYMBOL(remap_pfn_range);
diff --git a/trunk/mm/vmscan.c b/trunk/mm/vmscan.c
index 5ed24b94c5e6..4f49535d4cd3 100644
--- a/trunk/mm/vmscan.c
+++ b/trunk/mm/vmscan.c
@@ -2326,7 +2326,7 @@ static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
 		return true;
 
 	/* Check the watermark levels */
-	for (i = 0; i <= classzone_idx; i++) {
+	for (i = 0; i < pgdat->nr_zones; i++) {
 		struct zone *zone = pgdat->node_zones + i;
 
 		if (!populated_zone(zone))
@@ -2344,7 +2344,7 @@ static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
 		}
 
 		if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
-							i, 0))
+							classzone_idx, 0))
 			all_zones_ok = false;
 		else
 			balanced += zone->present_pages;
@@ -2451,6 +2451,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 			if (!zone_watermark_ok_safe(zone, order,
 					high_wmark_pages(zone), 0, 0)) {
 				end_zone = i;
+				*classzone_idx = i;
 				break;
 			}
 		}
@@ -2509,18 +2510,18 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 				KSWAPD_ZONE_BALANCE_GAP_RATIO);
 		if (!zone_watermark_ok_safe(zone, order,
 				high_wmark_pages(zone) + balance_gap,
-				end_zone, 0)) {
+				end_zone, 0))
 			shrink_zone(priority, zone, &sc);
+		reclaim_state->reclaimed_slab = 0;
+		nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
+		sc.nr_reclaimed += reclaim_state->reclaimed_slab;
+		total_scanned += sc.nr_scanned;
 
-			reclaim_state->reclaimed_slab = 0;
-			nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
-			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
-			total_scanned += sc.nr_scanned;
-
-			if (nr_slab == 0 && !zone_reclaimable(zone))
-				zone->all_unreclaimable = 1;
-		}
-
+		if (zone->all_unreclaimable)
+			continue;
+		if (nr_slab == 0 &&
+		    !zone_reclaimable(zone))
+			zone->all_unreclaimable = 1;
 		/*
 		 * If we've done a decent amount of scanning and
 		 * the reclaim ratio is low, start doing writepage
@@ -2530,12 +2531,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 		    total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
 			sc.may_writepage = 1;
 
-		if (zone->all_unreclaimable) {
-			if (end_zone && end_zone == i)
-				end_zone--;
-			continue;
-		}
-
 		if (!zone_watermark_ok_safe(zone, order,
 				high_wmark_pages(zone), end_zone, 0)) {
 			all_zones_ok = 0;
@@ -2714,8 +2709,8 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
  */
 static int kswapd(void *p)
 {
-	unsigned long order, new_order;
-	int classzone_idx, new_classzone_idx;
+	unsigned long order;
+	int classzone_idx;
 	pg_data_t *pgdat = (pg_data_t*)p;
 	struct task_struct *tsk = current;
 
@@ -2745,23 +2740,17 @@ static int kswapd(void *p)
 	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
 	set_freezable();
 
-	order = new_order = 0;
-	classzone_idx = new_classzone_idx = pgdat->nr_zones - 1;
+	order = 0;
+	classzone_idx = MAX_NR_ZONES - 1;
 	for ( ; ; ) {
+		unsigned long new_order;
+		int new_classzone_idx;
 		int ret;
 
-		/*
-		 * If the last balance_pgdat was unsuccessful it's unlikely a
-		 * new request of a similar or harder type will succeed soon
-		 * so consider going to sleep on the basis we reclaimed at
-		 */
-		if (classzone_idx >= new_classzone_idx && order == new_order) {
-			new_order = pgdat->kswapd_max_order;
-			new_classzone_idx = pgdat->classzone_idx;
-			pgdat->kswapd_max_order = 0;
-			pgdat->classzone_idx = pgdat->nr_zones - 1;
-		}
-
+		new_order = pgdat->kswapd_max_order;
+		new_classzone_idx = pgdat->classzone_idx;
+		pgdat->kswapd_max_order = 0;
+		pgdat->classzone_idx = MAX_NR_ZONES - 1;
 		if (order < new_order || classzone_idx > new_classzone_idx) {
 			/*
 			 * Don't sleep if someone wants a larger 'order'
@@ -2774,7 +2763,7 @@ static int kswapd(void *p)
 			order = pgdat->kswapd_max_order;
 			classzone_idx = pgdat->classzone_idx;
 			pgdat->kswapd_max_order = 0;
-			pgdat->classzone_idx = pgdat->nr_zones - 1;
+			pgdat->classzone_idx = MAX_NR_ZONES - 1;
 		}
 
 		ret = try_to_freeze();