Commit

---
r: 283009
b: refs/heads/master
c: ab8fabd
h: refs/heads/master
i:
  283007: 848b852
v: v3
Johannes Weiner authored and Linus Torvalds committed Jan 11, 2012
1 parent 1b733a7 commit 4e1125f
Showing 5 changed files with 30 additions and 3 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 25bd91bd27820d5971258cecd1c0e64b0e485144
+refs/heads/master: ab8fabd46f811d5153d8a0cd2fac9a0d41fb593d
6 changes: 6 additions & 0 deletions trunk/include/linux/mmzone.h
@@ -317,6 +317,12 @@ struct zone {
 	 */
 	unsigned long lowmem_reserve[MAX_NR_ZONES];
 
+	/*
+	 * This is a per-zone reserve of pages that should not be
+	 * considered dirtyable memory.
+	 */
+	unsigned long dirty_balance_reserve;
+
 #ifdef CONFIG_NUMA
 	int node;
 	/*
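The comment above is the whole contract of the new field: whenever a zone's dirtyable memory is computed, these reserved pages come off the top. As a minimal standalone sketch of that arithmetic (hypothetical struct and values standing in for struct zone and its counters, not kernel code):

	#include <stdio.h>

	/*
	 * Illustrative sketch only. The fields stand in for NR_FREE_PAGES,
	 * zone_reclaimable_pages() and the new dirty_balance_reserve.
	 */
	struct zone_sample {
		unsigned long free_pages;
		unsigned long reclaimable_pages;
		unsigned long dirty_balance_reserve;
	};

	static unsigned long zone_dirtyable_sketch(const struct zone_sample *z)
	{
		/* Reserved pages must stay clean, so subtract them. */
		return z->free_pages + z->reclaimable_pages
		       - z->dirty_balance_reserve;
	}

	int main(void)
	{
		struct zone_sample z = { 100000UL, 50000UL, 8000UL }; /* hypothetical */
		printf("dirtyable pages: %lu\n", zone_dirtyable_sketch(&z));
		return 0;
	}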
1 change: 1 addition & 0 deletions trunk/include/linux/swap.h
@@ -207,6 +207,7 @@ struct swap_list_t {
 /* linux/mm/page_alloc.c */
 extern unsigned long totalram_pages;
 extern unsigned long totalreserve_pages;
+extern unsigned long dirty_balance_reserve;
 extern unsigned int nr_free_buffer_pages(void);
 extern unsigned int nr_free_pagecache_pages(void);

5 changes: 3 additions & 2 deletions trunk/mm/page-writeback.c
@@ -157,7 +157,7 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
 			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
 
 		x += zone_page_state(z, NR_FREE_PAGES) +
-		     zone_reclaimable_pages(z);
+		     zone_reclaimable_pages(z) - z->dirty_balance_reserve;
 	}
 	/*
 	 * Make sure that the number of highmem pages is never larger
@@ -181,7 +181,8 @@ static unsigned long determine_dirtyable_memory(void)
 {
 	unsigned long x;
 
-	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
+	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages() -
+	    dirty_balance_reserve;
 
 	if (!vm_highmem_is_dirtyable)
 		x -= highmem_dirtyable_memory(x);
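Taken together, the two hunks make the global dirtyable total "free + reclaimable - reserve", optionally minus highmem. A hedged sketch of the resulting shape (plain C with stand-in parameters, not a compilable kernel excerpt; the unchanged tail of the kernel function, not shown in the hunk, returns x + 1 so the total is never zero):

	/*
	 * Sketch only: parameters stand in for the kernel's global counters.
	 */
	static unsigned long dirtyable_memory_sketch(unsigned long global_free,
						     unsigned long global_reclaimable,
						     unsigned long dirty_balance_reserve,
						     unsigned long highmem_dirtyable,
						     int vm_highmem_is_dirtyable)
	{
		unsigned long x = global_free + global_reclaimable
				  - dirty_balance_reserve;

		/*
		 * highmem_dirtyable is assumed to already be net of the
		 * highmem zones' reserves, as the first hunk arranges.
		 */
		if (!vm_highmem_is_dirtyable)
			x -= highmem_dirtyable;

		return x + 1;	/* never report zero dirtyable pages */
	}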
19 changes: 19 additions & 0 deletions trunk/mm/page_alloc.c
@@ -97,6 +97,14 @@ EXPORT_SYMBOL(node_states);
 
 unsigned long totalram_pages __read_mostly;
 unsigned long totalreserve_pages __read_mostly;
+/*
+ * When calculating the number of globally allowed dirty pages, there
+ * is a certain number of per-zone reserves that should not be
+ * considered dirtyable memory. This is the sum of those reserves
+ * over all existing zones that contribute dirtyable memory.
+ */
+unsigned long dirty_balance_reserve __read_mostly;
+
 int percpu_pagelist_fraction;
 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

@@ -4822,8 +4830,19 @@ static void calculate_totalreserve_pages(void)
 			if (max > zone->present_pages)
 				max = zone->present_pages;
 			reserve_pages += max;
+			/*
+			 * Lowmem reserves are not available to
+			 * GFP_HIGHUSER page cache allocations and
+			 * kswapd tries to balance zones to their high
+			 * watermark. As a result, neither should be
+			 * regarded as dirtyable memory, to prevent a
+			 * situation where reclaim has to clean pages
+			 * in order to balance the zones.
+			 */
+			zone->dirty_balance_reserve = max;
 		}
 	}
+	dirty_balance_reserve = reserve_pages;
 	totalreserve_pages = reserve_pages;
 }

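The reserve recorded per zone reuses the function's existing computation: the largest lowmem_reserve entry plus the zone's high watermark, clamped to the zone's present pages. A standalone sketch of that per-zone step (hypothetical helper; the kernel iterates struct zone and uses high_wmark_pages()):

	#include <stddef.h>

	/*
	 * Hypothetical standalone helper mirroring the per-zone step
	 * above; not kernel code.
	 */
	static unsigned long zone_reserve_sketch(const unsigned long *lowmem_reserve,
						 size_t nr_entries,
						 unsigned long high_wmark,
						 unsigned long present_pages)
	{
		unsigned long max = 0;
		size_t i;

		/* Largest lowmem_reserve entry: pages this zone holds back
		 * from allocations that could have gone to lower zones. */
		for (i = 0; i < nr_entries; i++)
			if (lowmem_reserve[i] > max)
				max = lowmem_reserve[i];

		/* kswapd balances the zone up to its high watermark too. */
		max += high_wmark;

		/* A zone cannot reserve more pages than it has. */
		if (max > present_pages)
			max = present_pages;

		return max;	/* stored as zone->dirty_balance_reserve and summed */
	}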
