Commit

---
r: 347869
b: refs/heads/master
c: c8b74c2
h: refs/heads/master
i:
  347867: ebd3cf5
v: v3
Sonny Rao authored and Linus Torvalds committed Dec 21, 2012
1 parent 568ac70 commit 331b501
Showing 2 changed files with 21 additions and 6 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 010fc29a45a2e8dbc08bf45ef80b8622619aaae0
+refs/heads/master: c8b74c2f6604923de91f8aa6539f8bb934736754
25 changes: 20 additions & 5 deletions trunk/mm/page-writeback.c
@@ -200,6 +200,18 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
 		x += zone_page_state(z, NR_FREE_PAGES) +
 		     zone_reclaimable_pages(z) - z->dirty_balance_reserve;
 	}
+	/*
+	 * Unreclaimable memory (kernel memory or anonymous memory
+	 * without swap) can bring down the dirtyable pages below
+	 * the zone's dirty balance reserve and the above calculation
+	 * will underflow. However we still want to add in nodes
+	 * which are below threshold (negative values) to get a more
+	 * accurate calculation but make sure that the total never
+	 * underflows.
+	 */
+	if ((long)x < 0)
+		x = 0;
+
 	/*
 	 * Make sure that the number of highmem pages is never larger
 	 * than the number of the total dirtyable memory. This can only
@@ -222,8 +234,8 @@ static unsigned long global_dirtyable_memory(void)
 {
 	unsigned long x;
 
-	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages() -
-	    dirty_balance_reserve;
+	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
+	x -= min(x, dirty_balance_reserve);
 
 	if (!vm_highmem_is_dirtyable)
 		x -= highmem_dirtyable_memory(x);
@@ -290,9 +302,12 @@ static unsigned long zone_dirtyable_memory(struct zone *zone)
 	 * highmem zone can hold its share of dirty pages, so we don't
 	 * care about vm_highmem_is_dirtyable here.
 	 */
-	return zone_page_state(zone, NR_FREE_PAGES) +
-	       zone_reclaimable_pages(zone) -
-	       zone->dirty_balance_reserve;
+	unsigned long nr_pages = zone_page_state(zone, NR_FREE_PAGES) +
+				 zone_reclaimable_pages(zone);
+
+	/* don't allow this to underflow */
+	nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
+	return nr_pages;
 }
 
 /**
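For context, the bug being fixed is plain unsigned underflow: when free plus reclaimable pages are smaller than dirty_balance_reserve, the old subtraction wraps around to a huge page count. Below is a minimal user-space sketch of the two clamping idioms the patch uses. The page counts and the min_ul() helper are made up for illustration; the kernel code uses its own min() macro and per-zone/global counters.

```c
#include <stdio.h>

/* Hypothetical numbers: fewer free + reclaimable pages than the reserve. */
#define FREE_AND_RECLAIMABLE	1000UL
#define DIRTY_BALANCE_RESERVE	1500UL

/* Stand-in for the kernel's min() macro. */
static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long x;

	/* Old calculation: unsigned subtraction wraps to a huge value. */
	x = FREE_AND_RECLAIMABLE - DIRTY_BALANCE_RESERVE;
	printf("unclamped:              %lu\n", x);

	/* Idiom used in global_dirtyable_memory()/zone_dirtyable_memory():
	 * subtract at most what is actually there, so the result bottoms
	 * out at zero instead of wrapping. */
	x = FREE_AND_RECLAIMABLE;
	x -= min_ul(x, DIRTY_BALANCE_RESERVE);
	printf("clamped with min():     %lu\n", x);

	/* Idiom used in highmem_dirtyable_memory(): let the per-zone terms
	 * go "negative" in the accumulated total, then clamp once at the
	 * end by reinterpreting the unsigned sum as signed. */
	x = FREE_AND_RECLAIMABLE - DIRTY_BALANCE_RESERVE;
	if ((long)x < 0)
		x = 0;
	printf("clamped with sign test: %lu\n", x);

	return 0;
}
```

The two idioms differ deliberately: the min() form never lets an intermediate value wrap, while the sign test allows individual zones to contribute negative amounts (so zones below their reserve still reduce the total) and only clamps the final sum.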
