Commit c38f9cb

---
r: 164391
b: refs/heads/master
c: adea02a
h: refs/heads/master
i:
  164389: 81b8536
  164387: 6cf4869
  164383: b364ffd
v: v3
Wu Fengguang authored and Linus Torvalds committed Sep 22, 2009
1 parent 9ed3898 commit c38f9cb
Showing 4 changed files with 45 additions and 23 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 55c37a840d9ec0ebed5c944355156d490b1ad5d1
+refs/heads/master: adea02a1bea71a508da32c04d715485a1fe62029
11 changes: 2 additions & 9 deletions trunk/include/linux/vmstat.h
@@ -166,15 +166,8 @@ static inline unsigned long zone_page_state(struct zone *zone,
 	return x;
 }
 
-extern unsigned long global_lru_pages(void);
-
-static inline unsigned long zone_lru_pages(struct zone *zone)
-{
-	return (zone_page_state(zone, NR_ACTIVE_ANON)
-		+ zone_page_state(zone, NR_ACTIVE_FILE)
-		+ zone_page_state(zone, NR_INACTIVE_ANON)
-		+ zone_page_state(zone, NR_INACTIVE_FILE));
-}
+extern unsigned long global_reclaimable_pages(void);
+extern unsigned long zone_reclaimable_pages(struct zone *zone);
 
 #ifdef CONFIG_NUMA
 /*
5 changes: 3 additions & 2 deletions trunk/mm/page-writeback.c
@@ -380,7 +380,8 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
 		struct zone *z =
 			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
 
-		x += zone_page_state(z, NR_FREE_PAGES) + zone_lru_pages(z);
+		x += zone_page_state(z, NR_FREE_PAGES) +
+		     zone_reclaimable_pages(z);
 	}
 	/*
 	 * Make sure that the number of highmem pages is never larger
@@ -404,7 +405,7 @@ unsigned long determine_dirtyable_memory(void)
 {
 	unsigned long x;
 
-	x = global_page_state(NR_FREE_PAGES) + global_lru_pages();
+	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
 
 	if (!vm_highmem_is_dirtyable)
 		x -= highmem_dirtyable_memory(x);
50 changes: 39 additions & 11 deletions trunk/mm/vmscan.c
@@ -1734,7 +1734,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 				continue;
 
-			lru_pages += zone_lru_pages(zone);
+			lru_pages += zone_reclaimable_pages(zone);
 		}
 	}
 
@@ -1951,7 +1951,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
 		for (i = 0; i <= end_zone; i++) {
 			struct zone *zone = pgdat->node_zones + i;
 
-			lru_pages += zone_lru_pages(zone);
+			lru_pages += zone_reclaimable_pages(zone);
 		}
 
 		/*
@@ -1995,7 +1995,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
 			if (zone_is_all_unreclaimable(zone))
 				continue;
 			if (nr_slab == 0 && zone->pages_scanned >=
-						(zone_lru_pages(zone) * 6))
+					(zone_reclaimable_pages(zone) * 6))
 					zone_set_flag(zone,
 					ZONE_ALL_UNRECLAIMABLE);
 			/*
@@ -2162,12 +2162,39 @@ void wakeup_kswapd(struct zone *zone, int order)
 	wake_up_interruptible(&pgdat->kswapd_wait);
 }
 
-unsigned long global_lru_pages(void)
+/*
+ * The reclaimable count would be mostly accurate.
+ * The less reclaimable pages may be
+ * - mlocked pages, which will be moved to unevictable list when encountered
+ * - mapped pages, which may require several travels to be reclaimed
+ * - dirty pages, which is not "instantly" reclaimable
+ */
+unsigned long global_reclaimable_pages(void)
 {
-	return global_page_state(NR_ACTIVE_ANON)
-		+ global_page_state(NR_ACTIVE_FILE)
-		+ global_page_state(NR_INACTIVE_ANON)
-		+ global_page_state(NR_INACTIVE_FILE);
+	int nr;
+
+	nr = global_page_state(NR_ACTIVE_FILE) +
+	     global_page_state(NR_INACTIVE_FILE);
+
+	if (nr_swap_pages > 0)
+		nr += global_page_state(NR_ACTIVE_ANON) +
+		      global_page_state(NR_INACTIVE_ANON);
+
+	return nr;
+}
+
+unsigned long zone_reclaimable_pages(struct zone *zone)
+{
+	int nr;
+
+	nr = zone_page_state(zone, NR_ACTIVE_FILE) +
+	     zone_page_state(zone, NR_INACTIVE_FILE);
+
+	if (nr_swap_pages > 0)
+		nr += zone_page_state(zone, NR_ACTIVE_ANON) +
+		      zone_page_state(zone, NR_INACTIVE_ANON);
+
+	return nr;
 }
 
 #ifdef CONFIG_HIBERNATION
@@ -2239,7 +2266,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 
 	current->reclaim_state = &reclaim_state;
 
-	lru_pages = global_lru_pages();
+	lru_pages = global_reclaimable_pages();
 	nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
 	/* If slab caches are huge, it's better to hit them first */
 	while (nr_slab >= lru_pages) {
@@ -2281,7 +2308,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 
 		reclaim_state.reclaimed_slab = 0;
 		shrink_slab(sc.nr_scanned, sc.gfp_mask,
-				global_lru_pages());
+			    global_reclaimable_pages());
 		sc.nr_reclaimed += reclaim_state.reclaimed_slab;
 		if (sc.nr_reclaimed >= nr_pages)
 			goto out;
@@ -2298,7 +2325,8 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 	if (!sc.nr_reclaimed) {
 		do {
 			reclaim_state.reclaimed_slab = 0;
-			shrink_slab(nr_pages, sc.gfp_mask, global_lru_pages());
+			shrink_slab(nr_pages, sc.gfp_mask,
+				    global_reclaimable_pages());
 			sc.nr_reclaimed += reclaim_state.reclaimed_slab;
 		} while (sc.nr_reclaimed < nr_pages &&
 			 reclaim_state.reclaimed_slab > 0);
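What the vmscan.c hunks above boil down to: anonymous LRU pages are counted as reclaimable only when swap space exists, because without swap they have nowhere to go, while file-backed pages are always counted. A minimal user-space sketch (not kernel code; the counter values and helper names are invented for illustration) contrasting the old zone_lru_pages() accounting with the new swap-aware logic:

/* Stand-alone sketch, NOT kernel code: shows how the new accounting drops
 * anonymous pages from the reclaimable estimate when no swap is available.
 * All counter values below are made-up placeholders. */
#include <stdio.h>

static unsigned long nr_active_file   = 1000;	/* placeholder vmstat counters */
static unsigned long nr_inactive_file = 3000;
static unsigned long nr_active_anon   = 2000;
static unsigned long nr_inactive_anon = 4000;
static long nr_swap_pages = 0;			/* pretend swap is disabled */

/* old behaviour: every LRU page was treated as reclaimable */
static unsigned long old_lru_pages(void)
{
	return nr_active_file + nr_inactive_file +
	       nr_active_anon + nr_inactive_anon;
}

/* new behaviour: anon pages only count when they can be swapped out */
static unsigned long new_reclaimable_pages(void)
{
	unsigned long nr = nr_active_file + nr_inactive_file;

	if (nr_swap_pages > 0)
		nr += nr_active_anon + nr_inactive_anon;

	return nr;
}

int main(void)
{
	printf("old estimate: %lu pages\n", old_lru_pages());		/* 10000 */
	printf("new estimate: %lu pages\n", new_reclaimable_pages());	/* 4000 without swap */
	return 0;
}

With swap disabled the sketch reports 4000 pages instead of 10000, mirroring how the dirty-threshold and kswapd heuristics patched above stop treating un-swappable anonymous memory as reclaimable.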
