Commit c478469

```yaml
---
r: 207246
b: refs/heads/master
c: 25edde0
h: refs/heads/master
v: v3
```

KOSAKI Motohiro authored and Linus Torvalds committed Aug 10, 2010
1 parent 2fc6fd3 commit c478469
Showing 7 changed files with 1 addition and 113 deletions.
2 changes: 1 addition & 1 deletion [refs]
```diff
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: b898cc70019ce1835bbf6c47bdf978adc36faa42
+refs/heads/master: 25edde0332916ae706ccf83de688be57bcc844b7
```
5 changes: 0 additions & 5 deletions trunk/include/linux/memcontrol.h
```diff
@@ -98,11 +98,6 @@ extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
 /*
  * For memory reclaim.
  */
-extern int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem);
-extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
-                                                int priority);
-extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
-                                                int priority);
 int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg);
 int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg);
 unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
```
15 changes: 0 additions & 15 deletions trunk/include/linux/mmzone.h
```diff
@@ -347,21 +347,6 @@ struct zone {
         /* Zone statistics */
         atomic_long_t   vm_stat[NR_VM_ZONE_STAT_ITEMS];
 
-        /*
-         * prev_priority holds the scanning priority for this zone.  It is
-         * defined as the scanning priority at which we achieved our reclaim
-         * target at the previous try_to_free_pages() or balance_pgdat()
-         * invocation.
-         *
-         * We use prev_priority as a measure of how much stress page reclaim is
-         * under - it drives the swappiness decision: whether to unmap mapped
-         * pages.
-         *
-         * Access to this field is quite racy even on uniprocessor.  But
-         * it is expected to average out OK.
-         */
-        int prev_priority;
-
         /*
          * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
          * this zone's LRU.  Maintained by the pageout code.
```
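For context on what is being deleted: prev_priority was the kernel's memory of how hard reclaim had to push last time, and it fed the reclaim-mapped decision named in the comment above. A rough sketch of that heuristic, modeled on the old shrink_active_list() distress/swap_tendency calculation (the helper name and parameters here are illustrative, not code from this diff):

```c
/*
 * Illustrative sketch only -- roughly how kernels of this era turned
 * the remembered priority into pressure to unmap mapped pages.
 */
static int should_reclaim_mapped(int prev_priority, int priority,
                                 int mapped_ratio, int swappiness)
{
        /* Lower priority value = more urgent; the shift yields 0..100. */
        int urgent = prev_priority < priority ? prev_priority : priority;
        int distress = 100 >> urgent;

        /*
         * Blend the mapped-memory share, distress, and the swappiness
         * tunable; crossing 100 means start reclaiming mapped pages.
         */
        int swap_tendency = mapped_ratio / 2 + distress + swappiness;

        return swap_tendency >= 100;
}
```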
31 changes: 0 additions & 31 deletions trunk/mm/memcontrol.c
```diff
@@ -211,8 +211,6 @@ struct mem_cgroup {
          */
         spinlock_t reclaim_param_lock;
 
-        int prev_priority;      /* for recording reclaim priority */
-
         /*
          * While reclaiming in a hierarchy, we cache the last child we
          * reclaimed from.
@@ -858,35 +856,6 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
         return ret;
 }
 
-/*
- * prev_priority control...this will be used in memory reclaim path.
- */
-int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
-{
-        int prev_priority;
-
-        spin_lock(&mem->reclaim_param_lock);
-        prev_priority = mem->prev_priority;
-        spin_unlock(&mem->reclaim_param_lock);
-
-        return prev_priority;
-}
-
-void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
-{
-        spin_lock(&mem->reclaim_param_lock);
-        if (priority < mem->prev_priority)
-                mem->prev_priority = priority;
-        spin_unlock(&mem->reclaim_param_lock);
-}
-
-void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
-{
-        spin_lock(&mem->reclaim_param_lock);
-        mem->prev_priority = priority;
-        spin_unlock(&mem->reclaim_param_lock);
-}
-
 static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
 {
         unsigned long active;
```
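Note the asymmetry in the two setters being removed: mem_cgroup_note_reclaim_priority() only ever ratchets the value down (a numerically lower priority is more urgent), while mem_cgroup_record_reclaim_priority() overwrites unconditionally. A hypothetical call sequence, purely to illustrate the semantics (DEF_PRIORITY is 12 in kernels of this vintage):

```c
/* Hypothetical sequence illustrating note (min) vs. record (overwrite). */
mem_cgroup_record_reclaim_priority(mem, DEF_PRIORITY); /* prev_priority = 12 */
mem_cgroup_note_reclaim_priority(mem, 8);              /* 8 < 12: lowered to 8 */
mem_cgroup_note_reclaim_priority(mem, 10);             /* 10 > 8: stays 8 */
mem_cgroup_record_reclaim_priority(mem, 10);           /* unconditional: now 10 */
```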
2 changes: 0 additions & 2 deletions trunk/mm/page_alloc.c
```diff
@@ -4100,8 +4100,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                 zone_seqlock_init(zone);
                 zone->zone_pgdat = pgdat;
 
-                zone->prev_priority = DEF_PRIORITY;
-
                 zone_pcp_init(zone);
                 for_each_lru(l) {
                         INIT_LIST_HEAD(&zone->lru[l].list);
```
57 changes: 0 additions & 57 deletions trunk/mm/vmscan.c
```diff
@@ -1289,20 +1289,6 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
         return nr_reclaimed;
 }
 
-/*
- * We are about to scan this zone at a certain priority level.  If that priority
- * level is smaller (ie: more urgent) than the previous priority, then note
- * that priority level within the zone.  This is done so that when the next
- * process comes in to scan this zone, it will immediately start out at this
- * priority level rather than having to build up its own scanning priority.
- * Here, this priority affects only the reclaim-mapped threshold.
- */
-static inline void note_zone_scanning_priority(struct zone *zone, int priority)
-{
-        if (priority < zone->prev_priority)
-                zone->prev_priority = priority;
-}
-
 /*
  * This moves pages from the active list to the inactive list.
  *
@@ -1766,17 +1752,8 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
                 if (scanning_global_lru(sc)) {
                         if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                                 continue;
-                        note_zone_scanning_priority(zone, priority);
-
                         if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                                 continue;       /* Let kswapd poll it */
-                } else {
-                        /*
-                         * Ignore cpuset limitation here. We just want to reduce
-                         * # of used pages by us regardless of memory shortage.
-                         */
-                        mem_cgroup_note_reclaim_priority(sc->mem_cgroup,
-                                                        priority);
                 }
 
                 shrink_zone(priority, zone, sc);
@@ -1877,17 +1854,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
         if (priority < 0)
                 priority = 0;
 
-        if (scanning_global_lru(sc)) {
-                for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
-
-                        if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
-                                continue;
-
-                        zone->prev_priority = priority;
-                }
-        } else
-                mem_cgroup_record_reclaim_priority(sc->mem_cgroup, priority);
-
         delayacct_freepages_end();
         put_mems_allowed();
 
@@ -2053,22 +2019,12 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
                 .order = order,
                 .mem_cgroup = NULL,
         };
-        /*
-         * temp_priority is used to remember the scanning priority at which
-         * this zone was successfully refilled to
-         * free_pages == high_wmark_pages(zone).
-         */
-        int temp_priority[MAX_NR_ZONES];
-
 loop_again:
         total_scanned = 0;
         sc.nr_reclaimed = 0;
         sc.may_writepage = !laptop_mode;
         count_vm_event(PAGEOUTRUN);
 
-        for (i = 0; i < pgdat->nr_zones; i++)
-                temp_priority[i] = DEF_PRIORITY;
-
         for (priority = DEF_PRIORITY; priority >= 0; priority--) {
                 int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
                 unsigned long lru_pages = 0;
@@ -2136,9 +2092,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
                         if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                                 continue;
 
-                        temp_priority[i] = priority;
                         sc.nr_scanned = 0;
-                        note_zone_scanning_priority(zone, priority);
 
                         nid = pgdat->node_id;
                         zid = zone_idx(zone);
@@ -2211,16 +2165,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
                         break;
         }
 out:
-        /*
-         * Note within each zone the priority level at which this zone was
-         * brought into a happy state.  So that the next thread which scans this
-         * zone will start out at that priority level.
-         */
-        for (i = 0; i < pgdat->nr_zones; i++) {
-                struct zone *zone = pgdat->node_zones + i;
-
-                zone->prev_priority = temp_priority[i];
-        }
         if (!all_zones_ok) {
                 cond_resched();
 
@@ -2639,7 +2583,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
          */
         priority = ZONE_RECLAIM_PRIORITY;
         do {
-                note_zone_scanning_priority(zone, priority);
                 shrink_zone(priority, zone, &sc);
                 priority--;
         } while (priority >= 0 && sc.nr_reclaimed < nr_pages);
```
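Putting the vmscan.c hunks together: each reclaim pass "noted" the most urgent priority while scanning (note_zone_scanning_priority(), temp_priority[]), then "recorded" the priority at which the zone finally reached a happy state, so the next scanner could start there instead of working down from DEF_PRIORITY. A compressed, illustrative sketch of that lifecycle (shrink_zone_met_target() is a hypothetical stand-in for the real shrink-and-check logic, not a kernel function):

```c
/* Illustrative sketch of the per-pass lifecycle this commit removes. */
static void reclaim_pass(struct zone *zone)
{
        int priority;

        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
                /* "note": ratchet down to the most urgent level seen. */
                if (priority < zone->prev_priority)
                        zone->prev_priority = priority;

                if (shrink_zone_met_target(zone, priority))
                        break;
        }

        /* "record": keep the level at which the target was finally met. */
        zone->prev_priority = priority < 0 ? 0 : priority;
}
```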
2 changes: 0 additions & 2 deletions trunk/mm/vmstat.c
```diff
@@ -853,11 +853,9 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
         }
         seq_printf(m,
                    "\n all_unreclaimable: %u"
-                   "\n prev_priority: %i"
                    "\n start_pfn: %lu"
                    "\n inactive_ratio: %u",
                    zone->all_unreclaimable,
-                   zone->prev_priority,
                    zone->zone_start_pfn,
                    zone->inactive_ratio);
         seq_putc(m, '\n');
```
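The user-visible effect of the vmstat.c hunk is one fewer line per zone in /proc/zoneinfo. Before this commit, the per-zone footer looked roughly like this (values are illustrative); afterwards, the prev_priority line simply disappears:

```
 all_unreclaimable: 0
 prev_priority: 12
 start_pfn: 4096
 inactive_ratio: 3
```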
