Commit
---
yaml
---
r: 31376
b: refs/heads/master
c: 34aa133
h: refs/heads/master
v: v3
Christoph Lameter authored and Linus Torvalds committed Jun 30, 2006
1 parent b014267 commit 1e719d0
Showing 6 changed files with 11 additions and 51 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: f3dbd34460ff54962d3e3244b6bcb7f5295356e6
+refs/heads/master: 34aa1330f9b3c5783d269851d467326525207422
13 changes: 0 additions & 13 deletions trunk/Documentation/sysctl/vm.txt
@@ -28,7 +28,6 @@ Currently, these files are in /proc/sys/vm:
 - block_dump
 - drop-caches
 - zone_reclaim_mode
-- zone_reclaim_interval
 - panic_on_oom
 
 ==============================================================
@@ -167,18 +166,6 @@ use of files and builds up large slab caches. However, the slab
 shrink operation is global, may take a long time and free slabs
 in all nodes of the system.
 
-================================================================
-
-zone_reclaim_interval:
-
-The time allowed for off node allocations after zone reclaim
-has failed to reclaim enough pages to allow a local allocation.
-
-Time is set in seconds and set by default to 30 seconds.
-
-Reduce the interval if undesired off node allocations occur. However, too
-frequent scans will have a negative impact on off node allocation performance.
-
 =============================================================
 
 panic_on_oom
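The removed documentation describes a runtime tunable that, before this commit, was exposed as /proc/sys/vm/zone_reclaim_interval and was read and written in whole seconds. As a rough illustration of the how-to in that text, the small C sketch below writes a shorter interval; the value 10 is an arbitrary example, and on kernels containing this commit the open fails because the file no longer exists.

/*
 * Sketch only: write a new interval (in seconds) to the sysctl file that
 * this commit removes.  On kernels with this change the fopen() fails
 * because /proc/sys/vm/zone_reclaim_interval is gone.
 */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/vm/zone_reclaim_interval", "w");

        if (!f) {
                perror("zone_reclaim_interval");   /* expected on current kernels */
                return 1;
        }
        fprintf(f, "10\n");                        /* example: shrink the window to 10s */
        fclose(f);
        return 0;
}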
6 changes: 0 additions & 6 deletions trunk/include/linux/mmzone.h
@@ -178,12 +178,6 @@ struct zone {
 
        /* Zone statistics */
        atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
-       /*
-        * timestamp (in jiffies) of the last zone reclaim that did not
-        * result in freeing of pages. This is used to avoid repeated scans
-        * if all memory in the zone is in use.
-        */
-       unsigned long last_unsuccessful_zone_reclaim;
 
        /*
         * prev_priority holds the scanning priority for this zone. It is
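The struct zone field deleted above carried the state for a simple time-based backoff: a failed zone reclaim stamped the zone with the current jiffies value, and later allocations skipped local reclaim (going off node instead) until zone_reclaim_interval had elapsed, as the mm/vmscan.c hunks further down show. Below is a minimal, self-contained C sketch of that pattern; jiffies, HZ, time_before() and struct zone_stub are simplified stand-ins invented for illustration, not the kernel definitions.

/*
 * Minimal userspace sketch of the backoff that last_unsuccessful_zone_reclaim
 * implemented: after a failed reclaim, further reclaim attempts are skipped
 * (allocations go off node) until the interval has elapsed.
 */
#include <stdio.h>

#define HZ 250                                    /* assumed example tick rate */
#define time_before(a, b) ((long)((a) - (b)) < 0) /* simplified stand-in */

static unsigned long jiffies;                     /* fake clock, advanced by hand */
static unsigned long zone_reclaim_interval = 30 * HZ;  /* old default: 30 seconds */

struct zone_stub {
        unsigned long last_unsuccessful_zone_reclaim;
};

/* Return 1 if a new reclaim attempt is allowed, 0 while still inside the window. */
static int reclaim_allowed(const struct zone_stub *z)
{
        return !time_before(jiffies,
                            z->last_unsuccessful_zone_reclaim + zone_reclaim_interval);
}

/* Record a failed reclaim: start a fresh off-node window. */
static void record_failed_reclaim(struct zone_stub *z)
{
        z->last_unsuccessful_zone_reclaim = jiffies;
}

int main(void)
{
        struct zone_stub z = { 0 };

        record_failed_reclaim(&z);        /* reclaim just failed at t = 0 */

        jiffies += 10 * HZ;               /* 10 seconds later: still inside the window */
        printf("after 10s: %s\n", reclaim_allowed(&z) ? "retry reclaim" : "go off node");

        jiffies += 25 * HZ;               /* 35 seconds total: window has expired */
        printf("after 35s: %s\n", reclaim_allowed(&z) ? "retry reclaim" : "go off node");
        return 0;
}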
1 change: 0 additions & 1 deletion trunk/include/linux/swap.h
@@ -189,7 +189,6 @@ extern long vm_total_pages;
 
 #ifdef CONFIG_NUMA
 extern int zone_reclaim_mode;
-extern int zone_reclaim_interval;
 extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
 #else
 #define zone_reclaim_mode 0
9 changes: 0 additions & 9 deletions trunk/kernel/sysctl.c
@@ -933,15 +933,6 @@ static ctl_table vm_table[] = {
                .strategy = &sysctl_intvec,
                .extra1 = &zero,
        },
-       {
-               .ctl_name = VM_ZONE_RECLAIM_INTERVAL,
-               .procname = "zone_reclaim_interval",
-               .data = &zone_reclaim_interval,
-               .maxlen = sizeof(zone_reclaim_interval),
-               .mode = 0644,
-               .proc_handler = &proc_dointvec_jiffies,
-               .strategy = &sysctl_jiffies,
-       },
 #endif
 #ifdef CONFIG_X86_32
        {
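The entry removed here used proc_dointvec_jiffies and sysctl_jiffies, so the value was stored internally in jiffies (30 * HZ by default, as seen in mm/vmscan.c below) while /proc/sys/vm/zone_reclaim_interval was read and written in whole seconds. The short C sketch below walks through that unit conversion; HZ = 250 is only an assumed example tick rate (real configurations use 100, 250, 1000, and so on) and the helper names are invented for illustration.

/* Sketch of the seconds <-> jiffies conversion implied by proc_dointvec_jiffies. */
#include <stdio.h>

#define HZ 250                            /* assumed example tick rate */

static unsigned long seconds_to_jiffies(unsigned long secs)
{
        return secs * HZ;
}

static unsigned long jiffies_to_seconds(unsigned long jiffies)
{
        return jiffies / HZ;
}

int main(void)
{
        unsigned long zone_reclaim_interval = 30 * HZ;   /* stored value in jiffies */

        printf("shown by /proc read: %lu seconds\n",
               jiffies_to_seconds(zone_reclaim_interval));   /* prints 30 */
        printf("writing 10 stores:   %lu jiffies\n",
               seconds_to_jiffies(10));                      /* prints 2500 with HZ=250 */
        return 0;
}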
31 changes: 10 additions & 21 deletions trunk/mm/vmscan.c
@@ -1517,11 +1517,6 @@ int zone_reclaim_mode __read_mostly;
 #define RECLAIM_SWAP (1<<2)   /* Swap pages out during reclaim */
 #define RECLAIM_SLAB (1<<3)   /* Do a global slab shrink if the zone is out of memory */
 
-/*
- * Minimum time between zone reclaim scans
- */
-int zone_reclaim_interval __read_mostly = 30*HZ;
-
 /*
  * Priority for ZONE_RECLAIM. This determines the fraction of pages
  * of a node considered for each zone_reclaim. 4 scans 1/16th of
@@ -1587,16 +1582,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 
        p->reclaim_state = NULL;
        current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
-
-       if (nr_reclaimed == 0) {
-               /*
-                * We were unable to reclaim enough pages to stay on node. We
-                * now allow off node accesses for a certain time period before
-                * trying again to reclaim pages from the local zone.
-                */
-               zone->last_unsuccessful_zone_reclaim = jiffies;
-       }
-
        return nr_reclaimed >= nr_pages;
 }
 
@@ -1606,13 +1591,17 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
        int node_id;
 
        /*
-        * Do not reclaim if there was a recent unsuccessful attempt at zone
-        * reclaim. In that case we let allocations go off node for the
-        * zone_reclaim_interval. Otherwise we would scan for each off-node
-        * page allocation.
+        * Do not reclaim if there are not enough reclaimable pages in this
+        * zone that would satisfy this allocation.
+        *
+        * All unmapped pagecache pages are reclaimable.
+        *
+        * Both counters may be temporarily off a bit so we use
+        * SWAP_CLUSTER_MAX as the boundary. It may also be good to
+        * leave a few frequently used unmapped pagecache pages around.
         */
-       if (time_before(jiffies,
-               zone->last_unsuccessful_zone_reclaim + zone_reclaim_interval))
+       if (zone_page_state(zone, NR_FILE_PAGES) -
+           zone_page_state(zone, NR_FILE_MAPPED) < SWAP_CLUSTER_MAX)
                return 0;
 
        /*
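The new early-exit test above replaces the time-based backoff with a cheap capacity check: zone_reclaim() only proceeds when the zone holds at least SWAP_CLUSTER_MAX unmapped pagecache pages (NR_FILE_PAGES minus NR_FILE_MAPPED), i.e. when there is plausibly something inexpensive to reclaim locally. The following self-contained C sketch mirrors that arithmetic; struct zone_stub, the helper name and the counter values are invented for illustration, and SWAP_CLUSTER_MAX is hard-coded to the kernel's value of 32.

/*
 * Sketch of the replacement heuristic: bail out of zone reclaim unless the
 * zone has at least SWAP_CLUSTER_MAX unmapped pagecache pages, i.e. file
 * pages that are not mapped into any process and are therefore cheap to drop.
 */
#include <stdio.h>

#define SWAP_CLUSTER_MAX 32               /* kernel's reclaim batch size */

enum zone_stat_item { NR_FILE_PAGES, NR_FILE_MAPPED, NR_VM_ZONE_STAT_ITEMS };

struct zone_stub {
        long vm_stat[NR_VM_ZONE_STAT_ITEMS];
};

/* Mirror of the new early-exit test in zone_reclaim(). */
static int worth_trying_zone_reclaim(const struct zone_stub *z)
{
        long unmapped = z->vm_stat[NR_FILE_PAGES] - z->vm_stat[NR_FILE_MAPPED];

        return unmapped >= SWAP_CLUSTER_MAX;
}

int main(void)
{
        /* Mostly mapped pagecache: only 10 unmapped pages, so skip reclaim. */
        struct zone_stub busy  = { .vm_stat = { [NR_FILE_PAGES] = 1000,
                                                [NR_FILE_MAPPED] = 990 } };
        /* Plenty of unmapped pagecache: 4000 pages, so reclaim is worthwhile. */
        struct zone_stub cachy = { .vm_stat = { [NR_FILE_PAGES] = 5000,
                                                [NR_FILE_MAPPED] = 1000 } };

        printf("busy zone:  %s\n", worth_trying_zone_reclaim(&busy)  ? "reclaim" : "skip");
        printf("cachy zone: %s\n", worth_trying_zone_reclaim(&cachy) ? "reclaim" : "skip");
        return 0;
}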
