Commit 3481be3
---
r: 31753
b: refs/heads/master
c: 9614634
h: refs/heads/master
i:
  31751: 7ccc06e
v: v3
Christoph Lameter authored and Linus Torvalds committed Jul 3, 2006
1 parent 252fd2a commit 3481be3
Showing 8 changed files with 70 additions and 15 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: cb6358eb69d9854f65f2979c0ce9280eee041828
+refs/heads/master: 9614634fe6a138fd8ae044950700d2af8d203f97
14 changes: 14 additions & 0 deletions trunk/Documentation/sysctl/vm.txt
@@ -28,6 +28,7 @@ Currently, these files are in /proc/sys/vm:
 - block_dump
 - drop-caches
 - zone_reclaim_mode
+- min_unmapped_ratio
 - panic_on_oom
 
 ==============================================================
@@ -168,6 +169,19 @@ in all nodes of the system.
 
 =============================================================
 
+min_unmapped_ratio:
+
+This is available only on NUMA kernels.
+
+A percentage of the file backed pages in each zone. Zone reclaim will only
+occur if more than this percentage of pages are file backed and unmapped.
+This is to ensure that a minimal amount of local pages is still available for
+file I/O even if the node is overallocated.
+
+The default is 1 percent.
+
+=============================================================
+
 panic_on_oom
 
 This enables or disables panic on out-of-memory feature. If this is set to 1,
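For a concrete sense of what the new tunable means, the sketch below (plain userspace C, not part of this commit; the zone size is hypothetical) works through the arithmetic the patch performs per zone: the percentage is converted once into a page count, and zone reclaim only runs while more unmapped file-backed pages than that remain.

    /*
     * Illustrative userspace arithmetic only -- not kernel code. The zone
     * size is a made-up example; the formula mirrors the one this commit
     * adds in free_area_init_core().
     */
    #include <stdio.h>

    int main(void)
    {
            unsigned long present_pages = 262144;   /* hypothetical 1 GB zone, 4 KB pages */
            unsigned long ratio = 1;                /* sysctl default: 1 percent */
            unsigned long threshold = present_pages * ratio / 100;

            /* 2621 pages, i.e. roughly 10 MB kept available for file I/O */
            printf("zone reclaim threshold: %lu pages\n", threshold);
            return 0;
    }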
6 changes: 6 additions & 0 deletions trunk/include/linux/mmzone.h
@@ -150,6 +150,10 @@ struct zone {
 	unsigned long		lowmem_reserve[MAX_NR_ZONES];
 
 #ifdef CONFIG_NUMA
+	/*
+	 * zone reclaim becomes active if more unmapped pages exist.
+	 */
+	unsigned long		min_unmapped_ratio;
 	struct per_cpu_pageset	*pageset[NR_CPUS];
 #else
 	struct per_cpu_pageset	pageset[NR_CPUS];
@@ -414,6 +418,8 @@ int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
 					void __user *, size_t *, loff_t *);
 int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *,
 					void __user *, size_t *, loff_t *);
+int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
+			struct file *, void __user *, size_t *, loff_t *);
 
 #include <linux/topology.h>
 /* Returns the number of the current Node. */
1 change: 1 addition & 0 deletions trunk/include/linux/swap.h
@@ -189,6 +189,7 @@ extern long vm_total_pages;
 
 #ifdef CONFIG_NUMA
 extern int zone_reclaim_mode;
+extern int sysctl_min_unmapped_ratio;
 extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
 #else
 #define zone_reclaim_mode 0
2 changes: 1 addition & 1 deletion trunk/include/linux/sysctl.h
@@ -188,7 +188,7 @@ enum
 	VM_DROP_PAGECACHE=29,	/* int: nuke lots of pagecache */
 	VM_PERCPU_PAGELIST_FRACTION=30,/* int: fraction of pages in each percpu_pagelist */
 	VM_ZONE_RECLAIM_MODE=31, /* reclaim local zone memory before going off node */
-	VM_ZONE_RECLAIM_INTERVAL=32, /* time period to wait after reclaim failure */
+	VM_MIN_UNMAPPED=32,	/* Set min percent of unmapped pages */
 	VM_PANIC_ON_OOM=33,	/* panic at out-of-memory */
 	VM_VDSO_ENABLED=34,	/* map VDSO into new processes? */
 };
11 changes: 11 additions & 0 deletions trunk/kernel/sysctl.c
@@ -932,6 +932,17 @@ static ctl_table vm_table[] = {
 		.strategy	= &sysctl_intvec,
 		.extra1		= &zero,
 	},
+	{
+		.ctl_name	= VM_MIN_UNMAPPED,
+		.procname	= "min_unmapped_ratio",
+		.data		= &sysctl_min_unmapped_ratio,
+		.maxlen		= sizeof(sysctl_min_unmapped_ratio),
+		.mode		= 0644,
+		.proc_handler	= &sysctl_min_unmapped_ratio_sysctl_handler,
+		.strategy	= &sysctl_intvec,
+		.extra1		= &zero,
+		.extra2		= &one_hundred,
+	},
 #endif
 #ifdef CONFIG_X86_32
 	{
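The new table entry is bounds-checked by proc_dointvec_minmax() against the zero/one_hundred limits wired through .extra1/.extra2, so writes outside 0..100 are not stored. A minimal userspace sketch of tuning the knob at runtime (hypothetical usage, assuming a NUMA kernel that exposes the file) might look like:

    /* Hypothetical usage sketch: raise min_unmapped_ratio to 5 percent. */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/vm/min_unmapped_ratio", "w");

            if (!f) {
                    perror("min_unmapped_ratio");   /* absent on non-NUMA kernels */
                    return 1;
            }
            fprintf(f, "5\n");                      /* values outside 0..100 are not stored */
            fclose(f);
            return 0;
    }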
22 changes: 22 additions & 0 deletions trunk/mm/page_alloc.c
@@ -2005,6 +2005,10 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
 
 		zone->spanned_pages = size;
 		zone->present_pages = realsize;
+#ifdef CONFIG_NUMA
+		zone->min_unmapped_ratio = (realsize*sysctl_min_unmapped_ratio)
+						/ 100;
+#endif
 		zone->name = zone_names[j];
 		spin_lock_init(&zone->lock);
 		spin_lock_init(&zone->lru_lock);
@@ -2298,6 +2302,24 @@ int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
 	return 0;
 }
 
+#ifdef CONFIG_NUMA
+int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
+	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+{
+	struct zone *zone;
+	int rc;
+
+	rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
+	if (rc)
+		return rc;
+
+	for_each_zone(zone)
+		zone->min_unmapped_ratio = (zone->present_pages *
+				sysctl_min_unmapped_ratio) / 100;
+	return 0;
+}
+#endif
+
 /*
  * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
  * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
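A design note on the handler above: the percentage is never consulted on the reclaim fast path. Each zone caches the result in pages, and a write to the sysctl recomputes every zone's cached value. A standalone mock of that recomputation (illustrative types and zone sizes, not kernel API) is below.

    /* Userspace mock of the recompute loop; names and sizes are made up. */
    #include <stdio.h>

    struct mock_zone {
            const char *name;
            unsigned long present_pages;
            unsigned long min_unmapped_ratio;       /* cached in pages, not percent */
    };

    static struct mock_zone zones[] = {
            { "DMA",    4096,   0 },
            { "Normal", 229376, 0 },
    };

    static void recompute(unsigned long percent)
    {
            unsigned int i;

            /* Stands in for for_each_zone(): refresh every zone's threshold. */
            for (i = 0; i < sizeof(zones) / sizeof(zones[0]); i++) {
                    zones[i].min_unmapped_ratio =
                            zones[i].present_pages * percent / 100;
                    printf("%s: %lu pages\n", zones[i].name,
                           zones[i].min_unmapped_ratio);
            }
    }

    int main(void)
    {
            recompute(1);   /* the default of 1 percent */
            return 0;
    }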
27 changes: 14 additions & 13 deletions trunk/mm/vmscan.c
@@ -1503,10 +1503,6 @@ module_init(kswapd_init)
  *
  * If non-zero call zone_reclaim when the number of free pages falls below
  * the watermarks.
- *
- * In the future we may add flags to the mode. However, the page allocator
- * should only have to check that zone_reclaim_mode != 0 before calling
- * zone_reclaim().
  */
 int zone_reclaim_mode __read_mostly;
 
@@ -1523,6 +1519,12 @@ int zone_reclaim_mode __read_mostly;
  */
 #define ZONE_RECLAIM_PRIORITY 4
 
+/*
+ * Percentage of pages in a zone that must be unmapped for zone_reclaim to
+ * occur.
+ */
+int sysctl_min_unmapped_ratio = 1;
+
 /*
  * Try to free up some pages from this zone through reclaim.
  */
@@ -1590,18 +1592,17 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	int node_id;
 
 	/*
-	 * Do not reclaim if there are not enough reclaimable pages in this
-	 * zone that would satisfy this allocation.
+	 * Zone reclaim reclaims unmapped file backed pages.
 	 *
-	 * All unmapped pagecache pages are reclaimable.
-	 *
-	 * Both counters may be temporarily off a bit so we use
-	 * SWAP_CLUSTER_MAX as the boundary. It may also be good to
-	 * leave a few frequently used unmapped pagecache pages around.
+	 * A small portion of unmapped file backed pages is needed for
+	 * file I/O otherwise pages read by file I/O will be immediately
+	 * thrown out if the zone is overallocated. So we do not reclaim
+	 * if less than a specified percentage of the zone is used by
+	 * unmapped file backed pages.
 	 */
 	if (zone_page_state(zone, NR_FILE_PAGES) -
-	    zone_page_state(zone, NR_FILE_MAPPED) < SWAP_CLUSTER_MAX)
-		return 0;
+	    zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_ratio)
+		return 0;
 
 	/*
 	 * Avoid concurrent zone reclaims, do not reclaim in a zone that does
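The net effect of the vmscan.c change is that the old fixed SWAP_CLUSTER_MAX cutoff becomes a per-zone, admin-tunable one. A self-contained sketch of the gate (plain C with stand-in fields; the real code reads the zone's vm_stat counters) follows.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the two zone counters and the cached threshold. */
    struct zone_snapshot {
            unsigned long nr_file_pages;            /* NR_FILE_PAGES  */
            unsigned long nr_file_mapped;           /* NR_FILE_MAPPED */
            unsigned long min_unmapped_ratio;       /* threshold, in pages */
    };

    /*
     * Mirrors the new check: bail out of zone reclaim unless the zone
     * still holds more unmapped file-backed pages than the threshold.
     */
    static bool zone_reclaim_allowed(const struct zone_snapshot *z)
    {
            unsigned long unmapped = z->nr_file_pages - z->nr_file_mapped;

            return unmapped > z->min_unmapped_ratio;
    }

    int main(void)
    {
            struct zone_snapshot z = { 100000, 98000, 2621 };

            /* 2000 unmapped pages <= 2621, so reclaim is skipped here. */
            printf("reclaim: %s\n", zone_reclaim_allowed(&z) ? "yes" : "no");
            return 0;
    }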
