[PATCH] reduce MAX_NR_ZONES: use enum to define zones, reformat and comment

Use enum for zones and reformat zones dependent information

Add comments explaining the use of zones and add an enum zone_type for
zone numbers.

Line up information that will be #ifdef'd by the following patches.

[akpm@osdl.org: comment cleanups]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Christoph Lameter authored and Linus Torvalds committed Sep 26, 2006
1 parent 98d2b0e commit 2f1b624
Showing 3 changed files with 69 additions and 28 deletions.
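
The heart of the patch is replacing the per-zone #define constants with an
enum whose final member doubles as the zone count. A minimal standalone
sketch of the pattern (plain C, not the kernel code itself; the zone_name()
helper is invented for illustration):

#include <stdio.h>

enum zone_type {
        ZONE_DMA,
        ZONE_DMA32,
        ZONE_NORMAL,
        ZONE_HIGHMEM,
        MAX_NR_ZONES    /* always last, so it equals the number of zones */
};

/* Taking enum zone_type rather than a bare unsigned long documents
 * intent and lets the compiler and checkers flag suspicious callers. */
static const char *zone_name(enum zone_type zt)
{
        static const char *names[MAX_NR_ZONES] = {
                "DMA", "DMA32", "Normal", "HighMem"
        };
        return zt < MAX_NR_ZONES ? names[zt] : "?";
}

int main(void)
{
        enum zone_type zt;

        for (zt = ZONE_DMA; zt < MAX_NR_ZONES; zt++)
                printf("%d: %s\n", (int)zt, zone_name(zt));
        return 0;
}

Because MAX_NR_ZONES is the last enumerator, adding or #ifdef'ing out a zone
updates the count automatically, which is exactly what the follow-up patches
in this series rely on.
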
7 changes: 4 additions & 3 deletions include/linux/mm.h
@@ -470,7 +470,7 @@ void split_page(struct page *page, unsigned int order);
#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
#define ZONETABLE_MASK ((1UL << ZONETABLE_SHIFT) - 1)

-static inline unsigned long page_zonenum(struct page *page)
+static inline enum zone_type page_zonenum(struct page *page)
{
return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}
@@ -499,11 +499,12 @@ static inline unsigned long page_to_section(struct page *page)
return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}

-static inline void set_page_zone(struct page *page, unsigned long zone)
+static inline void set_page_zone(struct page *page, enum zone_type zone)
{
page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}
+
static inline void set_page_node(struct page *page, unsigned long node)
{
page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
@@ -515,7 +516,7 @@ static inline void set_page_section(struct page *page, unsigned long section)
page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

-static inline void set_page_links(struct page *page, unsigned long zone,
+static inline void set_page_links(struct page *page, enum zone_type zone,
unsigned long node, unsigned long pfn)
{
set_page_zone(page, zone);
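
The mm.h changes above keep the zone number packed into a small bitfield of
page->flags; only the parameter and return types become enum zone_type. A
self-contained sketch of the same mask-and-shift scheme (the 2-bit width
mirrors ZONES_SHIFT from this patch, but the field offset and the demo_*
names are assumptions made for the example):

#include <assert.h>

#define ZONES_SHIFT     2                  /* ceil(log2(MAX_NR_ZONES)) */
#define ZONES_MASK      ((1UL << ZONES_SHIFT) - 1)
#define ZONES_PGSHIFT   6                  /* demo offset; the kernel derives its own */

enum zone_type { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, ZONE_HIGHMEM, MAX_NR_ZONES };

struct demo_page {
        unsigned long flags;
};

static void demo_set_page_zone(struct demo_page *page, enum zone_type zone)
{
        /* Clear the old zone bits, then or in the new zone number. */
        page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
        page->flags |= ((unsigned long)zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static enum zone_type demo_page_zonenum(struct demo_page *page)
{
        return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

int main(void)
{
        struct demo_page p = { 0 };

        demo_set_page_zone(&p, ZONE_HIGHMEM);
        assert(demo_page_zonenum(&p) == ZONE_HIGHMEM);
        return 0;
}

demo_set_page_zone() clears the field before or-ing in the new value, so
stale zone bits can never survive a move between zones.
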
66 changes: 47 additions & 19 deletions include/linux/mmzone.h
@@ -88,14 +88,53 @@ struct per_cpu_pageset {
#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
#endif

-#define ZONE_DMA        0
-#define ZONE_DMA32      1
-#define ZONE_NORMAL     2
-#define ZONE_HIGHMEM    3
+enum zone_type {
+        /*
+         * ZONE_DMA is used when there are devices that are not able
+         * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
+         * carve out the portion of memory that is needed for these devices.
+         * The range is arch specific.
+         *
+         * Some examples
+         *
+         * Architecture           Limit
+         * ---------------------------
+         * parisc, ia64, sparc    <4G
+         * s390                   <2G
+         * arm26                  <48M
+         * arm                    Various
+         * alpha                  Unlimited or 0-16MB.
+         *
+         * i386, x86_64 and multiple other arches
+         *                        <16M.
+         */
+        ZONE_DMA,
+        /*
+         * x86_64 needs two ZONE_DMAs because it supports devices that are
+         * only able to do DMA to the lower 16M but also 32 bit devices that
+         * can only do DMA areas below 4G.
+         */
+        ZONE_DMA32,
+        /*
+         * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
+         * performed on pages in ZONE_NORMAL if the DMA devices support
+         * transfers to all addressable memory.
+         */
+        ZONE_NORMAL,
+        /*
+         * A memory area that is only addressable by the kernel through
+         * mapping portions into its own address space. This is for example
+         * used by i386 to allow the kernel to address the memory beyond
+         * 900MB. The kernel will set up special mappings (page
+         * table entries on i386) for each page that the kernel needs to
+         * access.
+         */
+        ZONE_HIGHMEM,

-#define MAX_NR_ZONES    4       /* Sync this with ZONES_SHIFT */
-#define ZONES_SHIFT     2       /* ceil(log2(MAX_NR_ZONES)) */
+        MAX_NR_ZONES
+};

+#define ZONES_SHIFT     2       /* ceil(log2(MAX_NR_ZONES)) */

/*
* When a memory allocation must conform to specific limitations (such
@@ -126,16 +165,6 @@ struct per_cpu_pageset {
/* #define GFP_ZONETYPES (GFP_ZONEMASK + 1) */ /* Non-loner */
#define GFP_ZONETYPES ((GFP_ZONEMASK + 1) / 2 + 1) /* Loner */

-/*
- * On machines where it is needed (eg PCs) we divide physical memory
- * into multiple physical zones. On a 32bit PC we have 4 zones:
- *
- * ZONE_DMA       < 16 MB       ISA DMA capable memory
- * ZONE_DMA32       0 MB        Empty
- * ZONE_NORMAL   16-896 MB      direct mapped by the kernel
- * ZONE_HIGHMEM   > 896 MB      only page cache and user processes
- */
-
struct zone {
/* Fields commonly accessed by the page allocator */
unsigned long free_pages;
@@ -266,7 +295,6 @@ struct zone {
char *name;
} ____cacheline_internodealigned_in_smp;

-
/*
* The "priority" of VM scanning is how much of the queues we will scan in one
* go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
@@ -373,12 +401,12 @@ static inline int populated_zone(struct zone *zone)
return (!!zone->present_pages);
}

-static inline int is_highmem_idx(int idx)
+static inline int is_highmem_idx(enum zone_type idx)
{
return (idx == ZONE_HIGHMEM);
}

-static inline int is_normal_idx(int idx)
+static inline int is_normal_idx(enum zone_type idx)
{
return (idx == ZONE_NORMAL);
}
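
With the enum in place, MAX_NR_ZONES can no longer drift out of sync with
the zone list, which is why the old "Sync this with ZONES_SHIFT" comment
could go. ZONES_SHIFT itself must still be maintained by hand. A hedged
sketch of a compile-time guard one could add (not part of this patch; the
negative-array-size typedef is the era-appropriate substitute for a static
assertion):

enum zone_type { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, ZONE_HIGHMEM, MAX_NR_ZONES };

#define ZONES_SHIFT     2       /* ceil(log2(MAX_NR_ZONES)) */

/* The array size turns negative, and compilation fails, as soon as
 * ZONES_SHIFT bits can no longer represent every zone number. */
typedef char zones_shift_is_wide_enough
        [(MAX_NR_ZONES <= (1 << ZONES_SHIFT)) ? 1 : -1];

int main(void)
{
        return 0;
}
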
24 changes: 18 additions & 6 deletions mm/page_alloc.c
@@ -68,7 +68,11 @@ static void __free_pages_ok(struct page *page, unsigned int order);
* TBD: should special case ZONE_DMA32 machines here - in those we normally
* don't need any ZONE_NORMAL reservation
*/
-int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 256, 256, 32 };
+int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
+        256,
+        256,
+        32
+};

EXPORT_SYMBOL(totalram_pages);

@@ -79,7 +83,13 @@ EXPORT_SYMBOL(totalram_pages);
struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly;
EXPORT_SYMBOL(zone_table);

-static char *zone_names[MAX_NR_ZONES] = { "DMA", "DMA32", "Normal", "HighMem" };
+static char *zone_names[MAX_NR_ZONES] = {
+        "DMA",
+        "DMA32",
+        "Normal",
+        "HighMem"
+};
+
int min_free_kbytes = 1024;

unsigned long __meminitdata nr_kernel_pages;
@@ -1487,7 +1497,9 @@ static void __meminit build_zonelists(pg_data_t *pgdat)

static void __meminit build_zonelists(pg_data_t *pgdat)
{
-int i, j, k, node, local_node;
+int i, node, local_node;
+enum zone_type k;
+enum zone_type j;

local_node = pgdat->node_id;
for (i = 0; i < GFP_ZONETYPES; i++) {
@@ -1675,8 +1687,8 @@ void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone,
}

#define ZONETABLE_INDEX(x, zone_nr) ((x << ZONES_SHIFT) | zone_nr)
-void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn,
-                unsigned long size)
+void zonetable_add(struct zone *zone, int nid, enum zone_type zid,
+                unsigned long pfn, unsigned long size)
{
unsigned long snum = pfn_to_section_nr(pfn);
unsigned long end = pfn_to_section_nr(pfn + size);
@@ -1960,7 +1972,7 @@ __meminit int init_currently_empty_zone(struct zone *zone,
static void __meminit free_area_init_core(struct pglist_data *pgdat,
unsigned long *zones_size, unsigned long *zholes_size)
{
-unsigned long j;
+enum zone_type j;
int nid = pgdat->node_id;
unsigned long zone_start_pfn = pgdat->node_start_pfn;
int ret;
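
For context on the arrays being reformatted above: sysctl_lowmem_reserve_ratio
has one slot per lower zone (hence MAX_NR_ZONES-1), and a ratio of N roughly
means a lower zone withholds 1/N of the pages sitting above it from
allocations that could have been satisfied by a higher zone. A
back-of-the-envelope sketch (the 2 GB highmem figure and 4 KB page size are
assumptions for the demo):

#include <stdio.h>

int main(void)
{
        /* i386-style box: 2 GB of highmem above ZONE_NORMAL, 4 KB pages. */
        unsigned long highmem_pages = 2UL * 1024 * 1024 * 1024 / 4096;
        unsigned long ratio = 32;       /* the ZONE_NORMAL slot of the array */

        printf("ZONE_NORMAL pages reserved from highmem allocations: %lu (~%lu MB)\n",
               highmem_pages / ratio,
               highmem_pages / ratio * 4096 / (1024 * 1024));
        return 0;
}

Breaking the initializers onto one line per zone is what lets the follow-up
patches wrap individual entries in #ifdefs once zones become configurable.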
