---
r: 105337
b: refs/heads/master
c: b69a728
h: refs/heads/master
i:
  105335: 803b910
v: v3
Adrian Bunk authored and Linus Torvalds committed Jul 24, 2008
1 parent 23697bb commit 768ec39
Showing 2 changed files with 14 additions and 13 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 2be0ffe2b29bd31d3debd0877797892ff2d91f4c
+refs/heads/master: b69a7288ea7bf171328f313f0edae629f50e3bdb
25 changes: 13 additions & 12 deletions trunk/mm/page_alloc.c
@@ -153,9 +153,9 @@ static unsigned long __meminitdata dma_reserve;
 static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
 static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
 #endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
-unsigned long __initdata required_kernelcore;
+static unsigned long __initdata required_kernelcore;
 static unsigned long __initdata required_movablecore;
-unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
+static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
 
 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
 int movable_zone;
@@ -674,9 +674,9 @@ static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
  * Note that start_page and end_pages are not aligned on a pageblock
  * boundary. If alignment is required, use move_freepages_block()
  */
-int move_freepages(struct zone *zone,
-			struct page *start_page, struct page *end_page,
-			int migratetype)
+static int move_freepages(struct zone *zone,
+			  struct page *start_page, struct page *end_page,
+			  int migratetype)
 {
 	struct page *page;
 	unsigned long order;
@@ -715,7 +715,8 @@ int move_freepages(struct zone *zone,
 	return pages_moved;
 }
 
-int move_freepages_block(struct zone *zone, struct page *page, int migratetype)
+static int move_freepages_block(struct zone *zone, struct page *page,
+				int migratetype)
 {
 	unsigned long start_pfn, end_pfn;
 	struct page *start_page, *end_page;
@@ -2652,7 +2653,7 @@ static int zone_batchsize(struct zone *zone)
 	return batch;
 }
 
-inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
+static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
 {
 	struct per_cpu_pages *pcp;
 
@@ -3099,7 +3100,7 @@ void __meminit get_pfn_range_for_nid(unsigned int nid,
  * assumption is made that zones within a node are ordered in monotonic
  * increasing memory addresses so that the "highest" populated zone is used
  */
-void __init find_usable_zone_for_movable(void)
+static void __init find_usable_zone_for_movable(void)
 {
 	int zone_index;
 	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
@@ -3125,7 +3126,7 @@ void __init find_usable_zone_for_movable(void)
  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
  * zones within a node are in order of monotonic increases memory addresses
  */
-void __meminit adjust_zone_range_for_zone_movable(int nid,
+static void __meminit adjust_zone_range_for_zone_movable(int nid,
 					unsigned long zone_type,
 					unsigned long node_start_pfn,
 					unsigned long node_end_pfn,
@@ -3186,7 +3187,7 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
  * then all holes in the requested range will be accounted for.
  */
-unsigned long __meminit __absent_pages_in_range(int nid,
+static unsigned long __meminit __absent_pages_in_range(int nid,
 				unsigned long range_start_pfn,
 				unsigned long range_end_pfn)
 {
@@ -3723,7 +3724,7 @@ static void __init sort_node_map(void)
 }
 
 /* Find the lowest pfn for a node */
-unsigned long __init find_min_pfn_for_node(int nid)
+static unsigned long __init find_min_pfn_for_node(int nid)
 {
 	int i;
 	unsigned long min_pfn = ULONG_MAX;
@@ -3795,7 +3796,7 @@ static unsigned long __init early_calculate_totalpages(void)
  * memory. When they don't, some nodes will have more kernelcore than
  * others
  */
-void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
+static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
 {
 	int i, nid;
 	unsigned long usable_startpfn;

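Every hunk in this commit applies the same C pattern: a variable or function that is referenced only inside mm/page_alloc.c is given internal linkage with the `static` keyword, so it no longer occupies a slot in the kernel's global symbol namespace and the compiler can warn if it ever becomes unused. A minimal user-space sketch of that pattern follows; the file and symbol names are illustrative only, not taken from the kernel source.

/* static_linkage_demo.c -- hypothetical example of the commit's pattern */
#include <stdio.h>

/* Internal linkage: this variable is visible only inside this
 * translation unit, mirroring the change
 * "unsigned long ... zone_movable_pfn[]" -> "static unsigned long ...". */
static unsigned long free_pages_seen;

/* Internal linkage for a helper, mirroring
 * "int move_freepages(...)" -> "static int move_freepages(...)".
 * No other .c file can call or link against this function. */
static unsigned long count_free(unsigned long total, unsigned long used)
{
	return total - used;
}

int main(void)
{
	free_pages_seen = count_free(4096UL, 1024UL);
	printf("%lu\n", free_pages_seen);	/* prints 3072 */
	return 0;
}

Because `count_free` and `free_pages_seen` have internal linkage, a second file defining symbols with the same names would link cleanly, which is exactly why needlessly global kernel symbols are made static.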