---
r: 164463
b: refs/heads/master
c: 5f8dcc2
h: refs/heads/master
i:
  164461: 014b1b8
  164459: 936b470
  164455: 15952aa
  164447: 2f36e9a
v: v3
Mel Gorman authored and Linus Torvalds committed Sep 22, 2009
1 parent a94950b commit f773c7b
Showing 3 changed files with 64 additions and 49 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 5d863b89688e5811cd9e5bd0082cb38abe03adf3
+refs/heads/master: 5f8dcc21211a3d4e3a7a5ca366b469fb88117f61
5 changes: 4 additions & 1 deletion trunk/include/linux/mmzone.h
@@ -38,6 +38,7 @@
 #define MIGRATE_UNMOVABLE     0
 #define MIGRATE_RECLAIMABLE   1
 #define MIGRATE_MOVABLE       2
+#define MIGRATE_PCPTYPES      3 /* the number of types on the pcp lists */
 #define MIGRATE_RESERVE       3
 #define MIGRATE_ISOLATE       4 /* can't allocate from here */
 #define MIGRATE_TYPES         5
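
The MIGRATE_PCPTYPES addition above is doing something subtle: it deliberately shares the value 3 with MIGRATE_RESERVE, so only the first three migrate types (unmovable, reclaimable, movable) get their own per-cpu list, and a single comparison against MIGRATE_PCPTYPES decides whether a type has to be mapped onto one of those lists. The standalone userspace sketch below is not part of the commit; it mirrors the classification done in the free_hot_cold_page() hunk further down, and the helper name pcp_list_index() is invented for illustration.

#include <stdio.h>

#define MIGRATE_UNMOVABLE	0
#define MIGRATE_RECLAIMABLE	1
#define MIGRATE_MOVABLE		2
#define MIGRATE_PCPTYPES	3	/* the number of types on the pcp lists */
#define MIGRATE_RESERVE		3
#define MIGRATE_ISOLATE		4	/* can't allocate from here */
#define MIGRATE_TYPES		5

/* Invented helper: map a pageblock migrate type to the pcp list that
 * would hold it, mirroring the free_hot_cold_page() logic below. */
static int pcp_list_index(int migratetype)
{
	if (migratetype >= MIGRATE_PCPTYPES) {
		if (migratetype == MIGRATE_ISOLATE)
			return -1;		/* bypasses the pcp lists entirely */
		return MIGRATE_MOVABLE;		/* RESERVE is treated as movable */
	}
	return migratetype;
}

int main(void)
{
	printf("%d %d %d\n",
	       pcp_list_index(MIGRATE_RECLAIMABLE),	/* 1 */
	       pcp_list_index(MIGRATE_RESERVE),		/* 2 */
	       pcp_list_index(MIGRATE_ISOLATE));	/* -1 */
	return 0;
}

Keeping RESERVE and ISOLATE off the pcp lists is what caps the array in the next hunk at three entries per cpu per zone.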
@@ -169,7 +170,9 @@ struct per_cpu_pages {
 	int count;		/* number of pages in the list */
 	int high;		/* high watermark, emptying needed */
 	int batch;		/* chunk size for buddy add/remove */
-	struct list_head list;	/* the list of pages */
+
+	/* Lists of pages, one per migrate type stored on the pcp-lists */
+	struct list_head lists[MIGRATE_PCPTYPES];
 };
 
 struct per_cpu_pageset {
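
The cost of this structure change is easy to quantify: each struct list_head is two pointers, so replacing one embedded list with an array of MIGRATE_PCPTYPES lists adds two list heads per cpu per zone. A quick standalone check, with simplified field layout copied from the hunk above (actual padding is ABI-dependent; on LP64 this prints old=32 new=64):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define MIGRATE_PCPTYPES 3

/* Simplified copies of the old and new layouts from the hunk above */
struct per_cpu_pages_old {
	int count, high, batch;
	struct list_head list;
};

struct per_cpu_pages_new {
	int count, high, batch;
	struct list_head lists[MIGRATE_PCPTYPES];
};

int main(void)
{
	/* Two extra list heads per cpu per zone on LP64 */
	printf("old=%zu new=%zu\n",
	       sizeof(struct per_cpu_pages_old),
	       sizeof(struct per_cpu_pages_new));
	return 0;
}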
106 changes: 59 additions & 47 deletions trunk/mm/page_alloc.c
@@ -511,7 +511,7 @@ static inline int free_pages_check(struct page *page)
 }
 
 /*
- * Frees a list of pages.
+ * Frees a number of pages from the PCP lists
  * Assumes all pages on list are in same zone, and of same order.
  * count is the number of pages to free.
  *
@@ -521,23 +521,36 @@ static inline int free_pages_check(struct page *page)
  * And clear the zone's pages_scanned counter, to hold off the "all pages are
  * pinned" detection logic.
  */
-static void free_pages_bulk(struct zone *zone, int count,
-					struct list_head *list, int order)
+static void free_pcppages_bulk(struct zone *zone, int count,
+					struct per_cpu_pages *pcp)
 {
+	int migratetype = 0;
+
 	spin_lock(&zone->lock);
 	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
 	zone->pages_scanned = 0;
 
-	__mod_zone_page_state(zone, NR_FREE_PAGES, count << order);
+	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
 	while (count--) {
 		struct page *page;
+		struct list_head *list;
+
+		/*
+		 * Remove pages from lists in a round-robin fashion. This spinning
+		 * around potentially empty lists is bloody awful, alternatives that
+		 * don't suck are welcome
+		 */
+		do {
+			if (++migratetype == MIGRATE_PCPTYPES)
+				migratetype = 0;
+			list = &pcp->lists[migratetype];
+		} while (list_empty(list));
 
-		VM_BUG_ON(list_empty(list));
 		page = list_entry(list->prev, struct page, lru);
 		/* have to delete it as __free_one_page list manipulates */
 		list_del(&page->lru);
-		trace_mm_page_pcpu_drain(page, order, page_private(page));
-		__free_one_page(page, zone, order, page_private(page));
+		trace_mm_page_pcpu_drain(page, 0, migratetype);
+		__free_one_page(page, zone, 0, migratetype);
 	}
 	spin_unlock(&zone->lock);
 }
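
The round-robin selection above is easier to see in isolation. The invariant that keeps the inner do/while from spinning forever is that callers never ask free_pcppages_bulk() to free more pages than are actually queued across the lists, so a non-empty list is always found. A userspace toy model, with drain_round_robin() as an invented name and plain counters standing in for the lists:

#include <stdio.h>

#define NLISTS 3	/* stands in for MIGRATE_PCPTYPES */

/* Toy model (invented for illustration): rotate through NLISTS counters,
 * skipping empty ones, until 'count' pages have been drained. The caller
 * must not ask for more pages than are queued in total. */
static void drain_round_robin(int counts[NLISTS], int count)
{
	int migratetype = 0;

	while (count--) {
		do {
			if (++migratetype == NLISTS)
				migratetype = 0;
		} while (counts[migratetype] == 0);
		counts[migratetype]--;	/* one page back to the buddy lists */
	}
}

int main(void)
{
	int counts[NLISTS] = { 5, 0, 2 };

	drain_round_robin(counts, 4);
	printf("%d %d %d\n", counts[0], counts[1], counts[2]);	/* 3 0 0 */
	return 0;
}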
@@ -953,7 +966,7 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 		to_drain = pcp->batch;
 	else
 		to_drain = pcp->count;
-	free_pages_bulk(zone, to_drain, &pcp->list, 0);
+	free_pcppages_bulk(zone, to_drain, pcp);
 	pcp->count -= to_drain;
 	local_irq_restore(flags);
 }
@@ -979,7 +992,7 @@ static void drain_pages(unsigned int cpu)
 
 		pcp = &pset->pcp;
 		local_irq_save(flags);
-		free_pages_bulk(zone, pcp->count, &pcp->list, 0);
+		free_pcppages_bulk(zone, pcp->count, pcp);
 		pcp->count = 0;
 		local_irq_restore(flags);
 	}
@@ -1045,6 +1058,7 @@ static void free_hot_cold_page(struct page *page, int cold)
 	struct zone *zone = page_zone(page);
 	struct per_cpu_pages *pcp;
 	unsigned long flags;
+	int migratetype;
 	int wasMlocked = __TestClearPageMlocked(page);
 
 	kmemcheck_free_shadow(page, 0);
@@ -1062,21 +1076,39 @@ static void free_hot_cold_page(struct page *page, int cold)
 	kernel_map_pages(page, 1, 0);
 
 	pcp = &zone_pcp(zone, get_cpu())->pcp;
-	set_page_private(page, get_pageblock_migratetype(page));
+	migratetype = get_pageblock_migratetype(page);
+	set_page_private(page, migratetype);
 	local_irq_save(flags);
 	if (unlikely(wasMlocked))
 		free_page_mlock(page);
 	__count_vm_event(PGFREE);
 
+	/*
+	 * We only track unmovable, reclaimable and movable on pcp lists.
+	 * Free ISOLATE pages back to the allocator because they are being
+	 * offlined but treat RESERVE as movable pages so we can get those
+	 * areas back if necessary. Otherwise, we may have to free
+	 * excessively into the page allocator
+	 */
+	if (migratetype >= MIGRATE_PCPTYPES) {
+		if (unlikely(migratetype == MIGRATE_ISOLATE)) {
+			free_one_page(zone, page, 0, migratetype);
+			goto out;
+		}
+		migratetype = MIGRATE_MOVABLE;
+	}
+
 	if (cold)
-		list_add_tail(&page->lru, &pcp->list);
+		list_add_tail(&page->lru, &pcp->lists[migratetype]);
 	else
-		list_add(&page->lru, &pcp->list);
+		list_add(&page->lru, &pcp->lists[migratetype]);
 	pcp->count++;
 	if (pcp->count >= pcp->high) {
-		free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
+		free_pcppages_bulk(zone, pcp->batch, pcp);
 		pcp->count -= pcp->batch;
 	}
+
+out:
 	local_irq_restore(flags);
 	put_cpu();
 }
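
The tail of free_hot_cold_page() keeps the existing watermark hysteresis: pages accumulate on the per-cpu lists without taking the zone lock until count reaches high, and then one batch is flushed back to the buddy allocator under a single lock acquisition. A toy model of that behavior, with pcp_free_one() invented for the example and illustrative high/batch values (the kernel's setup_pageset() below sets high = 6 * batch):

#include <stdio.h>

/* Toy model (pcp_free_one() is invented): frees accumulate without the
 * zone lock until 'high', then 'batch' pages are flushed to the buddy
 * allocator in one locked pass, amortizing the lock over the batch. */
struct pcp { int count, high, batch; };

static int pcp_free_one(struct pcp *p)
{
	p->count++;			/* page queued on a pcp list */
	if (p->count >= p->high) {
		p->count -= p->batch;	/* free_pcppages_bulk(zone, batch, pcp) */
		return p->batch;
	}
	return 0;
}

int main(void)
{
	struct pcp p = { .count = 0, .high = 6, .batch = 2 };	/* illustrative values */
	int i, flushed = 0;

	for (i = 0; i < 20; i++)
		flushed += pcp_free_one(&p);
	printf("queued=%d flushed=%d\n", p.count, flushed);	/* queued=4 flushed=16 */
	return 0;
}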
@@ -1134,46 +1166,24 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 	cpu = get_cpu();
 	if (likely(order == 0)) {
 		struct per_cpu_pages *pcp;
+		struct list_head *list;
 
 		pcp = &zone_pcp(zone, cpu)->pcp;
+		list = &pcp->lists[migratetype];
 		local_irq_save(flags);
-		if (!pcp->count) {
-			pcp->count = rmqueue_bulk(zone, 0,
-					pcp->batch, &pcp->list,
-					migratetype, cold);
-			if (unlikely(!pcp->count))
-				goto failed;
-		}
-
-		/* Find a page of the appropriate migrate type */
-		if (cold) {
-			list_for_each_entry_reverse(page, &pcp->list, lru)
-				if (page_private(page) == migratetype)
-					break;
-		} else {
-			list_for_each_entry(page, &pcp->list, lru)
-				if (page_private(page) == migratetype)
-					break;
-		}
-
-		/* Allocate more to the pcp list if necessary */
-		if (unlikely(&page->lru == &pcp->list)) {
-			int get_one_page = 0;
-
+		if (list_empty(list)) {
 			pcp->count += rmqueue_bulk(zone, 0,
-					pcp->batch, &pcp->list,
+					pcp->batch, list,
 					migratetype, cold);
-			list_for_each_entry(page, &pcp->list, lru) {
-				if (get_pageblock_migratetype(page) !=
-					    MIGRATE_ISOLATE) {
-					get_one_page = 1;
-					break;
-				}
-			}
-			if (!get_one_page)
+			if (unlikely(list_empty(list)))
 				goto failed;
 		}
 
+		if (cold)
+			page = list_entry(list->prev, struct page, lru);
+		else
+			page = list_entry(list->next, struct page, lru);
+
 		list_del(&page->lru);
 		pcp->count--;
 	} else {
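
With one list per migrate type, the linear searches removed above collapse into taking the first or last element, and the hot/cold semantics are preserved purely by where pages enter and leave the list: hot frees push at the head, cold frees at the tail, and both cold allocations and the bulk drain consume from the tail (list->prev). A standalone userspace sketch of that queue discipline, not part of the commit, with a minimal list_head standing in for the kernel's <linux/list.h>:

#include <stdio.h>
#include <stddef.h>

/* Userspace toy model of the pcp queue discipline: hot frees go to the
 * head, cold frees to the tail; a cold allocation takes list->prev,
 * a hot one takes list->next. */
struct list_head { struct list_head *next, *prev; };
struct page { int id; struct list_head lru; };

#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next; n->prev = h;
	h->next->prev = n; h->next = n;
}

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

int main(void)
{
	struct list_head list;
	struct page hot = { .id = 1 }, cold = { .id = 2 };

	list_init(&list);
	list_add(&hot.lru, &list);       /* freed hot: head */
	list_add_tail(&cold.lru, &list); /* freed cold: tail */

	/* Cold allocations and free_pcppages_bulk() both take list->prev */
	printf("tail=%d head=%d\n",
	       list_entry(list.prev, struct page, lru)->id,	/* 2 */
	       list_entry(list.next, struct page, lru)->id);	/* 1 */
	return 0;
}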
@@ -3024,14 +3034,16 @@ static int zone_batchsize(struct zone *zone)
 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
 {
 	struct per_cpu_pages *pcp;
+	int migratetype;
 
 	memset(p, 0, sizeof(*p));
 
 	pcp = &p->pcp;
 	pcp->count = 0;
 	pcp->high = 6 * batch;
 	pcp->batch = max(1UL, 1 * batch);
-	INIT_LIST_HEAD(&pcp->list);
+	for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
+		INIT_LIST_HEAD(&pcp->lists[migratetype]);
 }
 
 /*
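
setup_pageset() now has to initialize each of the MIGRATE_PCPTYPES lists, because both the refill test in buffered_rmqueue() and the skip loop in free_pcppages_bulk() are plain list_empty() checks, which only work on a self-referential empty list. A minimal standalone rendering of those two helpers (the kernel versions live in <linux/list.h>; these are simplified stand-ins):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define MIGRATE_PCPTYPES 3

/* An empty list points at itself, so list_empty() is one pointer compare */
static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

int main(void)
{
	struct list_head lists[MIGRATE_PCPTYPES];
	int migratetype;

	for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
		INIT_LIST_HEAD(&lists[migratetype]);

	printf("%d\n", list_empty(&lists[0]));	/* 1: all lists start empty */
	return 0;
}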
@@ -3223,7 +3235,7 @@ static int __zone_pcp_update(void *data)
 		pcp = &pset->pcp;
 
 		local_irq_save(flags);
-		free_pages_bulk(zone, pcp->count, &pcp->list, 0);
+		free_pcppages_bulk(zone, pcp->count, pcp);
 		setup_pageset(pset, batch);
 		local_irq_restore(flags);
 	}
