
Commit c54ad30

[PATCH] mm: pagealloc opt

Slightly optimise some page allocation and freeing functions by taking
advantage of knowing whether or not interrupts are disabled.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Nick Piggin authored and Linus Torvalds committed Jan 6, 2006
1 parent c484d41 commit c54ad30
Showing 1 changed file with 11 additions and 7 deletions.
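For context, here is a minimal sketch of the locking pattern the patch moves to. This is illustrative code only, not part of the patch or the kernel tree; the demo_* names are invented, and the comments stand in for the real page-freeing work. The idea is that a helper which is only ever called with interrupts already disabled can take its lock with plain spin_lock() instead of spin_lock_irqsave(), while each caller disables interrupts once around the call.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/*
 * Old shape: the helper cannot assume anything about the caller's
 * interrupt state, so it always pays for the IRQ-saving lock variant.
 */
static void demo_free_bulk_old(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	/* ... free pages under the lock ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}

/*
 * New shape: the helper requires interrupts to be disabled by the
 * caller, so the cheaper plain spin_lock() is sufficient.
 */
static void demo_free_bulk_new(void)
{
	spin_lock(&demo_lock);
	/* ... free pages under the lock ... */
	spin_unlock(&demo_lock);
}

/* A caller disables interrupts once around the (possibly repeated) call. */
static void demo_free_one(void)
{
	unsigned long flags;

	local_irq_save(flags);
	demo_free_bulk_new();
	local_irq_restore(flags);
}

This mirrors what the diff below does: free_pages_bulk() and rmqueue_bulk() take zone->lock with plain spin_lock(), while their callers (__free_pages_ok(), __drain_pages(), buffered_rmqueue()) either add an explicit local_irq_save()/local_irq_restore() pair or already run with interrupts off.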
mm/page_alloc.c (18 changes: 11 additions & 7 deletions)
@@ -375,11 +375,10 @@ static int
 free_pages_bulk(struct zone *zone, int count,
 		struct list_head *list, unsigned int order)
 {
-	unsigned long flags;
 	struct page *page = NULL;
 	int ret = 0;

-	spin_lock_irqsave(&zone->lock, flags);
+	spin_lock(&zone->lock);
 	zone->all_unreclaimable = 0;
 	zone->pages_scanned = 0;
 	while (!list_empty(list) && count--) {
@@ -389,12 +388,13 @@ free_pages_bulk(struct zone *zone, int count,
 		__free_pages_bulk(page, zone, order);
 		ret++;
 	}
-	spin_unlock_irqrestore(&zone->lock, flags);
+	spin_unlock(&zone->lock);
 	return ret;
 }

 void __free_pages_ok(struct page *page, unsigned int order)
 {
+	unsigned long flags;
 	LIST_HEAD(list);
 	int i;
 	int reserved = 0;
@@ -415,7 +415,9 @@ void __free_pages_ok(struct page *page, unsigned int order)
 	list_add(&page->lru, &list);
 	mod_page_state(pgfree, 1 << order);
 	kernel_map_pages(page, 1<<order, 0);
+	local_irq_save(flags);
 	free_pages_bulk(page_zone(page), 1, &list, order);
+	local_irq_restore(flags);
 }


@@ -539,20 +541,19 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order)
 static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			unsigned long count, struct list_head *list)
 {
-	unsigned long flags;
 	int i;
 	int allocated = 0;
 	struct page *page;

-	spin_lock_irqsave(&zone->lock, flags);
+	spin_lock(&zone->lock);
 	for (i = 0; i < count; ++i) {
 		page = __rmqueue(zone, order);
 		if (page == NULL)
 			break;
 		allocated++;
 		list_add_tail(&page->lru, list);
 	}
-	spin_unlock_irqrestore(&zone->lock, flags);
+	spin_unlock(&zone->lock);
 	return allocated;
 }

@@ -589,6 +590,7 @@ void drain_remote_pages(void)
 #if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
 static void __drain_pages(unsigned int cpu)
 {
+	unsigned long flags;
 	struct zone *zone;
 	int i;

@@ -600,8 +602,10 @@ static void __drain_pages(unsigned int cpu)
 			struct per_cpu_pages *pcp;

 			pcp = &pset->pcp[i];
+			local_irq_save(flags);
 			pcp->count -= free_pages_bulk(zone, pcp->count,
 					&pcp->list, 0);
+			local_irq_restore(flags);
 		}
 	}
 }
@@ -744,7 +748,7 @@ buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags)
 		if (pcp->count <= pcp->low)
 			pcp->count += rmqueue_bulk(zone, 0,
 					pcp->batch, &pcp->list);
-		if (pcp->count) {
+		if (likely(pcp->count)) {
 			page = list_entry(pcp->list.next, struct page, lru);
 			list_del(&page->lru);
 			pcp->count--;
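The last hunk also wraps the fast-path test in likely(). As a side note (a simplified sketch, not the patch's code): in the kernel, likely()/unlikely() expand roughly to GCC's __builtin_expect(), which lets the compiler lay out the expected branch as straight-line code.

/* Roughly how the kernel defines the branch hints (simplified sketch). */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Illustrative use: the just-refilled per-cpu list is expected to be non-empty. */
static int demo_take(int count)
{
	if (likely(count))
		return count - 1;	/* common fast path */
	return -1;			/* rare failure path */
}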
