Merge branch 'akpm' (patches from Andrew)
Merge two mm fixes from Andrew Morton.

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm: prevent NR_ISOLATE_* stats from going negative
  Revert "mm, page_alloc: only use per-cpu allocator for irq-safe requests"
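
In short: the first patch moves the NR_ISOLATED_* decrement in putback_movable_pages() ahead of putback_lru_page(), which can drop the last reference to the page; evaluating page_is_file_cache() after that call may touch a freed (possibly recycled) page and decrement the wrong counter, driving it negative. The second patch restores local_irq_save()/local_irq_restore() around the per-CPU pagelist operations in place of preempt_disable()/preempt_enable() plus in_interrupt() bail-outs, so frees and order-0 allocations from interrupt context can use the per-CPU lists again.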
Linus Torvalds committed Apr 20, 2017
2 parents 160062e + fc280fe commit c154165
Showing 2 changed files with 21 additions and 24 deletions.
2 changes: 1 addition & 1 deletion mm/migrate.c
@@ -184,9 +184,9 @@ void putback_movable_pages(struct list_head *l)
 			unlock_page(page);
 			put_page(page);
 		} else {
-			putback_lru_page(page);
 			dec_node_page_state(page, NR_ISOLATED_ANON +
 					page_is_file_cache(page));
+			putback_lru_page(page);
 		}
 	}
 }
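
The rule this fix applies is general: once a call may drop the final reference to an object, nothing on that object may be read afterwards. Below is a minimal userspace sketch of the same ordering discipline; struct page_like, putback() and nr_isolated[] are illustrative stand-ins, not kernel APIs.

    #include <stdio.h>
    #include <stdlib.h>

    struct page_like { int is_file; };       /* stand-in for struct page */

    static int nr_isolated[2];               /* stand-in for NR_ISOLATED_ANON/_FILE */

    static void putback(struct page_like *p) /* may drop the last reference */
    {
        free(p);                             /* the object may be gone after this */
    }

    int main(void)
    {
        struct page_like *p = malloc(sizeof(*p));

        if (!p)
            return 1;
        p->is_file = 1;
        nr_isolated[p->is_file]++;           /* "isolate" the page */

        /* Buggy pre-patch order:
         *     putback(p);
         *     nr_isolated[p->is_file]--;    reads freed memory and may hit
         *                                   the wrong counter
         * Fixed order, as in the patch: read first, then release. */
        nr_isolated[p->is_file]--;
        putback(p);

        printf("file-backed isolated: %d\n", nr_isolated[1]);
        return 0;
    }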
43 changes: 20 additions & 23 deletions mm/page_alloc.c
@@ -1090,10 +1090,10 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 {
 	int migratetype = 0;
 	int batch_free = 0;
-	unsigned long nr_scanned, flags;
+	unsigned long nr_scanned;
 	bool isolated_pageblocks;
 
-	spin_lock_irqsave(&zone->lock, flags);
+	spin_lock(&zone->lock);
 	isolated_pageblocks = has_isolate_pageblock(zone);
 	nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
 	if (nr_scanned)
@@ -1142,17 +1142,16 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 			trace_mm_page_pcpu_drain(page, 0, mt);
 		} while (--count && --batch_free && !list_empty(list));
 	}
-	spin_unlock_irqrestore(&zone->lock, flags);
+	spin_unlock(&zone->lock);
 }
 
 static void free_one_page(struct zone *zone,
 				struct page *page, unsigned long pfn,
 				unsigned int order,
 				int migratetype)
 {
-	unsigned long nr_scanned, flags;
-	spin_lock_irqsave(&zone->lock, flags);
-	__count_vm_events(PGFREE, 1 << order);
+	unsigned long nr_scanned;
+	spin_lock(&zone->lock);
 	nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
 	if (nr_scanned)
 		__mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
@@ -1162,7 +1161,7 @@ static void free_one_page(struct zone *zone,
 		migratetype = get_pfnblock_migratetype(page, pfn);
 	}
 	__free_one_page(page, pfn, zone, order, migratetype);
-	spin_unlock_irqrestore(&zone->lock, flags);
+	spin_unlock(&zone->lock);
 }
 
 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
@@ -1240,14 +1239,18 @@ void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
 
 static void __free_pages_ok(struct page *page, unsigned int order)
 {
+	unsigned long flags;
 	int migratetype;
 	unsigned long pfn = page_to_pfn(page);
 
 	if (!free_pages_prepare(page, order, true))
 		return;
 
 	migratetype = get_pfnblock_migratetype(page, pfn);
+	local_irq_save(flags);
+	__count_vm_events(PGFREE, 1 << order);
 	free_one_page(page_zone(page), page, pfn, order, migratetype);
+	local_irq_restore(flags);
 }
 
 static void __init __free_pages_boot_core(struct page *page, unsigned int order)
@@ -2219,9 +2222,8 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			int migratetype, bool cold)
 {
 	int i, alloced = 0;
-	unsigned long flags;
 
-	spin_lock_irqsave(&zone->lock, flags);
+	spin_lock(&zone->lock);
 	for (i = 0; i < count; ++i) {
 		struct page *page = __rmqueue(zone, order, migratetype);
 		if (unlikely(page == NULL))
@@ -2257,7 +2259,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 	 * pages added to the pcp list.
 	 */
 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
-	spin_unlock_irqrestore(&zone->lock, flags);
+	spin_unlock(&zone->lock);
 	return alloced;
 }
 
@@ -2485,20 +2487,17 @@ void free_hot_cold_page(struct page *page, bool cold)
 {
 	struct zone *zone = page_zone(page);
 	struct per_cpu_pages *pcp;
+	unsigned long flags;
 	unsigned long pfn = page_to_pfn(page);
 	int migratetype;
 
-	if (in_interrupt()) {
-		__free_pages_ok(page, 0);
-		return;
-	}
-
 	if (!free_pcp_prepare(page))
 		return;
 
 	migratetype = get_pfnblock_migratetype(page, pfn);
 	set_pcppage_migratetype(page, migratetype);
-	preempt_disable();
+	local_irq_save(flags);
+	__count_vm_event(PGFREE);
 
 	/*
 	 * We only track unmovable, reclaimable and movable on pcp lists.
@@ -2515,7 +2514,6 @@ void free_hot_cold_page(struct page *page, bool cold)
 		migratetype = MIGRATE_MOVABLE;
 	}
 
-	__count_vm_event(PGFREE);
 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
 	if (!cold)
 		list_add(&page->lru, &pcp->lists[migratetype]);
@@ -2529,7 +2527,7 @@ void free_hot_cold_page(struct page *page, bool cold)
 	}
 
 out:
-	preempt_enable();
+	local_irq_restore(flags);
 }
 
 /*
@@ -2654,8 +2652,6 @@ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
 {
 	struct page *page;
 
-	VM_BUG_ON(in_interrupt());
-
 	do {
 		if (list_empty(list)) {
 			pcp->count += rmqueue_bulk(zone, 0,
@@ -2686,16 +2682,17 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
 	struct list_head *list;
 	bool cold = ((gfp_flags & __GFP_COLD) != 0);
 	struct page *page;
+	unsigned long flags;
 
-	preempt_disable();
+	local_irq_save(flags);
 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
 	list = &pcp->lists[migratetype];
 	page = __rmqueue_pcplist(zone, migratetype, cold, pcp, list);
 	if (page) {
 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
 		zone_statistics(preferred_zone, zone);
 	}
-	preempt_enable();
+	local_irq_restore(flags);
 	return page;
 }
 
@@ -2711,7 +2708,7 @@ struct page *rmqueue(struct zone *preferred_zone,
 	unsigned long flags;
 	struct page *page;
 
-	if (likely(order == 0) && !in_interrupt()) {
+	if (likely(order == 0)) {
 		page = rmqueue_pcplist(preferred_zone, zone, order,
 				gfp_flags, migratetype);
 		goto out;
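
The thread running through the revert: the per-CPU pagelists are reachable from both task and interrupt context, so updates to them must run with local interrupts disabled; preempt_disable() only prevents migration to another CPU, not an interrupt firing on the current one. Below is a rough userspace analogue, with a signal handler standing in for a hardware interrupt and sigprocmask() for local_irq_save()/local_irq_restore(); all names are illustrative.

    #include <signal.h>
    #include <stdio.h>

    static volatile sig_atomic_t pcp_count; /* per-CPU counter stand-in */

    static void fake_irq(int sig)           /* "interrupt" touching the data */
    {
        (void)sig;
        pcp_count++;
    }

    int main(void)
    {
        sigset_t block, old;

        signal(SIGALRM, fake_irq);
        sigemptyset(&block);
        sigaddset(&block, SIGALRM);

        /* ~ local_irq_save(flags): the handler cannot run in here, so the
         * read-modify-write below cannot be interleaved. */
        sigprocmask(SIG_BLOCK, &block, &old);
        pcp_count = pcp_count + 1;              /* critical update */
        sigprocmask(SIG_SETMASK, &old, NULL);   /* ~ local_irq_restore(flags) */

        printf("pcp_count = %d\n", (int)pcp_count);
        return 0;
    }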
