mm, compaction: wrap calculating first and last pfn of pageblock
Compaction code has accumulated numerous instances of manual
calculations of the first (inclusive) and last (exclusive) pfn of a
pageblock (or a smaller block of given order), given a pfn within the
pageblock.

Wrap these calculations by introducing pageblock_start_pfn(pfn) and
pageblock_end_pfn(pfn) macros.
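
Purely for illustration (not part of the patch): a minimal userspace sketch of what the new helpers compute. round_down() and ALIGN() are re-declared locally so the sketch is self-contained, and pageblock_order is assumed to be 9 (512-page pageblocks, i.e. 2MB blocks with 4KB pages):

	#include <stdio.h>

	/* local stand-ins for the kernel's round_down()/ALIGN() helpers */
	#define round_down(x, y)	((x) & ~((y) - 1))
	#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

	#define pageblock_order		9	/* assumed value for the example */
	#define pageblock_nr_pages	(1UL << pageblock_order)

	#define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))
	#define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))
	#define pageblock_start_pfn(pfn)	block_start_pfn(pfn, pageblock_order)
	#define pageblock_end_pfn(pfn)		block_end_pfn(pfn, pageblock_order)

	int main(void)
	{
		unsigned long pfn = 1000UL;

		/* pfn 1000 lies in pageblock [512, 1024): prints start=512 end=1024 */
		printf("start=%lu end=%lu\n",
		       pageblock_start_pfn(pfn), pageblock_end_pfn(pfn));

		/* the wrappers reduce to the previously open-coded expressions */
		printf("%d %d\n",
		       pageblock_start_pfn(pfn) == (pfn & ~(pageblock_nr_pages - 1)),
		       pageblock_end_pfn(pfn) == ALIGN(pfn + 1, pageblock_nr_pages));
		return 0;
	}

The start pfn is inclusive and the end pfn exclusive, matching the convention described above, and the results match the open-coded mask/ALIGN expressions that the patch replaces.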

[vbabka@suse.cz: fix crash in get_pfnblock_flags_mask() from isolate_freepages():]
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Rik van Riel <riel@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Vlastimil Babka authored and Linus Torvalds committed May 20, 2016
1 parent e4c5800 commit 06b6640
Showing 1 changed file with 19 additions and 14 deletions.
mm/compaction.c
@@ -42,6 +42,11 @@ static inline void count_compact_events(enum vm_event_item item, long delta)
 #define CREATE_TRACE_POINTS
 #include <trace/events/compaction.h>
 
+#define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))
+#define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))
+#define pageblock_start_pfn(pfn)	block_start_pfn(pfn, pageblock_order)
+#define pageblock_end_pfn(pfn)		block_end_pfn(pfn, pageblock_order)
+
 static unsigned long release_freepages(struct list_head *freelist)
 {
 	struct page *page, *next;
@@ -161,7 +166,7 @@ static void reset_cached_positions(struct zone *zone)
 	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
 	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
 	zone->compact_cached_free_pfn =
-			round_down(zone_end_pfn(zone) - 1, pageblock_nr_pages);
+			pageblock_start_pfn(zone_end_pfn(zone) - 1);
 }
 
 /*
@@ -519,10 +524,10 @@ isolate_freepages_range(struct compact_control *cc,
 	LIST_HEAD(freelist);
 
 	pfn = start_pfn;
-	block_start_pfn = pfn & ~(pageblock_nr_pages - 1);
+	block_start_pfn = pageblock_start_pfn(pfn);
 	if (block_start_pfn < cc->zone->zone_start_pfn)
 		block_start_pfn = cc->zone->zone_start_pfn;
-	block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
+	block_end_pfn = pageblock_end_pfn(pfn);
 
 	for (; pfn < end_pfn; pfn += isolated,
 				block_start_pfn = block_end_pfn,
@@ -538,8 +543,8 @@ isolate_freepages_range(struct compact_control *cc,
 		 * scanning range to right one.
 		 */
 		if (pfn >= block_end_pfn) {
-			block_start_pfn = pfn & ~(pageblock_nr_pages - 1);
-			block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
+			block_start_pfn = pageblock_start_pfn(pfn);
+			block_end_pfn = pageblock_end_pfn(pfn);
 			block_end_pfn = min(block_end_pfn, end_pfn);
 		}
 
@@ -834,10 +839,10 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
 
 	/* Scan block by block. First and last block may be incomplete */
 	pfn = start_pfn;
-	block_start_pfn = pfn & ~(pageblock_nr_pages - 1);
+	block_start_pfn = pageblock_start_pfn(pfn);
 	if (block_start_pfn < cc->zone->zone_start_pfn)
 		block_start_pfn = cc->zone->zone_start_pfn;
-	block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
+	block_end_pfn = pageblock_end_pfn(pfn);
 
 	for (; pfn < end_pfn; pfn = block_end_pfn,
 				block_start_pfn = block_end_pfn,
@@ -924,10 +929,10 @@ static void isolate_freepages(struct compact_control *cc)
 	 * is using.
 	 */
 	isolate_start_pfn = cc->free_pfn;
-	block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
+	block_start_pfn = pageblock_start_pfn(cc->free_pfn);
 	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
 						zone_end_pfn(zone));
-	low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);
+	low_pfn = pageblock_end_pfn(cc->migrate_pfn);
 
 	/*
 	 * Isolate free pages until enough are available to migrate the
@@ -1081,12 +1086,12 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 	 * initialized by compact_zone()
 	 */
 	low_pfn = cc->migrate_pfn;
-	block_start_pfn = cc->migrate_pfn & ~(pageblock_nr_pages - 1);
+	block_start_pfn = pageblock_start_pfn(low_pfn);
 	if (block_start_pfn < zone->zone_start_pfn)
 		block_start_pfn = zone->zone_start_pfn;
 
 	/* Only scan within a pageblock boundary */
-	block_end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);
+	block_end_pfn = pageblock_end_pfn(low_pfn);
 
 	/*
 	 * Iterate over whole pageblocks until we find the first suitable.
@@ -1343,7 +1348,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 	cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
 	cc->free_pfn = zone->compact_cached_free_pfn;
 	if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
-		cc->free_pfn = round_down(end_pfn - 1, pageblock_nr_pages);
+		cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
 		zone->compact_cached_free_pfn = cc->free_pfn;
 	}
 	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
@@ -1411,7 +1416,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 		if (cc->order > 0 && cc->last_migrated_pfn) {
 			int cpu;
 			unsigned long current_block_start =
-				cc->migrate_pfn & ~((1UL << cc->order) - 1);
+				block_start_pfn(cc->migrate_pfn, cc->order);
 
 			if (cc->last_migrated_pfn < current_block_start) {
 				cpu = get_cpu();
@@ -1436,7 +1441,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 		cc->nr_freepages = 0;
 		VM_BUG_ON(free_pfn == 0);
 		/* The cached pfn is always the first in a pageblock */
-		free_pfn &= ~(pageblock_nr_pages-1);
+		free_pfn = pageblock_start_pfn(free_pfn);
 		/*
 		 * Only go back, not forward. The cached pfn might have been
 		 * already reset to zone end in compact_finished()
