mm: cma: remove watermark hacks
Commits 2139cbe ("cma: fix counting of isolated pages") and
d95ea5d ("cma: fix watermark checking") introduced a reliable
method of free page accounting when memory is being allocated from CMA
regions, so the workaround introduced earlier by commit 49f223a
("mm: trigger page reclaim in alloc_contig_range() to stabilise
watermarks") can finally be removed.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Kyungmin Park <kyungmin.park@samsung.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Mel Gorman <mel@csn.ul.ie>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
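
The accounting these commits rely on can be pictured as follows: free pages that sit in CMA pageblocks are tracked separately, and the watermark check simply refuses to count them for allocations that are not allowed to land in CMA regions, so the watermarks no longer need to be inflated by hand. The sketch below is a minimal, self-contained illustration of that idea, not the kernel's code; the names zone_stub, ALLOC_CMA_OK and cma_aware_watermark_ok are invented for this example.

/* Build with: cc -std=c99 -Wall sketch.c -o sketch */
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for a memory zone; not the kernel's struct zone. */
struct zone_stub {
	long free_pages;     /* all free pages in the zone */
	long free_cma_pages; /* the subset of free pages inside CMA pageblocks */
	long wmark_min;      /* minimum watermark */
};

#define ALLOC_CMA_OK 0x1 /* the request may be satisfied from CMA pageblocks */

/*
 * Watermark check that ignores free CMA pages for allocations which cannot
 * use CMA regions.  Because such allocations never see CMA pages as "free",
 * a large contiguous CMA allocation cannot push the zone below its
 * watermarks from their point of view, and no temporary watermark bump is
 * needed.
 */
static bool cma_aware_watermark_ok(const struct zone_stub *z, int alloc_flags)
{
	long usable = z->free_pages;

	if (!(alloc_flags & ALLOC_CMA_OK))
		usable -= z->free_cma_pages;

	return usable > z->wmark_min;
}

int main(void)
{
	/* 1000 free pages, 900 of them in CMA pageblocks, watermark at 200. */
	struct zone_stub z = { .free_pages = 1000, .free_cma_pages = 900,
			       .wmark_min = 200 };

	/* Movable allocation: may use CMA, 1000 usable pages -> ok. */
	printf("movable ok:   %d\n", cma_aware_watermark_ok(&z, ALLOC_CMA_OK));
	/* Unmovable allocation: only 100 usable pages -> below watermark. */
	printf("unmovable ok: %d\n", cma_aware_watermark_ok(&z, 0));
	return 0;
}

Seen this way, the min_cma_pages/__reclaim_pages machinery removed below is redundant: the normal reclaim path already keeps non-CMA allocations above their watermarks.
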
Marek Szyprowski authored and Linus Torvalds committed Dec 12, 2012
1 parent 2e30abd commit bc357f4
Showing 2 changed files with 0 additions and 67 deletions.
9 changes: 0 additions & 9 deletions include/linux/mmzone.h
@@ -63,10 +63,8 @@ enum {
 
 #ifdef CONFIG_CMA
 # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
-# define cma_wmark_pages(zone) zone->min_cma_pages
 #else
 # define is_migrate_cma(migratetype) false
-# define cma_wmark_pages(zone) 0
 #endif
 
 #define for_each_migratetype_order(order, type) \
@@ -382,13 +380,6 @@ struct zone {
 #ifdef CONFIG_MEMORY_HOTPLUG
 	/* see spanned/present_pages for more description */
 	seqlock_t span_seqlock;
 #endif
-#ifdef CONFIG_CMA
-	/*
-	 * CMA needs to increase watermark levels during the allocation
-	 * process to make sure that the system is not starved.
-	 */
-	unsigned long min_cma_pages;
-#endif
 	struct free_area free_area[MAX_ORDER];
 
58 changes: 0 additions & 58 deletions mm/page_alloc.c
@@ -5218,10 +5218,6 @@ static void __setup_per_zone_wmarks(void)
 		zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2);
 		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
 
-		zone->watermark[WMARK_MIN] += cma_wmark_pages(zone);
-		zone->watermark[WMARK_LOW] += cma_wmark_pages(zone);
-		zone->watermark[WMARK_HIGH] += cma_wmark_pages(zone);
-
 		setup_zone_migrate_reserve(zone);
 		spin_unlock_irqrestore(&zone->lock, flags);
 	}
@@ -5766,54 +5762,6 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
 	return ret > 0 ? 0 : ret;
 }
 
-/*
- * Update zone's cma pages counter used for watermark level calculation.
- */
-static inline void __update_cma_watermarks(struct zone *zone, int count)
-{
-	unsigned long flags;
-	spin_lock_irqsave(&zone->lock, flags);
-	zone->min_cma_pages += count;
-	spin_unlock_irqrestore(&zone->lock, flags);
-	setup_per_zone_wmarks();
-}
-
-/*
- * Trigger memory pressure bump to reclaim some pages in order to be able to
- * allocate 'count' pages in single page units. Does similar work as
- *__alloc_pages_slowpath() function.
- */
-static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count)
-{
-	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
-	struct zonelist *zonelist = node_zonelist(0, gfp_mask);
-	int did_some_progress = 0;
-	int order = 1;
-
-	/*
-	 * Increase level of watermarks to force kswapd do his job
-	 * to stabilise at new watermark level.
-	 */
-	__update_cma_watermarks(zone, count);
-
-	/* Obey watermarks as if the page was being allocated */
-	while (!zone_watermark_ok(zone, 0, low_wmark_pages(zone), 0, 0)) {
-		wake_all_kswapd(order, zonelist, high_zoneidx, zone_idx(zone));
-
-		did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
-						      NULL);
-		if (!did_some_progress) {
-			/* Exhausted what can be done so it's blamo time */
-			out_of_memory(zonelist, gfp_mask, order, NULL, false);
-		}
-	}
-
-	/* Restore original watermark levels. */
-	__update_cma_watermarks(zone, -count);
-
-	return count;
-}
-
 /**
  * alloc_contig_range() -- tries to allocate given range of pages
  * @start: start PFN to allocate
@@ -5837,7 +5785,6 @@ static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count)
 int alloc_contig_range(unsigned long start, unsigned long end,
 		       unsigned migratetype)
 {
-	struct zone *zone = page_zone(pfn_to_page(start));
 	unsigned long outer_start, outer_end;
 	int ret = 0, order;
 
@@ -5922,11 +5869,6 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 		goto done;
 	}
 
-	/*
-	 * Reclaim enough pages to make sure that contiguous allocation
-	 * will not starve the system.
-	 */
-	__reclaim_pages(zone, GFP_HIGHUSER_MOVABLE, end-start);
 
 	/* Grab isolated pages from freelists. */
 	outer_end = isolate_freepages_range(&cc, outer_start, end);
