Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 320818
b: refs/heads/master
c: 072bb0a
h: refs/heads/master
v: v3
  • Loading branch information
Mel Gorman authored and Linus Torvalds committed Aug 1, 2012
1 parent 2eefb2c commit 1e65eab
Show file tree
Hide file tree
Showing 7 changed files with 265 additions and 26 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 702d1a6e0766d45642c934444fd41f658d251305
refs/heads/master: 072bb0aa5e062902968c5c1007bba332c7820cf4
9 changes: 9 additions & 0 deletions trunk/include/linux/mm_types.h
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,15 @@ struct page {
union {
pgoff_t index; /* Our offset within mapping. */
void *freelist; /* slub/slob first free object */
bool pfmemalloc; /* If set by the page allocator,
* ALLOC_PFMEMALLOC was set
* and the low watermark was not
* met implying that the system
* is under some pressure. The
* caller should try to ensure
* this page is only used to
* free other pages.
*/
};

union {
Expand Down
29 changes: 29 additions & 0 deletions trunk/include/linux/page-flags.h
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
Expand Down Expand Up @@ -453,6 +454,34 @@ static inline int PageTransTail(struct page *page)
}
#endif

/*
* If network-based swap is enabled, sl*b must keep track of whether pages
* were allocated from pfmemalloc reserves.
*/
/* Report whether this slab page was allocated from pfmemalloc reserves. */
static inline int PageSlabPfmemalloc(struct page *pg)
{
	/* The PG_active bit is overloaded for this purpose on slab pages only. */
	VM_BUG_ON(!PageSlab(pg));
	return PageActive(pg);
}

/* Mark a slab page as having been allocated from pfmemalloc reserves. */
static inline void SetPageSlabPfmemalloc(struct page *pg)
{
	/* Only valid on slab pages, where PG_active is repurposed. */
	VM_BUG_ON(!PageSlab(pg));
	SetPageActive(pg);
}

/*
 * Non-atomic clear of the slab pfmemalloc marker; caller must guarantee
 * no concurrent flag updates on this page.
 */
static inline void __ClearPageSlabPfmemalloc(struct page *pg)
{
	/* Only valid on slab pages, where PG_active is repurposed. */
	VM_BUG_ON(!PageSlab(pg));
	__ClearPageActive(pg);
}

/* Atomically clear the slab pfmemalloc marker on a slab page. */
static inline void ClearPageSlabPfmemalloc(struct page *pg)
{
	/* Only valid on slab pages, where PG_active is repurposed. */
	VM_BUG_ON(!PageSlab(pg));
	ClearPageActive(pg);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED (1 << PG_mlocked)
#else
Expand Down
3 changes: 3 additions & 0 deletions trunk/mm/internal.h
Original file line number Diff line number Diff line change
Expand Up @@ -279,6 +279,9 @@ static inline struct page *mem_map_next(struct page *iter,
#define __paginginit __init
#endif

/* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);

/* Memory initialisation debug and verification */
enum mminit_level {
MMINIT_WARNING,
Expand Down
27 changes: 22 additions & 5 deletions trunk/mm/page_alloc.c
Original file line number Diff line number Diff line change
Expand Up @@ -1513,6 +1513,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
#define ALLOC_HARDER 0x10 /* try to alloc harder */
#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET 0x40 /* check for correct cpuset */
#define ALLOC_PFMEMALLOC 0x80 /* Caller has PF_MEMALLOC set */

#ifdef CONFIG_FAIL_PAGE_ALLOC

Expand Down Expand Up @@ -2293,16 +2294,22 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
} else if (unlikely(rt_task(current)) && !in_interrupt())
alloc_flags |= ALLOC_HARDER;

if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
if (!in_interrupt() &&
((current->flags & PF_MEMALLOC) ||
unlikely(test_thread_flag(TIF_MEMDIE))))
if ((current->flags & PF_MEMALLOC) ||
unlikely(test_thread_flag(TIF_MEMDIE))) {
alloc_flags |= ALLOC_PFMEMALLOC;

if (likely(!(gfp_mask & __GFP_NOMEMALLOC)) && !in_interrupt())
alloc_flags |= ALLOC_NO_WATERMARKS;
}

return alloc_flags;
}

/*
 * gfp_pfmemalloc_allowed - would an allocation with @gfp_mask be granted
 * access to the pfmemalloc reserves?
 *
 * Derives the answer from gfp_to_alloc_flags() so that the policy lives in
 * exactly one place.
 */
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
{
	unsigned int flags = gfp_to_alloc_flags(gfp_mask);

	return (flags & ALLOC_PFMEMALLOC) != 0;
}

static inline struct page *
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
Expand Down Expand Up @@ -2490,10 +2497,18 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
warn_alloc_failed(gfp_mask, order, NULL);
return page;
got_pg:
/*
* page->pfmemalloc is set when the caller had PFMEMALLOC set or has
* been OOM killed. The expectation is that the caller is taking
* steps that will free more memory. The caller should avoid the
* page being used for !PFMEMALLOC purposes.
*/
page->pfmemalloc = !!(alloc_flags & ALLOC_PFMEMALLOC);

if (kmemcheck_enabled)
kmemcheck_pagealloc_alloc(page, order, gfp_mask);
return page;

return page;
}

/*
Expand Down Expand Up @@ -2544,6 +2559,8 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
page = __alloc_pages_slowpath(gfp_mask, order,
zonelist, high_zoneidx, nodemask,
preferred_zone, migratetype);
else
page->pfmemalloc = false;

trace_mm_page_alloc(page, order, gfp_mask, migratetype);

Expand Down
Loading

0 comments on commit 1e65eab

Please sign in to comment.