Commit
---
yaml
---
r: 336264
b: refs/heads/master
c: 2c68bc7
h: refs/heads/master
v: v3
Linus Torvalds committed Dec 11, 2012
1 parent 2e0ed14 commit 2973cdd
Showing 6 changed files with 28 additions and 38 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 1bf3751ec90cc3174e01f0d701e8449ce163d113
+refs/heads/master: 2c68bc72dc77be3e8c32cd7ad6c7714ee21efd79
6 changes: 3 additions & 3 deletions trunk/drivers/gpu/drm/i915/i915_gem.c
@@ -1796,7 +1796,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
	 */
	mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	gfp = mapping_gfp_mask(mapping);
-	gfp |= __GFP_NORETRY | __GFP_NOWARN;
+	gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
	gfp &= ~(__GFP_IO | __GFP_WAIT);
	for_each_sg(st->sgl, sg, page_count, i) {
		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
@@ -1809,15 +1809,15 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
		 * our own buffer, now let the real VM do its job and
		 * go down in flames if truly OOM.
		 */
-		gfp &= ~(__GFP_NORETRY | __GFP_NOWARN);
+		gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
		gfp |= __GFP_IO | __GFP_WAIT;

		i915_gem_shrink_all(dev_priv);
		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		if (IS_ERR(page))
			goto err_pages;

-		gfp |= __GFP_NORETRY | __GFP_NOWARN;
+		gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
		gfp &= ~(__GFP_IO | __GFP_WAIT);
	}

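For context, a minimal sketch (not the driver's code) of the two-pass pattern these hunks restore: an opportunistic allocation that must not wake kswapd or block on I/O, followed by a fallback that drops the driver's own caches and retries with full reclaim. try_read_page and my_purge_caches are illustrative names; i915_gem_shrink_all() plays the purge role in the real driver.

#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/gfp.h>
#include <linux/err.h>

static struct page *try_read_page(struct address_space *mapping, pgoff_t index,
				  void (*my_purge_caches)(void))
{
	gfp_t gfp = mapping_gfp_mask(mapping);
	struct page *page;

	/* Phase 1: fail fast, stay quiet, and do not poke kswapd. */
	gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
	gfp &= ~(__GFP_IO | __GFP_WAIT);
	page = shmem_read_mapping_page_gfp(mapping, index, gfp);
	if (!IS_ERR(page))
		return page;

	/* Phase 2: drop our own caches, then let the VM try for real. */
	my_purge_caches();
	gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
	gfp |= __GFP_IO | __GFP_WAIT;
	return shmem_read_mapping_page_gfp(mapping, index, gfp);
}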
6 changes: 4 additions & 2 deletions trunk/drivers/mtd/mtdcore.c
@@ -1077,7 +1077,8 @@ EXPORT_SYMBOL_GPL(mtd_writev);
 * until the request succeeds or until the allocation size falls below
 * the system page size. This attempts to make sure it does not adversely
 * impact system performance, so when allocating more than one page, we
- * ask the memory allocator to avoid re-trying.
+ * ask the memory allocator to avoid re-trying, swapping, writing back
+ * or performing I/O.
 *
 * Note, this function also makes sure that the allocated buffer is aligned to
 * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
@@ -1091,7 +1092,8 @@ EXPORT_SYMBOL_GPL(mtd_writev);
 */
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
{
-	gfp_t flags = __GFP_NOWARN | __GFP_WAIT | __GFP_NORETRY;
+	gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
+		       __GFP_NORETRY | __GFP_NO_KSWAPD;
	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
	void *kbuf;

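A hedged sketch of the "try big, then halve" strategy the updated comment describes; the function name and the min_io parameter are illustrative stand-ins, not the mtdcore.c code itself.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static void *kmalloc_up_to_sketch(size_t min_io, size_t *size)
{
	/* Opportunistic flags: no retries, no warnings, no kswapd wakeup. */
	gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
		      __GFP_NORETRY | __GFP_NO_KSWAPD;
	size_t min_alloc = max_t(size_t, min_io, PAGE_SIZE);
	void *buf;

	while (*size > min_alloc) {
		buf = kmalloc(*size, flags);
		if (buf)
			return buf;
		*size >>= 1;			/* halve and retry */
		*size = ALIGN(*size, min_io);	/* keep it I/O-unit aligned */
	}

	/* Last resort: a normal allocation that may reclaim and warn. */
	return kmalloc(*size, GFP_KERNEL);
}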
13 changes: 8 additions & 5 deletions trunk/include/linux/gfp.h
@@ -30,9 +30,10 @@ struct vm_area_struct;
 #define ___GFP_HARDWALL		0x20000u
 #define ___GFP_THISNODE		0x40000u
 #define ___GFP_RECLAIMABLE	0x80000u
-#define ___GFP_NOTRACK		0x100000u
-#define ___GFP_OTHER_NODE	0x200000u
-#define ___GFP_WRITE		0x400000u
+#define ___GFP_NOTRACK		0x200000u
+#define ___GFP_NO_KSWAPD	0x400000u
+#define ___GFP_OTHER_NODE	0x800000u
+#define ___GFP_WRITE		0x1000000u

 /*
  * GFP bitmasks..
@@ -85,6 +86,7 @@ struct vm_area_struct;
 #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
 #define __GFP_NOTRACK	((__force gfp_t)___GFP_NOTRACK)  /* Don't track with kmemcheck */

+#define __GFP_NO_KSWAPD	((__force gfp_t)___GFP_NO_KSWAPD)
 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
 #define __GFP_WRITE	((__force gfp_t)___GFP_WRITE)	/* Allocator intends to dirty page */

@@ -94,7 +96,7 @@ struct vm_area_struct;
  */
 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)

-#define __GFP_BITS_SHIFT 23	/* Room for N __GFP_FOO bits */
+#define __GFP_BITS_SHIFT 25	/* Room for N __GFP_FOO bits */
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

 /* This equals 0, but use constants in case they ever change */
@@ -114,7 +116,8 @@ struct vm_area_struct;
			 __GFP_MOVABLE)
 #define GFP_IOFS	(__GFP_IO | __GFP_FS)
 #define GFP_TRANSHUGE	(GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
-			 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN)
+			 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
+			 __GFP_NO_KSWAPD)

 #ifdef CONFIG_NUMA
 #define GFP_THISNODE	(__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
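An illustrative helper (assumed, not part of gfp.h) showing what the restored bit buys callers: code can test the flag directly, and since GFP_TRANSHUGE now carries __GFP_NO_KSWAPD, THP allocations take the "don't wake kswapd" path without any order-based guessing.

#include <linux/gfp.h>
#include <linux/types.h>

/* Illustrative helper, not a gfp.h API. */
static inline bool gfp_wakes_kswapd(gfp_t gfp_mask)
{
	return !(gfp_mask & __GFP_NO_KSWAPD);
}

/* e.g. gfp_wakes_kswapd(GFP_KERNEL) is true; gfp_wakes_kswapd(GFP_TRANSHUGE) is false. */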
1 change: 1 addition & 0 deletions trunk/include/trace/events/gfpflags.h
@@ -36,6 +36,7 @@
	{(unsigned long)__GFP_RECLAIMABLE,	"GFP_RECLAIMABLE"},	\
	{(unsigned long)__GFP_MOVABLE,		"GFP_MOVABLE"},		\
	{(unsigned long)__GFP_NOTRACK,		"GFP_NOTRACK"},		\
+	{(unsigned long)__GFP_NO_KSWAPD,	"GFP_NO_KSWAPD"},	\
	{(unsigned long)__GFP_OTHER_NODE,	"GFP_OTHER_NODE"}	\
	) : "GFP_NOWAIT"

38 changes: 11 additions & 27 deletions trunk/mm/page_alloc.c
@@ -2378,15 +2378,6 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
	return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
 }

-/* Returns true if the allocation is likely for THP */
-static bool is_thp_alloc(gfp_t gfp_mask, unsigned int order)
-{
-	if (order == pageblock_order &&
-	    (gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE)
-		return true;
-	return false;
-}
-
 static inline struct page *
 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
	struct zonelist *zonelist, enum zone_type high_zoneidx,
@@ -2425,10 +2416,9 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
		goto nopage;

 restart:
-	/* The decision whether to wake kswapd for THP is made later */
-	if (!is_thp_alloc(gfp_mask, order))
+	if (!(gfp_mask & __GFP_NO_KSWAPD))
		wake_all_kswapd(order, zonelist, high_zoneidx,
-				zone_idx(preferred_zone));
+						zone_idx(preferred_zone));

	/*
	 * OK, we're below the kswapd watermark and have kicked background
@@ -2498,21 +2488,15 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
		goto got_pg;
	sync_migration = true;

-	if (is_thp_alloc(gfp_mask, order)) {
-		/*
-		 * If compaction is deferred for high-order allocations, it is
-		 * because sync compaction recently failed. If this is the case
-		 * and the caller requested a movable allocation that does not
-		 * heavily disrupt the system then fail the allocation instead
-		 * of entering direct reclaim.
-		 */
-		if (deferred_compaction || contended_compaction)
-			goto nopage;
-
-		/* If process is willing to reclaim/compact then wake kswapd */
-		wake_all_kswapd(order, zonelist, high_zoneidx,
-				zone_idx(preferred_zone));
-	}
+	/*
+	 * If compaction is deferred for high-order allocations, it is because
+	 * sync compaction recently failed. If this is the case and the caller
+	 * requested a movable allocation that does not heavily disrupt the
+	 * system then fail the allocation instead of entering direct reclaim.
+	 */
+	if ((deferred_compaction || contended_compaction) &&
+						(gfp_mask & __GFP_NO_KSWAPD))
+		goto nopage;

	/* Try direct reclaim and then allocating */
	page = __alloc_pages_direct_reclaim(gfp_mask, order,
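A condensed, illustrative restatement of the policy these slowpath hunks restore (the real logic lives inline in __alloc_pages_slowpath): kswapd is only woken when __GFP_NO_KSWAPD is clear, and when compaction was deferred or contended, a __GFP_NO_KSWAPD caller fails the allocation rather than entering direct reclaim.

#include <linux/gfp.h>
#include <linux/types.h>

/* Illustrative only: mirrors the restored check, not a kernel API. */
static bool no_kswapd_alloc_should_bail(gfp_t gfp_mask,
					bool deferred_compaction,
					bool contended_compaction)
{
	/*
	 * Callers that opted out of kswapd also skip heavy direct reclaim
	 * when compaction has recently been deferred or was contended.
	 */
	return (deferred_compaction || contended_compaction) &&
	       (gfp_mask & __GFP_NO_KSWAPD);
}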
