Commit
---
yaml
---
r: 336259
b: refs/heads/master
c: 1c95df8
h: refs/heads/master
i:
  336257: 65425fa
  336255: 0b2052c
v: v3
Neal Cardwell authored and David S. Miller committed Dec 9, 2012
1 parent a6409fc commit a364e62
Showing 7 changed files with 77 additions and 42 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: caf491916b1c1e939a2c7575efb7a77f11fc9bdf
+refs/heads/master: 1c95df85ca49640576de2f0a850925957b547b84
6 changes: 3 additions & 3 deletions trunk/drivers/gpu/drm/i915/i915_gem.c
@@ -1796,7 +1796,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
*/
mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
gfp = mapping_gfp_mask(mapping);
-gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
+gfp |= __GFP_NORETRY | __GFP_NOWARN;
gfp &= ~(__GFP_IO | __GFP_WAIT);
for_each_sg(st->sgl, sg, page_count, i) {
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
@@ -1809,15 +1809,15 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
* our own buffer, now let the real VM do its job and
* go down in flames if truly OOM.
*/
-gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
+gfp &= ~(__GFP_NORETRY | __GFP_NOWARN);
gfp |= __GFP_IO | __GFP_WAIT;

i915_gem_shrink_all(dev_priv);
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
if (IS_ERR(page))
goto err_pages;

-gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
+gfp |= __GFP_NORETRY | __GFP_NOWARN;
gfp &= ~(__GFP_IO | __GFP_WAIT);
}

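Note the pattern preserved by the hunks above: the first attempt per page uses a deliberately cheap mask (__GFP_NORETRY | __GFP_NOWARN set, __GFP_IO and __GFP_WAIT cleared), and only after i915_gem_shrink_all() is the full-strength mask restored for one more try; the commit simply drops __GFP_NO_KSWAPD from both sides of that toggle. A minimal userspace sketch of the same two-pass toggle, with made-up flag bits and a stub allocator standing in for shmem_read_mapping_page_gfp():

#include <stdio.h>
#include <stdbool.h>

/* Illustrative bit values only; the real flags live in include/linux/gfp.h. */
#define GFP_WAIT    0x01u
#define GFP_IO      0x02u
#define GFP_NORETRY 0x04u
#define GFP_NOWARN  0x08u

/* Stub: pretend the cheap attempt fails and the aggressive one succeeds. */
static bool try_alloc(unsigned int gfp)
{
	return (gfp & GFP_WAIT) && (gfp & GFP_IO);
}

int main(void)
{
	unsigned int gfp = GFP_WAIT | GFP_IO;      /* baseline mapping mask */

	/* Pass 1: opportunistic, must not block or start I/O. */
	gfp |= GFP_NORETRY | GFP_NOWARN;
	gfp &= ~(GFP_IO | GFP_WAIT);
	if (!try_alloc(gfp)) {
		/* Pass 2: shrink caches first (i915_gem_shrink_all in the
		 * driver), then let the allocator block, do I/O and retry. */
		gfp &= ~(GFP_NORETRY | GFP_NOWARN);
		gfp |= GFP_IO | GFP_WAIT;
		printf("retrying with full-effort mask 0x%x: %s\n",
		       gfp, try_alloc(gfp) ? "ok" : "failed");
	}
	return 0;
}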
6 changes: 2 additions & 4 deletions trunk/drivers/mtd/mtdcore.c
@@ -1077,8 +1077,7 @@ EXPORT_SYMBOL_GPL(mtd_writev);
* until the request succeeds or until the allocation size falls below
* the system page size. This attempts to make sure it does not adversely
* impact system performance, so when allocating more than one page, we
-* ask the memory allocator to avoid re-trying, swapping, writing back
-* or performing I/O.
+* ask the memory allocator to avoid re-trying.
*
* Note, this function also makes sure that the allocated buffer is aligned to
* the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
@@ -1092,8 +1091,7 @@ EXPORT_SYMBOL_GPL(mtd_writev);
*/
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
{
-gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
-__GFP_NORETRY | __GFP_NO_KSWAPD;
+gfp_t flags = __GFP_NOWARN | __GFP_WAIT | __GFP_NORETRY;
size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
void *kbuf;

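The comment updated above describes mtd_kmalloc_up_to()'s strategy: try the requested size with non-intrusive flags and keep shrinking the request, never going below the page size / mtd->writesize, until an allocation succeeds. A rough userspace analogue of that loop (a sketch only, with plain malloc() in place of kmalloc() and a caller-supplied min_alloc standing in for the writesize/PAGE_SIZE minimum):

#include <stdio.h>
#include <stdlib.h>

/* Halve *size (rounded down to a multiple of min_alloc) until an allocation
 * succeeds or *size would drop below min_alloc; returns NULL on failure.
 * The kernel version uses kmalloc() with __GFP_NOWARN | __GFP_WAIT |
 * __GFP_NORETRY for the over-sized attempts. */
static void *kmalloc_up_to(size_t min_alloc, size_t *size)
{
	while (*size >= min_alloc) {
		void *buf = malloc(*size);
		if (buf)
			return buf;
		/* halve, then keep the size aligned to min_alloc */
		*size = (*size >> 1) / min_alloc * min_alloc;
	}
	return NULL;
}

int main(void)
{
	size_t size = 8u << 20;                 /* ask for 8 MiB */
	void *buf = kmalloc_up_to(4096, &size);

	printf("got %zu bytes at %p\n", buf ? size : (size_t)0, buf);
	free(buf);
	return 0;
}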
13 changes: 5 additions & 8 deletions trunk/include/linux/gfp.h
@@ -30,10 +30,9 @@ struct vm_area_struct;
#define ___GFP_HARDWALL 0x20000u
#define ___GFP_THISNODE 0x40000u
#define ___GFP_RECLAIMABLE 0x80000u
-#define ___GFP_NOTRACK 0x200000u
-#define ___GFP_NO_KSWAPD 0x400000u
-#define ___GFP_OTHER_NODE 0x800000u
-#define ___GFP_WRITE 0x1000000u
+#define ___GFP_NOTRACK 0x100000u
+#define ___GFP_OTHER_NODE 0x200000u
+#define ___GFP_WRITE 0x400000u

/*
* GFP bitmasks..
@@ -86,7 +85,6 @@ struct vm_area_struct;
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */

-#define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */

@@ -96,7 +94,7 @@ struct vm_area_struct;
*/
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)

-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
+#define __GFP_BITS_SHIFT 23 /* Room for N __GFP_FOO bits */
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

/* This equals 0, but use constants in case they ever change */
@@ -116,8 +114,7 @@ struct vm_area_struct;
__GFP_MOVABLE)
#define GFP_IOFS (__GFP_IO | __GFP_FS)
#define GFP_TRANSHUGE (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
-__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
-__GFP_NO_KSWAPD)
+__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN)

#ifdef CONFIG_NUMA
#define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
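With ___GFP_NO_KSWAPD gone, the remaining high flags are renumbered downwards (0x100000, 0x200000, 0x400000) and __GFP_BITS_SHIFT shrinks from 25 to 23. A tiny standalone check of that arithmetic, using only the values visible in the hunk above (names changed to avoid the kernel's reserved double-underscore identifiers):

#include <stdio.h>

#define GFP_NOTRACK     0x100000u   /* values taken from the new gfp.h hunk */
#define GFP_OTHER_NODE  0x200000u
#define GFP_WRITE       0x400000u
#define GFP_BITS_SHIFT  23
#define GFP_BITS_MASK   ((1u << GFP_BITS_SHIFT) - 1)

/* Each flag must occupy its own bit and still fit under the shrunken mask. */
_Static_assert((GFP_NOTRACK & GFP_OTHER_NODE) == 0, "bits overlap");
_Static_assert((GFP_OTHER_NODE & GFP_WRITE) == 0, "bits overlap");
_Static_assert((GFP_NOTRACK | GFP_OTHER_NODE | GFP_WRITE) <= GFP_BITS_MASK,
	       "flag escapes __GFP_BITS_MASK");

int main(void)
{
	printf("highest flag is bit 22, mask covers bits 0..%d\n",
	       GFP_BITS_SHIFT - 1);
	return 0;
}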
1 change: 0 additions & 1 deletion trunk/include/trace/events/gfpflags.h
@@ -36,7 +36,6 @@
{(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \
{(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"}, \
{(unsigned long)__GFP_NOTRACK, "GFP_NOTRACK"}, \
-{(unsigned long)__GFP_NO_KSWAPD, "GFP_NO_KSWAPD"}, \
{(unsigned long)__GFP_OTHER_NODE, "GFP_OTHER_NODE"} \
) : "GFP_NOWAIT"

38 changes: 27 additions & 11 deletions trunk/mm/page_alloc.c
@@ -2378,6 +2378,15 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
}

+/* Returns true if the allocation is likely for THP */
+static bool is_thp_alloc(gfp_t gfp_mask, unsigned int order)
+{
+if (order == pageblock_order &&
+(gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE)
+return true;
+return false;
+}

static inline struct page *
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
@@ -2416,9 +2425,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
goto nopage;

restart:
-if (!(gfp_mask & __GFP_NO_KSWAPD))
+/* The decision whether to wake kswapd for THP is made later */
+if (!is_thp_alloc(gfp_mask, order))
wake_all_kswapd(order, zonelist, high_zoneidx,
-zone_idx(preferred_zone));
+zone_idx(preferred_zone));

/*
* OK, we're below the kswapd watermark and have kicked background
@@ -2488,15 +2498,21 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
goto got_pg;
sync_migration = true;

-/*
-* If compaction is deferred for high-order allocations, it is because
-* sync compaction recently failed. In this is the case and the caller
-* requested a movable allocation that does not heavily disrupt the
-* system then fail the allocation instead of entering direct reclaim.
-*/
-if ((deferred_compaction || contended_compaction) &&
-(gfp_mask & __GFP_NO_KSWAPD))
-goto nopage;
+if (is_thp_alloc(gfp_mask, order)) {
+/*
+* If compaction is deferred for high-order allocations, it is
+* because sync compaction recently failed. If this is the case
+* and the caller requested a movable allocation that does not
+* heavily disrupt the system then fail the allocation instead
+* of entering direct reclaim.
+*/
+if (deferred_compaction || contended_compaction)
+goto nopage;
+
+/* If process is willing to reclaim/compact then wake kswapd */
+wake_all_kswapd(order, zonelist, high_zoneidx,
+zone_idx(preferred_zone));
+}

/* Try direct reclaim and then allocating */
page = __alloc_pages_direct_reclaim(gfp_mask, order,
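The new is_thp_alloc() helper replaces the explicit __GFP_NO_KSWAPD checks with a heuristic: an allocation is treated as a likely transparent-huge-page attempt when it is exactly pageblock_order and carries __GFP_MOVABLE without __GFP_REPEAT, which is what a GFP_TRANSHUGE allocation looks like after this patch. A standalone sketch of that predicate; the flag values and order below are illustrative stand-ins for the real gfp.h and pageblock definitions:

#include <stdio.h>
#include <stdbool.h>

/* Illustrative values only; see include/linux/gfp.h for the real flags. */
#define GFP_MOVABLE     0x08u
#define GFP_REPEAT      0x400u
#define PAGEBLOCK_ORDER 9       /* e.g. 2 MiB pageblocks with 4 KiB pages */

/* Mirrors the new helper: movable, not __GFP_REPEAT, exactly pageblock_order. */
static bool is_thp_alloc(unsigned int gfp_mask, unsigned int order)
{
	return order == PAGEBLOCK_ORDER &&
	       (gfp_mask & (GFP_MOVABLE | GFP_REPEAT)) == GFP_MOVABLE;
}

int main(void)
{
	/* A GFP_TRANSHUGE-style request: movable, order 9, no __GFP_REPEAT,
	 * so kswapd wakeup is deferred and direct reclaim may be skipped. */
	printf("THP-like: %d\n", is_thp_alloc(GFP_MOVABLE, 9));
	/* A retrying (e.g. hugetlb-style) request sets __GFP_REPEAT, so it
	 * still wakes kswapd up front and may enter direct reclaim. */
	printf("not THP:  %d\n", is_thp_alloc(GFP_MOVABLE | GFP_REPEAT, 9));
	return 0;
}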
53 changes: 39 additions & 14 deletions trunk/net/ipv4/inet_diag.c
@@ -44,6 +44,10 @@ struct inet_diag_entry {
u16 dport;
u16 family;
u16 userlocks;
+#if IS_ENABLED(CONFIG_IPV6)
+struct in6_addr saddr_storage; /* for IPv4-mapped-IPv6 addresses */
+struct in6_addr daddr_storage; /* for IPv4-mapped-IPv6 addresses */
+#endif
};

static DEFINE_MUTEX(inet_diag_table_mutex);
@@ -596,6 +600,36 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}

+/* Get the IPv4, IPv6, or IPv4-mapped-IPv6 local and remote addresses
+* from a request_sock. For IPv4-mapped-IPv6 we must map IPv4 to IPv6.
+*/
+static inline void inet_diag_req_addrs(const struct sock *sk,
+const struct request_sock *req,
+struct inet_diag_entry *entry)
+{
+struct inet_request_sock *ireq = inet_rsk(req);
+
+#if IS_ENABLED(CONFIG_IPV6)
+if (sk->sk_family == AF_INET6) {
+if (req->rsk_ops->family == AF_INET6) {
+entry->saddr = inet6_rsk(req)->loc_addr.s6_addr32;
+entry->daddr = inet6_rsk(req)->rmt_addr.s6_addr32;
+} else if (req->rsk_ops->family == AF_INET) {
+ipv6_addr_set_v4mapped(ireq->loc_addr,
+&entry->saddr_storage);
+ipv6_addr_set_v4mapped(ireq->rmt_addr,
+&entry->daddr_storage);
+entry->saddr = entry->saddr_storage.s6_addr32;
+entry->daddr = entry->daddr_storage.s6_addr32;
+}
+} else
+#endif
+{
+entry->saddr = &ireq->loc_addr;
+entry->daddr = &ireq->rmt_addr;
+}
+}

static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
struct request_sock *req,
struct user_namespace *user_ns,
@@ -637,8 +671,10 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
r->idiag_inode = 0;
#if IS_ENABLED(CONFIG_IPV6)
if (r->idiag_family == AF_INET6) {
-*(struct in6_addr *)r->id.idiag_src = inet6_rsk(req)->loc_addr;
-*(struct in6_addr *)r->id.idiag_dst = inet6_rsk(req)->rmt_addr;
+struct inet_diag_entry entry;
+inet_diag_req_addrs(sk, req, &entry);
+memcpy(r->id.idiag_src, entry.saddr, sizeof(struct in6_addr));
+memcpy(r->id.idiag_dst, entry.daddr, sizeof(struct in6_addr));
}
#endif

@@ -691,18 +727,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
continue;

if (bc) {
-entry.saddr =
-#if IS_ENABLED(CONFIG_IPV6)
-(entry.family == AF_INET6) ?
-inet6_rsk(req)->loc_addr.s6_addr32 :
-#endif
-&ireq->loc_addr;
-entry.daddr =
-#if IS_ENABLED(CONFIG_IPV6)
-(entry.family == AF_INET6) ?
-inet6_rsk(req)->rmt_addr.s6_addr32 :
-#endif
-&ireq->rmt_addr;
+inet_diag_req_addrs(sk, req, &entry);
entry.dport = ntohs(ireq->rmt_port);

if (!inet_diag_bc_run(bc, &entry))
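The inet_diag_req_addrs() helper added above exists because an AF_INET6 listener can hold IPv4 request sockets; their endpoints have to be reported as IPv4-mapped IPv6 addresses (::ffff:a.b.c.d), which is what ipv6_addr_set_v4mapped() writes into the new *_storage fields. A small userspace illustration of that mapping, using only the standard socket headers:

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/* Userspace equivalent of the kernel's ipv6_addr_set_v4mapped():
 * 80 zero bits, 16 one bits, then the 32-bit IPv4 address. */
static void set_v4mapped(struct in_addr v4, struct in6_addr *v6)
{
	memset(v6, 0, sizeof(*v6));
	v6->s6_addr[10] = 0xff;
	v6->s6_addr[11] = 0xff;
	memcpy(&v6->s6_addr[12], &v4.s_addr, 4);
}

int main(void)
{
	struct in_addr v4;
	struct in6_addr v6;
	char buf[INET6_ADDRSTRLEN];

	inet_pton(AF_INET, "192.0.2.1", &v4);
	set_v4mapped(v4, &v6);
	inet_ntop(AF_INET6, &v6, buf, sizeof(buf));
	printf("%s\n", buf);    /* prints ::ffff:192.0.2.1 */
	return 0;
}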
