mm: memcontrol: move socket code for unified hierarchy accounting
The unified hierarchy memory controller will account socket memory.
Move the infrastructure functions accordingly.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Johannes Weiner authored and Linus Torvalds committed Jan 15, 2016
1 parent 7941d21 commit 1109208
Showing 1 changed file with 74 additions and 74 deletions.
148 changes: 74 additions & 74 deletions mm/memcontrol.c
@@ -294,80 +294,6 @@ static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
 	return mem_cgroup_from_css(css);
 }
 
-/* Writing them here to avoid exposing memcg's inner layout */
-#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
-
-struct static_key memcg_sockets_enabled_key;
-EXPORT_SYMBOL(memcg_sockets_enabled_key);
-
-void sock_update_memcg(struct sock *sk)
-{
-	struct mem_cgroup *memcg;
-
-	/* Socket cloning can throw us here with sk_cgrp already
-	 * filled. It won't however, necessarily happen from
-	 * process context. So the test for root memcg given
-	 * the current task's memcg won't help us in this case.
-	 *
-	 * Respecting the original socket's memcg is a better
-	 * decision in this case.
-	 */
-	if (sk->sk_memcg) {
-		BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
-		css_get(&sk->sk_memcg->css);
-		return;
-	}
-
-	rcu_read_lock();
-	memcg = mem_cgroup_from_task(current);
-	if (memcg != root_mem_cgroup &&
-	    memcg->tcp_mem.active &&
-	    css_tryget_online(&memcg->css))
-		sk->sk_memcg = memcg;
-	rcu_read_unlock();
-}
-EXPORT_SYMBOL(sock_update_memcg);
-
-void sock_release_memcg(struct sock *sk)
-{
-	WARN_ON(!sk->sk_memcg);
-	css_put(&sk->sk_memcg->css);
-}
-
-/**
- * mem_cgroup_charge_skmem - charge socket memory
- * @memcg: memcg to charge
- * @nr_pages: number of pages to charge
- *
- * Charges @nr_pages to @memcg. Returns %true if the charge fit within
- * @memcg's configured limit, %false if the charge had to be forced.
- */
-bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
-{
-	struct page_counter *counter;
-
-	if (page_counter_try_charge(&memcg->tcp_mem.memory_allocated,
-				    nr_pages, &counter)) {
-		memcg->tcp_mem.memory_pressure = 0;
-		return true;
-	}
-	page_counter_charge(&memcg->tcp_mem.memory_allocated, nr_pages);
-	memcg->tcp_mem.memory_pressure = 1;
-	return false;
-}
-
-/**
- * mem_cgroup_uncharge_skmem - uncharge socket memory
- * @memcg - memcg to uncharge
- * @nr_pages - number of pages to uncharge
- */
-void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
-{
-	page_counter_uncharge(&memcg->tcp_mem.memory_allocated, nr_pages);
-}
-
-#endif
-
 #ifdef CONFIG_MEMCG_KMEM
 /*
  * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
@@ -5607,6 +5533,80 @@ void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage)
 	commit_charge(newpage, memcg, true);
 }
 
+/* Writing them here to avoid exposing memcg's inner layout */
+#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
+
+struct static_key memcg_sockets_enabled_key;
+EXPORT_SYMBOL(memcg_sockets_enabled_key);
+
+void sock_update_memcg(struct sock *sk)
+{
+	struct mem_cgroup *memcg;
+
+	/* Socket cloning can throw us here with sk_cgrp already
+	 * filled. It won't however, necessarily happen from
+	 * process context. So the test for root memcg given
+	 * the current task's memcg won't help us in this case.
+	 *
+	 * Respecting the original socket's memcg is a better
+	 * decision in this case.
+	 */
+	if (sk->sk_memcg) {
+		BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
+		css_get(&sk->sk_memcg->css);
+		return;
+	}
+
+	rcu_read_lock();
+	memcg = mem_cgroup_from_task(current);
+	if (memcg != root_mem_cgroup &&
+	    memcg->tcp_mem.active &&
+	    css_tryget_online(&memcg->css))
+		sk->sk_memcg = memcg;
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL(sock_update_memcg);
+
+void sock_release_memcg(struct sock *sk)
+{
+	WARN_ON(!sk->sk_memcg);
+	css_put(&sk->sk_memcg->css);
+}
+
+/**
+ * mem_cgroup_charge_skmem - charge socket memory
+ * @memcg: memcg to charge
+ * @nr_pages: number of pages to charge
+ *
+ * Charges @nr_pages to @memcg. Returns %true if the charge fit within
+ * @memcg's configured limit, %false if the charge had to be forced.
+ */
+bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
+{
+	struct page_counter *counter;
+
+	if (page_counter_try_charge(&memcg->tcp_mem.memory_allocated,
+				    nr_pages, &counter)) {
+		memcg->tcp_mem.memory_pressure = 0;
+		return true;
+	}
+	page_counter_charge(&memcg->tcp_mem.memory_allocated, nr_pages);
+	memcg->tcp_mem.memory_pressure = 1;
+	return false;
+}
+
+/**
+ * mem_cgroup_uncharge_skmem - uncharge socket memory
+ * @memcg - memcg to uncharge
+ * @nr_pages - number of pages to uncharge
+ */
+void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
+{
+	page_counter_uncharge(&memcg->tcp_mem.memory_allocated, nr_pages);
+}
+
+#endif
+
 /*
  * subsys_initcall() for memory controller.
  *
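
For readers following the series, a minimal sketch of how a networking-stack caller might consume the API being moved here. The helper name example_charge_sockmem() and the byte-to-page math are illustrative assumptions, not code from this commit; only mem_cgroup_charge_skmem(), mem_cgroup_uncharge_skmem(), and sk->sk_memcg come from the diff above.

/* Hypothetical caller, not part of this commit: charge buffer pages
 * against the socket's memcg and back out if the limit was overrun. */
static bool example_charge_sockmem(struct sock *sk, unsigned int bytes)
{
	unsigned int nr_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);

	if (!sk->sk_memcg)	/* socket was never tied to a memcg */
		return true;

	/* mem_cgroup_charge_skmem() always accounts the pages; a false
	 * return means the charge was forced past the limit, so undo it
	 * and let the caller throttle or shrink its buffers. */
	if (mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages))
		return true;

	mem_cgroup_uncharge_skmem(sk->sk_memcg, nr_pages);
	return false;
}

The asymmetry is deliberate: forcing the charge keeps the page counter accurate even on failure, which is why the sketch uncharges before reporting pressure rather than skipping the charge.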

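The exported memcg_sockets_enabled_key exists so the per-packet fast path costs nothing while no cgroup has socket accounting active. Below is a sketch of a header-side gate built on it; the macro name mem_cgroup_sockets_enabled and the helper are assumptions for illustration, not contents of this patch.

/* Hypothetical gate (not in this diff): with jump labels, the check
 * compiles down to a patched no-op branch until someone flips the
 * key with static_key_slow_inc(). */
extern struct static_key memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_key_false(&memcg_sockets_enabled_key)

static inline bool example_sk_under_pressure(struct sock *sk)
{
	if (!mem_cgroup_sockets_enabled || !sk->sk_memcg)
		return false;
	return sk->sk_memcg->tcp_mem.memory_pressure;
}

Note that memcg->tcp_mem.active in sock_update_memcg() above is what decides whether a new socket gets tagged at all, so a gate like this only needs to open once any memcg activates the feature.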