mm: memcg/slab: cache page number in memcg_(un)charge_slab()
There are many places in memcg_charge_slab() and memcg_uncharge_slab()
that calculate the number of pages to charge, the css references to
grab, etc., depending on the order of the slab page.

Let's simplify the code by calculating it once and caching the result
in a local variable.
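As a standalone illustration of the pattern the patch applies, here is a
minimal, self-contained C sketch: compute the page count of an order-N
slab page once, cache it in a local variable, and reuse it for every
bookkeeping step. The counters and the charge_slab_page() helper are
hypothetical stand-ins for the kernel's vmstat, percpu-ref and css
accounting, not real kernel APIs.

#include <stdio.h>

/* Hypothetical counters standing in for vmstat and reference bookkeeping. */
static unsigned long vmstat_pages;
static unsigned long kmem_refs;

/*
 * Hypothetical helper: every bookkeeping step needs the same page count,
 * so evaluate 1 << order once instead of recomputing it at each use.
 */
static void charge_slab_page(int order)
{
	unsigned int nr_pages = 1U << order;

	vmstat_pages += nr_pages;	/* statistics: pages accounted */
	kmem_refs += nr_pages;		/* references: one per charged page */
}

int main(void)
{
	charge_slab_page(3);	/* an order-3 slab page spans 2^3 = 8 pages */
	printf("vmstat_pages=%lu kmem_refs=%lu\n", vmstat_pages, kmem_refs);
	return 0;
}

With order = 3 both counters advance by 8 in one pass, mirroring how the
patch below replaces repeated (1 << order) expressions with a cached
nr_pages.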

Signed-off-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Link: http://lkml.kernel.org/r/20200109202659.752357-6-guro@fb.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Roman Gushchin authored and Linus Torvalds committed Apr 2, 2020
1 parent 92d0510 commit 9c315e4
Showing 1 changed file with 12 additions and 10 deletions.
diff --git a/mm/slab.h b/mm/slab.h
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -348,6 +348,7 @@ static __always_inline int memcg_charge_slab(struct page *page,
 					     gfp_t gfp, int order,
 					     struct kmem_cache *s)
 {
+	unsigned int nr_pages = 1 << order;
 	struct mem_cgroup *memcg;
 	struct lruvec *lruvec;
 	int ret;
@@ -360,21 +361,21 @@ static __always_inline int memcg_charge_slab(struct page *page,
 
 	if (unlikely(!memcg || mem_cgroup_is_root(memcg))) {
 		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
-				    (1 << order));
-		percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
+				    nr_pages);
+		percpu_ref_get_many(&s->memcg_params.refcnt, nr_pages);
 		return 0;
 	}
 
-	ret = memcg_kmem_charge_memcg(memcg, gfp, 1 << order);
+	ret = memcg_kmem_charge_memcg(memcg, gfp, nr_pages);
 	if (ret)
 		goto out;
 
 	lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
-	mod_lruvec_state(lruvec, cache_vmstat_idx(s), 1 << order);
+	mod_lruvec_state(lruvec, cache_vmstat_idx(s), nr_pages);
 
 	/* transer try_charge() page references to kmem_cache */
-	percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
-	css_put_many(&memcg->css, 1 << order);
+	percpu_ref_get_many(&s->memcg_params.refcnt, nr_pages);
+	css_put_many(&memcg->css, nr_pages);
 out:
 	css_put(&memcg->css);
 	return ret;
@@ -387,22 +388,23 @@ static __always_inline int memcg_charge_slab(struct page *page,
 static __always_inline void memcg_uncharge_slab(struct page *page, int order,
 						struct kmem_cache *s)
 {
+	unsigned int nr_pages = 1 << order;
 	struct mem_cgroup *memcg;
 	struct lruvec *lruvec;
 
 	rcu_read_lock();
 	memcg = READ_ONCE(s->memcg_params.memcg);
 	if (likely(!mem_cgroup_is_root(memcg))) {
 		lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
-		mod_lruvec_state(lruvec, cache_vmstat_idx(s), -(1 << order));
-		memcg_kmem_uncharge_memcg(memcg, order);
+		mod_lruvec_state(lruvec, cache_vmstat_idx(s), -nr_pages);
+		memcg_kmem_uncharge_memcg(memcg, nr_pages);
 	} else {
 		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
-				    -(1 << order));
+				    -nr_pages);
 	}
 	rcu_read_unlock();
 
-	percpu_ref_put_many(&s->memcg_params.refcnt, 1 << order);
+	percpu_ref_put_many(&s->memcg_params.refcnt, nr_pages);
 }
 
 extern void slab_init_memcg_params(struct kmem_cache *);
