secretmem: add memcg accounting
Account memory consumed by secretmem to memcg.  The accounting is updated
when the memory is actually allocated and freed.
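
For illustration only, a hedged userspace sketch of how the new charge path gets exercised: the pool chunk backing a secret mapping is allocated on first fault, so a program like the one below ends up with that chunk charged to its memory cgroup. The __NR_memfd_secret syscall number and the zero flags argument are assumptions taken from the wider memfd_secret() series, not something this patch defines.

/*
 * Hedged sketch, not part of this patch: fault in a secret mapping so
 * the backing pool chunk is allocated and, with this change, charged
 * to the caller's memory cgroup.  Assumes __NR_memfd_secret is defined
 * by the headers of the tree this series is applied to.
 */
#define _GNU_SOURCE
#include <sys/mman.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	size_t len = 2 * 1024 * 1024;	/* one PMD-sized pool chunk (2M on x86-64) */
	int fd = syscall(__NR_memfd_secret, 0);
	char *p;

	if (fd < 0 || ftruncate(fd, len) < 0)
		return 1;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	/* The first write faults the pages in; secretmem_pool_increase()
	 * runs here and records the memcg charge. */
	memset(p, 0xa5, len);

	munmap(p, len);
	close(fd);	/* tearing down the pool later uncharges the memory */
	return 0;
}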

Link: https://lkml.kernel.org/r/20210121122723.3446-9-rppt@kernel.org
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Acked-by: Roman Gushchin <guro@fb.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christopher Lameter <cl@linux.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Elena Reshetova <elena.reshetova@intel.com>
Cc: Hagen Paul Pfeifer <hagen@jauu.net>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Bottomley <jejb@linux.ibm.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michael Kerrisk <mtk.manpages@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Palmer Dabbelt <palmerdabbelt@google.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rick Edgecombe <rick.p.edgecombe@intel.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tycho Andersen <tycho@tycho.ws>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Mike Rapoport authored and Stephen Rothwell committed Feb 8, 2021
1 parent 437e9b1 commit b0ac51c
Showing 2 changed files with 37 additions and 2 deletions.
3 changes: 2 additions & 1 deletion mm/filemap.c
@@ -30,6 +30,7 @@
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/secretmem.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/cpuset.h>
@@ -841,7 +842,7 @@ noinline int __add_to_page_cache_locked(struct page *page,
page->mapping = mapping;
page->index = offset;

- if (!huge) {
+ if (!huge && !page_is_secretmem(page)) {
error = mem_cgroup_charge(page, current->mm, gfp);
if (error)
goto error;
36 changes: 35 additions & 1 deletion mm/secretmem.c
@@ -18,6 +18,7 @@
#include <linux/memblock.h>
#include <linux/pseudo_fs.h>
#include <linux/secretmem.h>
#include <linux/memcontrol.h>
#include <linux/set_memory.h>
#include <linux/sched/signal.h>

@@ -44,6 +45,32 @@ struct secretmem_ctx {

static struct cma *secretmem_cma;

static int secretmem_account_pages(struct page *page, gfp_t gfp, int order)
{
int err;

err = memcg_kmem_charge_page(page, gfp, order);
if (err)
return err;

/*
* secretmem caches are unreclaimable kernel allocations, so treat
* them as unreclaimable slab memory for VM statistics purposes
*/
mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
PAGE_SIZE << order);

return 0;
}

static void secretmem_unaccount_pages(struct page *page, int order)
{
mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
-PAGE_SIZE << order);
memcg_kmem_uncharge_page(page, order);
}

static int secretmem_pool_increase(struct secretmem_ctx *ctx, gfp_t gfp)
{
unsigned long nr_pages = (1 << PMD_PAGE_ORDER);
@@ -56,6 +83,10 @@ static int secretmem_pool_increase(struct secretmem_ctx *ctx, gfp_t gfp)
if (!page)
return -ENOMEM;

err = secretmem_account_pages(page, gfp, PMD_PAGE_ORDER);
if (err)
goto err_cma_release;

/*
* clear the data left from the previous user before dropping the
* pages from the direct map
@@ -65,7 +96,7 @@ static int secretmem_pool_increase(struct secretmem_ctx *ctx, gfp_t gfp)

err = set_direct_map_invalid_noflush(page, nr_pages);
if (err)
- goto err_cma_release;
+ goto err_memcg_uncharge;

addr = (unsigned long)page_address(page);
err = gen_pool_add(pool, addr, PMD_SIZE, NUMA_NO_NODE);
@@ -83,6 +114,8 @@ static int secretmem_pool_increase(struct secretmem_ctx *ctx, gfp_t gfp)
* won't fail
*/
set_direct_map_default_noflush(page, nr_pages);
err_memcg_uncharge:
secretmem_unaccount_pages(page, PMD_PAGE_ORDER);
err_cma_release:
cma_release(secretmem_cma, page, nr_pages);
return err;
@@ -314,6 +347,7 @@ static void secretmem_cleanup_chunk(struct gen_pool *pool,
int i;

set_direct_map_default_noflush(page, nr_pages);
secretmem_unaccount_pages(page, PMD_PAGE_ORDER);

for (i = 0; i < nr_pages; i++)
clear_highpage(page + i);
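Because the charged chunks are reported through NR_SLAB_UNRECLAIMABLE_B (see the comment in secretmem_account_pages() above), the accounting becomes visible through the cgroup v2 interface: memory.current grows by the chunk size, and the slab_unreclaimable line of memory.stat grows with it. A minimal, hedged way to observe that, assuming cgroup2 is mounted at /sys/fs/cgroup and the process was moved into a hypothetical secretmem-test group beforehand:

#include <stdio.h>

/*
 * Hedged sketch: read a cgroup v2 counter so the delta around a
 * secretmem allocation can be observed.  The group path referenced in
 * the usage note is a made-up example, not something this patch creates.
 */
static long long read_counter(const char *path)
{
	FILE *f = fopen(path, "r");
	long long val = -1;

	if (f) {
		if (fscanf(f, "%lld", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

/*
 * Usage: call read_counter("/sys/fs/cgroup/secretmem-test/memory.current")
 * before and after faulting the secret mapping from the earlier sketch;
 * the delta should be roughly one PMD-sized chunk, and the same amount
 * appears under "slab_unreclaimable" in memory.stat thanks to the
 * mod_lruvec_page_state() call added above.
 */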
