Skip to content

Commit

Permalink
set_memory: allow set_direct_map_*_noflush() for multiple pages
Browse files Browse the repository at this point in the history
The underlying implementations of set_direct_map_invalid_noflush() and
set_direct_map_default_noflush() allow updating multiple contiguous pages
at once.

Add numpages parameter to set_direct_map_*_noflush() to expose this
ability with these APIs.

Link: https://lkml.kernel.org/r/20210121122723.3446-5-rppt@kernel.org
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>	[arm64]
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Christopher Lameter <cl@linux.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Elena Reshetova <elena.reshetova@intel.com>
Cc: Hagen Paul Pfeifer <hagen@jauu.net>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Bottomley <jejb@linux.ibm.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michael Kerrisk <mtk.manpages@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Palmer Dabbelt <palmerdabbelt@google.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rick Edgecombe <rick.p.edgecombe@intel.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tycho Andersen <tycho@tycho.ws>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
  • Loading branch information
Mike Rapoport authored and Stephen Rothwell committed Feb 8, 2021
1 parent 057ad09 commit 19a55a2
Show file tree
Hide file tree
Showing 9 changed files with 27 additions and 24 deletions.
4 changes: 2 additions & 2 deletions arch/arm64/include/asm/cacheflush.h
Original file line number Diff line number Diff line change
Expand Up @@ -133,8 +133,8 @@ static __always_inline void __flush_icache_all(void)

int set_memory_valid(unsigned long addr, int numpages, int enable);

int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);
int set_direct_map_invalid_noflush(struct page *page, int numpages);
int set_direct_map_default_noflush(struct page *page, int numpages);
bool kernel_page_present(struct page *page);

#include <asm-generic/cacheflush.h>
Expand Down
10 changes: 6 additions & 4 deletions arch/arm64/mm/pageattr.c
Original file line number Diff line number Diff line change
Expand Up @@ -148,34 +148,36 @@ int set_memory_valid(unsigned long addr, int numpages, int enable)
__pgprot(PTE_VALID));
}

int set_direct_map_invalid_noflush(struct page *page)
int set_direct_map_invalid_noflush(struct page *page, int numpages)
{
struct page_change_data data = {
.set_mask = __pgprot(0),
.clear_mask = __pgprot(PTE_VALID),
};
unsigned long size = PAGE_SIZE * numpages;

if (!debug_pagealloc_enabled() && !rodata_full)
return 0;

return apply_to_page_range(&init_mm,
(unsigned long)page_address(page),
PAGE_SIZE, change_page_range, &data);
size, change_page_range, &data);
}

int set_direct_map_default_noflush(struct page *page)
int set_direct_map_default_noflush(struct page *page, int numpages)
{
struct page_change_data data = {
.set_mask = __pgprot(PTE_VALID | PTE_WRITE),
.clear_mask = __pgprot(PTE_RDONLY),
};
unsigned long size = PAGE_SIZE * numpages;

if (!debug_pagealloc_enabled() && !rodata_full)
return 0;

return apply_to_page_range(&init_mm,
(unsigned long)page_address(page),
PAGE_SIZE, change_page_range, &data);
size, change_page_range, &data);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
Expand Down
4 changes: 2 additions & 2 deletions arch/riscv/include/asm/set_memory.h
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,8 @@ static inline void protect_kernel_text_data(void) {}
static inline int set_memory_rw_nx(unsigned long addr, int numpages) { return 0; }
#endif

int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);
int set_direct_map_invalid_noflush(struct page *page, int numpages);
int set_direct_map_default_noflush(struct page *page, int numpages);
bool kernel_page_present(struct page *page);

#endif /* __ASSEMBLY__ */
Expand Down
8 changes: 4 additions & 4 deletions arch/riscv/mm/pageattr.c
Original file line number Diff line number Diff line change
Expand Up @@ -156,11 +156,11 @@ int set_memory_nx(unsigned long addr, int numpages)
return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_EXEC));
}

int set_direct_map_invalid_noflush(struct page *page)
int set_direct_map_invalid_noflush(struct page *page, int numpages)
{
int ret;
unsigned long start = (unsigned long)page_address(page);
unsigned long end = start + PAGE_SIZE;
unsigned long end = start + PAGE_SIZE * numpages;
struct pageattr_masks masks = {
.set_mask = __pgprot(0),
.clear_mask = __pgprot(_PAGE_PRESENT)
Expand All @@ -173,11 +173,11 @@ int set_direct_map_invalid_noflush(struct page *page)
return ret;
}

int set_direct_map_default_noflush(struct page *page)
int set_direct_map_default_noflush(struct page *page, int numpages)
{
int ret;
unsigned long start = (unsigned long)page_address(page);
unsigned long end = start + PAGE_SIZE;
unsigned long end = start + PAGE_SIZE * numpages;
struct pageattr_masks masks = {
.set_mask = PAGE_KERNEL,
.clear_mask = __pgprot(0)
Expand Down
4 changes: 2 additions & 2 deletions arch/x86/include/asm/set_memory.h
Original file line number Diff line number Diff line change
Expand Up @@ -80,8 +80,8 @@ int set_pages_wb(struct page *page, int numpages);
int set_pages_ro(struct page *page, int numpages);
int set_pages_rw(struct page *page, int numpages);

int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);
int set_direct_map_invalid_noflush(struct page *page, int numpages);
int set_direct_map_default_noflush(struct page *page, int numpages);
bool kernel_page_present(struct page *page);

extern int kernel_set_to_readonly;
Expand Down
8 changes: 4 additions & 4 deletions arch/x86/mm/pat/set_memory.c
Original file line number Diff line number Diff line change
Expand Up @@ -2184,14 +2184,14 @@ static int __set_pages_np(struct page *page, int numpages)
return __change_page_attr_set_clr(&cpa, 0);
}

/*
 * Mark @numpages direct-map pages starting at @page not-present
 * (no TLB flush).  Thin wrapper around __set_pages_np().
 */
int set_direct_map_invalid_noflush(struct page *page, int numpages)
{
	return __set_pages_np(page, numpages);
}

/*
 * Restore @numpages direct-map pages starting at @page to present
 * (no TLB flush).  Thin wrapper around __set_pages_p().
 */
int set_direct_map_default_noflush(struct page *page, int numpages)
{
	return __set_pages_p(page, numpages);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
Expand Down
4 changes: 2 additions & 2 deletions include/linux/set_memory.h
Original file line number Diff line number Diff line change
Expand Up @@ -15,11 +15,11 @@ static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
#endif

#ifndef CONFIG_ARCH_HAS_SET_DIRECT_MAP
/*
 * Stub for architectures without CONFIG_ARCH_HAS_SET_DIRECT_MAP:
 * nothing to invalidate, always succeeds.
 */
static inline int set_direct_map_invalid_noflush(struct page *page, int numpages)
{
	return 0;
}
/*
 * Stub for architectures without CONFIG_ARCH_HAS_SET_DIRECT_MAP:
 * nothing to restore, always succeeds.
 */
static inline int set_direct_map_default_noflush(struct page *page, int numpages)
{
	return 0;
}
Expand Down
4 changes: 2 additions & 2 deletions kernel/power/snapshot.c
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,7 @@ static inline void hibernate_restore_unprotect_page(void *page_address) {}
static inline void hibernate_map_page(struct page *page)
{
if (IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
int ret = set_direct_map_default_noflush(page);
int ret = set_direct_map_default_noflush(page, 1);

if (ret)
pr_warn_once("Failed to remap page\n");
Expand All @@ -99,7 +99,7 @@ static inline void hibernate_unmap_page(struct page *page)
{
if (IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
unsigned long addr = (unsigned long)page_address(page);
int ret = set_direct_map_invalid_noflush(page);
int ret = set_direct_map_invalid_noflush(page, 1);

if (ret)
pr_warn_once("Failed to remap page\n");
Expand Down
5 changes: 3 additions & 2 deletions mm/vmalloc.c
Original file line number Diff line number Diff line change
Expand Up @@ -2195,13 +2195,14 @@ struct vm_struct *remove_vm_area(const void *addr)
}

static inline void set_area_direct_map(const struct vm_struct *area,
int (*set_direct_map)(struct page *page))
int (*set_direct_map)(struct page *page,
int numpages))
{
int i;

for (i = 0; i < area->nr_pages; i++)
if (page_address(area->pages[i]))
set_direct_map(area->pages[i]);
set_direct_map(area->pages[i], 1);
}

/* Handle removing and resetting vm mappings related to the vm_struct. */
Expand Down

0 comments on commit 19a55a2

Please sign in to comment.