arm64: Enable memory encrypt for Realms
Use the memory encryption APIs to trigger an RSI call that requests a
transition between protected memory and shared memory (or vice versa),
and update the kernel's linear map of the modified pages to flip the
top bit of the IPA. This requires that block mappings are not used in
the direct map for realm guests.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Gavin Shan <gshan@redhat.com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Co-developed-by: Steven Price <steven.price@arm.com>
Signed-off-by: Steven Price <steven.price@arm.com>
Link: https://lore.kernel.org/r/20241017131434.40935-10-steven.price@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Suzuki K Poulose authored and Catalin Marinas committed Oct 23, 2024
1 parent 0e9cb59 commit 42be24a
Showing 6 changed files with 123 additions and 3 deletions.
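
For illustration only (not part of this commit): a minimal sketch of how a
realm guest might use the resulting API to share a page with the host and
then reclaim it. The function name demo_shared_page() is hypothetical and
error handling is trimmed.

    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/set_memory.h>

    static int demo_shared_page(void)
    {
        unsigned long addr = __get_free_page(GFP_KERNEL | __GFP_ZERO);

        if (!addr)
            return -ENOMEM;

        /* Transition to shared: triggers the RSI call, flips the top IPA bit. */
        if (set_memory_decrypted(addr, 1))
            return -EIO;    /* on failure the page must be leaked */

        /* ... exchange data with the host through the now-shared page ... */

        /* Transition back to protected before freeing. */
        if (set_memory_encrypted(addr, 1))
            return -EIO;    /* likewise leaked on failure */

        free_page(addr);
        return 0;
    }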
3 changes: 3 additions & 0 deletions arch/arm64/Kconfig
@@ -21,6 +21,7 @@ config ARM64
select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2
select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
select ARCH_HAS_CACHE_LINE_SIZE
select ARCH_HAS_CC_PLATFORM
select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEBUG_VM_PGTABLE
@@ -44,6 +45,8 @@ config ARM64
select ARCH_HAS_SETUP_DMA_OPS
select ARCH_HAS_SET_DIRECT_MAP
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_MEM_ENCRYPT
select ARCH_HAS_FORCE_DMA_UNENCRYPTED
select ARCH_STACKWALK
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_STRICT_MODULE_RWX
9 changes: 9 additions & 0 deletions arch/arm64/include/asm/mem_encrypt.h
@@ -2,6 +2,8 @@
#ifndef __ASM_MEM_ENCRYPT_H
#define __ASM_MEM_ENCRYPT_H

#include <asm/rsi.h>

struct arm64_mem_crypt_ops {
int (*encrypt)(unsigned long addr, int numpages);
int (*decrypt)(unsigned long addr, int numpages);
@@ -12,4 +14,11 @@ int arm64_mem_crypt_ops_register(const struct arm64_mem_crypt_ops *ops);
int set_memory_encrypted(unsigned long addr, int numpages);
int set_memory_decrypted(unsigned long addr, int numpages);

int realm_register_memory_enc_ops(void);

static inline bool force_dma_unencrypted(struct device *dev)
{
return is_realm_world();
}

#endif /* __ASM_MEM_ENCRYPT_H */
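
Note: force_dma_unencrypted() is consulted by the generic DMA-direct layer
when translating addresses for a device; roughly like this (paraphrased from
kernel/dma/direct.c, exact details may differ):

    static inline dma_addr_t phys_to_dma_direct(struct device *dev,
            phys_addr_t phys)
    {
        /* In a realm, DMA buffers must live in the shared (unencrypted) alias. */
        if (force_dma_unencrypted(dev))
            return phys_to_dma_unencrypted(dev, phys);
        return phys_to_dma(dev, phys);
    }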
5 changes: 5 additions & 0 deletions arch/arm64/include/asm/pgtable.h
@@ -684,6 +684,11 @@ static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
#define pgprot_nx(prot) \
__pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN)

#define pgprot_decrypted(prot) \
__pgprot_modify(prot, PROT_NS_SHARED, PROT_NS_SHARED)
#define pgprot_encrypted(prot) \
__pgprot_modify(prot, PROT_NS_SHARED, 0)

/*
* Mark the prot value as uncacheable and unbufferable.
*/
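
Note: these macros select between the protected and shared IPA alias of a
mapping by toggling PROT_NS_SHARED (the top IPA bit). For instance, the
realm_ioremap_hook() registered in rsi.c below applies them roughly like this
(a sketch based on an earlier patch in this series; __arm64_is_protected_mmio()
comes from that patch):

    static int realm_ioremap_hook(phys_addr_t phys, size_t size, pgprot_t *prot)
    {
        /* Protected MMIO keeps the top IPA bit clear ... */
        if (__arm64_is_protected_mmio(phys, size))
            *prot = pgprot_encrypted(*prot);
        else    /* ... shared (host-emulated) MMIO sets it */
            *prot = pgprot_decrypted(*prot);

        return 0;
    }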
3 changes: 3 additions & 0 deletions arch/arm64/include/asm/set_memory.h
@@ -15,4 +15,7 @@ int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);
bool kernel_page_present(struct page *page);

int set_memory_encrypted(unsigned long addr, int numpages);
int set_memory_decrypted(unsigned long addr, int numpages);

#endif /* _ASM_ARM64_SET_MEMORY_H */
16 changes: 16 additions & 0 deletions arch/arm64/kernel/rsi.c
@@ -7,8 +7,10 @@
#include <linux/memblock.h>
#include <linux/psci.h>
#include <linux/swiotlb.h>
#include <linux/cc_platform.h>

#include <asm/io.h>
#include <asm/mem_encrypt.h>
#include <asm/rsi.h>

static struct realm_config config;
@@ -19,6 +21,17 @@ EXPORT_SYMBOL(prot_ns_shared);
DEFINE_STATIC_KEY_FALSE_RO(rsi_present);
EXPORT_SYMBOL(rsi_present);

bool cc_platform_has(enum cc_attr attr)
{
switch (attr) {
case CC_ATTR_MEM_ENCRYPT:
return is_realm_world();
default:
return false;
}
}
EXPORT_SYMBOL_GPL(cc_platform_has);

static bool rsi_version_matches(void)
{
unsigned long ver_lower, ver_higher;
@@ -119,6 +132,9 @@ void __init arm64_rsi_init(void)
if (arm64_ioremap_prot_hook_register(realm_ioremap_hook))
return;

if (realm_register_memory_enc_ops())
return;

arm64_rsi_setup_memory();

static_branch_enable(&rsi_present);
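
Note: with CC_ATTR_MEM_ENCRYPT now reported, generic code can detect a realm
guest without any arm64-specific calls. An illustrative (hypothetical) check:

    #include <linux/cc_platform.h>

    static bool my_driver_needs_shared_buffers(void)
    {
        /* True inside a realm after this patch; false otherwise on arm64. */
        return cc_platform_has(CC_ATTR_MEM_ENCRYPT);
    }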
90 changes: 87 additions & 3 deletions arch/arm64/mm/pageattr.c
@@ -5,10 +5,12 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mem_encrypt.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/pgtable-prot.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>
#include <asm/kfence.h>
@@ -23,14 +25,16 @@ bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED
 bool can_set_direct_map(void)
 {
 	/*
-	 * rodata_full and DEBUG_PAGEALLOC require linear map to be
-	 * mapped at page granularity, so that it is possible to
+	 * rodata_full, DEBUG_PAGEALLOC and a Realm guest all require linear
+	 * map to be mapped at page granularity, so that it is possible to
 	 * protect/unprotect single pages.
 	 *
 	 * KFENCE pool requires page-granular mapping if initialized late.
+	 *
+	 * Realms need to make pages shared/protected at page granularity.
 	 */
 	return rodata_full || debug_pagealloc_enabled() ||
-	       arm64_kfence_can_set_direct_map();
+	       arm64_kfence_can_set_direct_map() || is_realm_world();
 }

static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
@@ -198,6 +202,86 @@ int set_direct_map_default_noflush(struct page *page)
PAGE_SIZE, change_page_range, &data);
}

static int __set_memory_enc_dec(unsigned long addr,
int numpages,
bool encrypt)
{
unsigned long set_prot = 0, clear_prot = 0;
phys_addr_t start, end;
int ret;

if (!is_realm_world())
return 0;

if (!__is_lm_address(addr))
return -EINVAL;

start = __virt_to_phys(addr);
end = start + numpages * PAGE_SIZE;

if (encrypt)
clear_prot = PROT_NS_SHARED;
else
set_prot = PROT_NS_SHARED;

/*
* Break the mapping before we make any changes to avoid stale TLB
* entries or Synchronous External Aborts caused by RIPAS_EMPTY
*/
ret = __change_memory_common(addr, PAGE_SIZE * numpages,
__pgprot(set_prot),
__pgprot(clear_prot | PTE_VALID));

if (ret)
return ret;

if (encrypt)
ret = rsi_set_memory_range_protected(start, end);
else
ret = rsi_set_memory_range_shared(start, end);

if (ret)
return ret;

return __change_memory_common(addr, PAGE_SIZE * numpages,
__pgprot(PTE_VALID),
__pgprot(0));
}

static int realm_set_memory_encrypted(unsigned long addr, int numpages)
{
int ret = __set_memory_enc_dec(addr, numpages, true);

/*
* If the request to change state fails, then the only sensible course
* of action for the caller is to leak the memory
*/
WARN(ret, "Failed to encrypt memory, %d pages will be leaked",
numpages);

return ret;
}

static int realm_set_memory_decrypted(unsigned long addr, int numpages)
{
int ret = __set_memory_enc_dec(addr, numpages, false);

WARN(ret, "Failed to decrypt memory, %d pages will be leaked",
numpages);

return ret;
}

static const struct arm64_mem_crypt_ops realm_crypt_ops = {
.encrypt = realm_set_memory_encrypted,
.decrypt = realm_set_memory_decrypted,
};

int realm_register_memory_enc_ops(void)
{
return arm64_mem_crypt_ops_register(&realm_crypt_ops);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
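
Note: set_memory_encrypted()/set_memory_decrypted() reach the ops registered
above via a small dispatcher added earlier in this series; a rough sketch
(paraphrased from arch/arm64/mm/mem_encrypt.c, details may differ):

    static const struct arm64_mem_crypt_ops *crypt_ops;

    int set_memory_encrypted(unsigned long addr, int numpages)
    {
        /* No-op unless a backend (such as the realm ops above) is registered. */
        if (likely(!crypt_ops) || WARN_ON(!PAGE_ALIGNED(addr)))
            return 0;

        return crypt_ops->encrypt(addr, numpages);
    }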
