
Merge branch 'topic/mem-encrypt' into next
This branch has some cross-arch patches that are a prerequisite for the
SVM work. They're in a topic branch in case any of the other arch
maintainers want to merge them to resolve conflicts.
Michael Ellerman committed Aug 29, 2019
2 parents bc605cd + 5cbdaee commit 07aa1e7
Showing 13 changed files with 42 additions and 46 deletions.
3 changes: 3 additions & 0 deletions arch/Kconfig
@@ -925,6 +925,9 @@ config LOCK_EVENT_COUNTS
 	  the chance of application behavior change because of timing
 	  differences. The counts are reported via debugfs.
 
+config ARCH_HAS_MEM_ENCRYPT
+	bool
+
 source "kernel/gcov/Kconfig"
 
 source "scripts/gcc-plugins/Kconfig"
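Declaring ARCH_HAS_MEM_ENCRYPT once here as a promptless bool lets each architecture opt in with a single select (as the s390 and x86 hunks below do) instead of redefining the symbol per arch. Generic code then keys off the one config option; a minimal sketch of the include-side pattern, mirroring the include/linux/mem_encrypt.h hunk later in this commit:

	#ifdef CONFIG_ARCH_HAS_MEM_ENCRYPT
	#include <asm/mem_encrypt.h>	/* arch header supplies the real helpers */
	#else
	/* fallback stub for architectures without memory encryption support */
	static inline bool mem_encrypt_active(void) { return false; }
	#endif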
4 changes: 1 addition & 3 deletions arch/s390/Kconfig
@@ -1,7 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
-config ARCH_HAS_MEM_ENCRYPT
-	def_bool y
-
 config MMU
 	def_bool y
 
@@ -68,6 +65,7 @@ config S390
 	select ARCH_HAS_GCOV_PROFILE_ALL
 	select ARCH_HAS_GIGANTIC_PAGE
 	select ARCH_HAS_KCOV
+	select ARCH_HAS_MEM_ENCRYPT
 	select ARCH_HAS_PTE_SPECIAL
 	select ARCH_HAS_SET_MEMORY
 	select ARCH_HAS_STRICT_KERNEL_RWX
5 changes: 1 addition & 4 deletions arch/s390/include/asm/mem_encrypt.h
@@ -4,10 +4,7 @@
 
 #ifndef __ASSEMBLY__
 
-#define sme_me_mask	0ULL
-
-static inline bool sme_active(void) { return false; }
-extern bool sev_active(void);
+static inline bool mem_encrypt_active(void) { return false; }
 
 int set_memory_encrypted(unsigned long addr, int numpages);
 int set_memory_decrypted(unsigned long addr, int numpages);
7 changes: 1 addition & 6 deletions arch/s390/mm/init.c
@@ -156,14 +156,9 @@ int set_memory_decrypted(unsigned long addr, int numpages)
 }
 
 /* are we a protected virtualization guest? */
-bool sev_active(void)
-{
-	return is_prot_virt_guest();
-}
-
 bool force_dma_unencrypted(struct device *dev)
 {
-	return sev_active();
+	return is_prot_virt_guest();
 }
 
 /* protected virtualization */
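With sev_active() gone from s390, force_dma_unencrypted() answers the underlying question directly: a protected virtualization guest must share DMA buffers with the hypervisor unencrypted. The hook is consumed by the dma-direct code; a simplified sketch of the caller side (not verbatim kernel code, the helper name is illustrative):

	/* Illustrative: hand a freshly allocated buffer to a device,
	 * clearing memory protection when the arch requires it. */
	static void *alloc_dma_buffer(struct device *dev, size_t size, gfp_t gfp)
	{
		void *buf = (void *)__get_free_pages(gfp, get_order(size));

		if (buf && force_dma_unencrypted(dev))
			set_memory_decrypted((unsigned long)buf,
					     1 << get_order(size));
		return buf;
	}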
4 changes: 1 addition & 3 deletions arch/x86/Kconfig
@@ -68,6 +68,7 @@ config X86
 	select ARCH_HAS_FORTIFY_SOURCE
 	select ARCH_HAS_GCOV_PROFILE_ALL
 	select ARCH_HAS_KCOV			if X86_64
+	select ARCH_HAS_MEM_ENCRYPT
 	select ARCH_HAS_MEMBARRIER_SYNC_CORE
 	select ARCH_HAS_PMEM_API		if X86_64
 	select ARCH_HAS_PTE_DEVMAP		if X86_64
@@ -1518,9 +1519,6 @@ config X86_CPA_STATISTICS
 	  helps to determine the effectiveness of preserving large and huge
 	  page mappings when mapping protections are changed.
 
-config ARCH_HAS_MEM_ENCRYPT
-	def_bool y
-
 config AMD_MEM_ENCRYPT
 	bool "AMD Secure Memory Encryption (SME) support"
 	depends on X86_64 && CPU_SUP_AMD
10 changes: 10 additions & 0 deletions arch/x86/include/asm/mem_encrypt.h
@@ -92,6 +92,16 @@ early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0;
 
 extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[];
 
+static inline bool mem_encrypt_active(void)
+{
+	return sme_me_mask;
+}
+
+static inline u64 sme_get_me_mask(void)
+{
+	return sme_me_mask;
+}
+
 #endif	/* __ASSEMBLY__ */
 
 #endif	/* __X86_MEM_ENCRYPT_H__ */
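mem_encrypt_active() relies on sme_me_mask being non-zero under both SME and SEV, so it answers "is any memory encryption on?" without distinguishing the two; sme_get_me_mask() exposes the raw mask for address arithmetic. A sketch of how callers combine them (the helper below is illustrative, not part of this commit):

	/* Illustrative: tag a physical address with the encryption bit
	 * (the SME "C-bit") when memory encryption is active. */
	static u64 phys_addr_encrypted(u64 paddr)
	{
		if (mem_encrypt_active())
			return paddr | sme_get_me_mask();
		return paddr;
	}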
5 changes: 5 additions & 0 deletions arch/x86/kernel/crash_dump_64.c
@@ -70,3 +70,8 @@ ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
 {
 	return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, true);
 }
+
+ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
+{
+	return read_from_oldmem(buf, count, ppos, 0, sev_active());
+}
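This strong definition overrides the __weak default in fs/proc/vmcore.c (updated below to pass false), so the sev_active() test lives in arch code rather than in the generic reader. The linkage pattern at work, sketched with hypothetical names (do_read and arch_encrypted are not real kernel functions):

	#include <stdbool.h>
	#include <sys/types.h>

	ssize_t do_read(char *buf, size_t len, bool encrypted);	/* hypothetical */
	bool arch_encrypted(void);				/* hypothetical */

	/* generic.c: weak default, used only if nothing overrides it */
	ssize_t __attribute__((weak)) read_hdr(char *buf, size_t len)
	{
		return do_read(buf, len, false);
	}

	/* arch.c: strong definition of the same symbol wins at link time */
	ssize_t read_hdr(char *buf, size_t len)
	{
		return do_read(buf, len, arch_encrypted());
	}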
2 changes: 0 additions & 2 deletions arch/x86/mm/mem_encrypt.c
@@ -344,13 +344,11 @@ bool sme_active(void)
 {
 	return sme_me_mask && !sev_enabled;
 }
-EXPORT_SYMBOL(sme_active);
 
 bool sev_active(void)
 {
 	return sme_me_mask && sev_enabled;
 }
-EXPORT_SYMBOL(sev_active);
 
 /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
 bool force_dma_unencrypted(struct device *dev)
8 changes: 4 additions & 4 deletions fs/proc/vmcore.c
@@ -104,9 +104,9 @@ static int pfn_is_ram(unsigned long pfn)
 }
 
 /* Reads a page from the oldmem device from given offset. */
-static ssize_t read_from_oldmem(char *buf, size_t count,
-				u64 *ppos, int userbuf,
-				bool encrypted)
+ssize_t read_from_oldmem(char *buf, size_t count,
+			 u64 *ppos, int userbuf,
+			 bool encrypted)
 {
 	unsigned long pfn, offset;
 	size_t nr_bytes;
@@ -170,7 +170,7 @@ void __weak elfcorehdr_free(unsigned long long addr)
  */
 ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
 {
-	return read_from_oldmem(buf, count, ppos, 0, sev_active());
+	return read_from_oldmem(buf, count, ppos, 0, false);
 }
 
 /*
14 changes: 14 additions & 0 deletions include/linux/crash_dump.h
@@ -115,4 +115,18 @@ static inline int vmcore_add_device_dump(struct vmcoredd_data *data)
 	return -EOPNOTSUPP;
 }
 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
+
+#ifdef CONFIG_PROC_VMCORE
+ssize_t read_from_oldmem(char *buf, size_t count,
+			 u64 *ppos, int userbuf,
+			 bool encrypted);
+#else
+static inline ssize_t read_from_oldmem(char *buf, size_t count,
+				       u64 *ppos, int userbuf,
+				       bool encrypted)
+{
+	return -EOPNOTSUPP;
+}
+#endif /* CONFIG_PROC_VMCORE */
+
 #endif /* LINUX_CRASHDUMP_H */
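The #else stub keeps callers such as the new x86 elfcorehdr_read() buildable when CONFIG_PROC_VMCORE is disabled; they see -EOPNOTSUPP at runtime instead of a link error. A hypothetical caller showing the pattern:

	#include <linux/crash_dump.h>

	/* Hypothetical: peek at the start of the old kernel's ELF header. */
	static ssize_t peek_elf_header(char *buf, size_t len)
	{
		u64 pos = 0;

		/* Compiles whether or not CONFIG_PROC_VMCORE is set; the
		 * inline stub returns -EOPNOTSUPP when vmcore is absent. */
		return read_from_oldmem(buf, len, &pos, 0, false);
	}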
15 changes: 1 addition & 14 deletions include/linux/mem_encrypt.h
@@ -18,23 +18,10 @@
 
 #else	/* !CONFIG_ARCH_HAS_MEM_ENCRYPT */
 
-#define sme_me_mask	0ULL
-
-static inline bool sme_active(void) { return false; }
-static inline bool sev_active(void) { return false; }
+static inline bool mem_encrypt_active(void) { return false; }
 
 #endif	/* CONFIG_ARCH_HAS_MEM_ENCRYPT */
 
-static inline bool mem_encrypt_active(void)
-{
-	return sme_me_mask;
-}
-
-static inline u64 sme_get_me_mask(void)
-{
-	return sme_me_mask;
-}
-
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 /*
  * The __sme_set() and __sme_clr() macros are useful for adding or removing
8 changes: 0 additions & 8 deletions kernel/dma/mapping.c
@@ -291,12 +291,6 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 }
 EXPORT_SYMBOL(dma_free_attrs);
 
-static inline void dma_check_mask(struct device *dev, u64 mask)
-{
-	if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
-		dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
-}
-
 int dma_supported(struct device *dev, u64 mask)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
@@ -327,7 +321,6 @@ int dma_set_mask(struct device *dev, u64 mask)
 		return -EIO;
 
 	arch_dma_set_mask(dev, mask);
-	dma_check_mask(dev, mask);
 	*dev->dma_mask = mask;
 	return 0;
 }
@@ -345,7 +338,6 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
 	if (!dma_supported(dev, mask))
 		return -EIO;
 
-	dma_check_mask(dev, mask);
 	dev->coherent_dma_mask = mask;
 	return 0;
 }
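dma_check_mask() was the only generic-code user of sme_active() and sme_get_me_mask(); dropping it lets those helpers become x86-private. The warning it issued is not lost: swiotlb (next file) now warns once when bounce buffering is actually used with memory encryption active. For reference, the heuristic being removed, restated as a sketch:

	/* Sketch of the dropped check: a DMA mask that cannot address the
	 * region above the encryption-bit boundary forces bounce buffering. */
	static bool mask_needs_bounce(u64 dma_mask, u64 me_mask)
	{
		return me_mask && dma_mask < ((me_mask << 1) - 1);
	}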
3 changes: 1 addition & 2 deletions kernel/dma/swiotlb.c
@@ -461,8 +461,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
 
 	if (mem_encrypt_active())
-		pr_warn_once("%s is active and system is using DMA bounce buffers\n",
-			     sme_active() ? "SME" : "SEV");
+		pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");
 
 	mask = dma_get_seg_boundary(hwdev);
 
