From 0c9c1d56397518eb823d458b00b06bcccd956794 Mon Sep 17 00:00:00 2001
From: Thiago Jung Bauermann <bauerman@linux.ibm.com>
Date: Tue, 6 Aug 2019 01:49:14 -0300
Subject: [PATCH 1/6] x86, s390: Move ARCH_HAS_MEM_ENCRYPT definition to
 arch/Kconfig

powerpc is also going to use this feature, so put it in a generic
location.

Signed-off-by: Thiago Jung Bauermann <bauerman@linux.ibm.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190806044919.10622-2-bauerman@linux.ibm.com
---
 arch/Kconfig      | 3 +++
 arch/s390/Kconfig | 4 +---
 arch/x86/Kconfig  | 4 +---
 3 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/arch/Kconfig b/arch/Kconfig
index a7b57dd42c266..89e2e3f64f791 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -925,6 +925,9 @@ config LOCK_EVENT_COUNTS
	  the chance of application behavior change because of timing
	  differences. The counts are reported via debugfs.
 
+config ARCH_HAS_MEM_ENCRYPT
+	bool
+
 source "kernel/gcov/Kconfig"
 
 source "scripts/gcc-plugins/Kconfig"
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index a4ad2733eedf6..f43319c444548 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -1,7 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
-config ARCH_HAS_MEM_ENCRYPT
-	def_bool y
-
 config MMU
 	def_bool y
 
@@ -68,6 +65,7 @@ config S390
 	select ARCH_HAS_GCOV_PROFILE_ALL
 	select ARCH_HAS_GIGANTIC_PAGE
 	select ARCH_HAS_KCOV
+	select ARCH_HAS_MEM_ENCRYPT
 	select ARCH_HAS_PTE_SPECIAL
 	select ARCH_HAS_SET_MEMORY
 	select ARCH_HAS_STRICT_KERNEL_RWX
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 222855cc01584..06027809c5999 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -68,6 +68,7 @@ config X86
 	select ARCH_HAS_FORTIFY_SOURCE
 	select ARCH_HAS_GCOV_PROFILE_ALL
 	select ARCH_HAS_KCOV			if X86_64
+	select ARCH_HAS_MEM_ENCRYPT
 	select ARCH_HAS_MEMBARRIER_SYNC_CORE
 	select ARCH_HAS_PMEM_API		if X86_64
 	select ARCH_HAS_PTE_DEVMAP		if X86_64
@@ -1518,9 +1519,6 @@ config X86_CPA_STATISTICS
	  helps to determine the effectiveness of preserving large and huge
	  page mappings when mapping protections are changed.
 
-config ARCH_HAS_MEM_ENCRYPT
-	def_bool y
-
 config AMD_MEM_ENCRYPT
 	bool "AMD Secure Memory Encryption (SME) support"
 	depends on X86_64 && CPU_SUP_AMD

From 47e5d8f9ed34a5310f45f48bbc15f1e61d83b6db Mon Sep 17 00:00:00 2001
From: Thiago Jung Bauermann <bauerman@linux.ibm.com>
Date: Tue, 6 Aug 2019 01:49:15 -0300
Subject: [PATCH 2/6] swiotlb: Remove call to sme_active()

sme_active() is an x86-specific function so it's better not to call it
from generic code.

There's no need to mention which memory encryption feature is active,
so just use a more generic message. Besides, other architectures will
have different names for similar technology.

Signed-off-by: Thiago Jung Bauermann <bauerman@linux.ibm.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190806044919.10622-3-bauerman@linux.ibm.com
---
 kernel/dma/swiotlb.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 9de232229063b..f29caad71e130 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -461,8 +461,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
 
 	if (mem_encrypt_active())
-		pr_warn_once("%s is active and system is using DMA bounce buffers\n",
-			     sme_active() ? "SME" : "SEV");
+		pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");
 
 	mask = dma_get_seg_boundary(hwdev);
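
For illustration (a hypothetical sketch, not part of this series): after patch 1, an
architecture opts in with `select ARCH_HAS_MEM_ENCRYPT' in its Kconfig and, once patch 4
below lands, by providing mem_encrypt_active() in its own <asm/mem_encrypt.h>. Modeled on
the s390 version; the "myarch" names are invented:

	/* arch/myarch/include/asm/mem_encrypt.h -- hypothetical example */
	#ifndef __MYARCH_MEM_ENCRYPT_H__
	#define __MYARCH_MEM_ENCRYPT_H__

	#ifndef __ASSEMBLY__

	/* True when this platform runs with encrypted memory. */
	static inline bool mem_encrypt_active(void)
	{
		return false;	/* replace with the platform's real test */
	}

	#endif /* __ASSEMBLY__ */
	#endif /* __MYARCH_MEM_ENCRYPT_H__ */

Generic code such as the swiotlb hunk above then needs only this one predicate, regardless
of whether the underlying technology is AMD SME/SEV or s390 protected virtualization.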
"SME" : "SEV"); + pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n"); mask = dma_get_seg_boundary(hwdev); From e740815a97e2b6d6446792f4328378e66de166d1 Mon Sep 17 00:00:00 2001 From: Thiago Jung Bauermann Date: Tue, 6 Aug 2019 01:49:16 -0300 Subject: [PATCH 3/6] dma-mapping: Remove dma_check_mask() sme_active() is an x86-specific function so it's better not to call it from generic code. Christoph Hellwig mentioned that "There is no reason why we should have a special debug printk just for one specific reason why there is a requirement for a large DMA mask.", so just remove dma_check_mask(). Signed-off-by: Thiago Jung Bauermann Reviewed-by: Christoph Hellwig Reviewed-by: Tom Lendacky Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20190806044919.10622-4-bauerman@linux.ibm.com --- kernel/dma/mapping.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index 1f628e7ac7097..61eeefbfcb360 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -291,12 +291,6 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr, } EXPORT_SYMBOL(dma_free_attrs); -static inline void dma_check_mask(struct device *dev, u64 mask) -{ - if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1))) - dev_warn(dev, "SME is active, device will require DMA bounce buffers\n"); -} - int dma_supported(struct device *dev, u64 mask) { const struct dma_map_ops *ops = get_dma_ops(dev); @@ -327,7 +321,6 @@ int dma_set_mask(struct device *dev, u64 mask) return -EIO; arch_dma_set_mask(dev, mask); - dma_check_mask(dev, mask); *dev->dma_mask = mask; return 0; } @@ -345,7 +338,6 @@ int dma_set_coherent_mask(struct device *dev, u64 mask) if (!dma_supported(dev, mask)) return -EIO; - dma_check_mask(dev, mask); dev->coherent_dma_mask = mask; return 0; } From 284e21fab2cfcf90dacce565e0b12f29e5df00c1 Mon Sep 17 00:00:00 2001 From: Thiago Jung Bauermann Date: Tue, 6 Aug 2019 01:49:17 -0300 Subject: [PATCH 4/6] x86, s390/mm: Move sme_active() and sme_me_mask to x86-specific header Now that generic code doesn't reference them, move sme_active() and sme_me_mask to x86's . Also remove the export for sme_active() since it's only used in files that won't be built as modules. sme_me_mask on the other hand is used in arch/x86/kvm/svm.c (via __sme_set() and __psp_pa()) which can be built as a module so its export needs to stay. 
From 284e21fab2cfcf90dacce565e0b12f29e5df00c1 Mon Sep 17 00:00:00 2001
From: Thiago Jung Bauermann <bauerman@linux.ibm.com>
Date: Tue, 6 Aug 2019 01:49:17 -0300
Subject: [PATCH 4/6] x86, s390/mm: Move sme_active() and sme_me_mask to
 x86-specific header

Now that generic code doesn't reference them, move sme_active() and
sme_me_mask to x86's <asm/mem_encrypt.h>.

Also remove the export for sme_active() since it's only used in files
that won't be built as modules. sme_me_mask, on the other hand, is used
in arch/x86/kvm/svm.c (via __sme_set() and __psp_pa()), which can be
built as a module, so its export needs to stay.

Signed-off-by: Thiago Jung Bauermann <bauerman@linux.ibm.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190806044919.10622-5-bauerman@linux.ibm.com
---
 arch/s390/include/asm/mem_encrypt.h |  4 +---
 arch/x86/include/asm/mem_encrypt.h  | 10 ++++++++++
 arch/x86/mm/mem_encrypt.c           |  1 -
 include/linux/mem_encrypt.h         | 14 +-------------
 4 files changed, 12 insertions(+), 17 deletions(-)

diff --git a/arch/s390/include/asm/mem_encrypt.h b/arch/s390/include/asm/mem_encrypt.h
index 3eb018508190e..ff813a56bc300 100644
--- a/arch/s390/include/asm/mem_encrypt.h
+++ b/arch/s390/include/asm/mem_encrypt.h
@@ -4,9 +4,7 @@
 
 #ifndef __ASSEMBLY__
 
-#define sme_me_mask	0ULL
-
-static inline bool sme_active(void) { return false; }
+static inline bool mem_encrypt_active(void) { return false; }
 extern bool sev_active(void);
 
 int set_memory_encrypted(unsigned long addr, int numpages);
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 0c196c47d6215..848ce43b9040d 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -92,6 +92,16 @@ early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0;
 extern char __start_bss_decrypted[], __end_bss_decrypted[],
	     __start_bss_decrypted_unused[];
 
+static inline bool mem_encrypt_active(void)
+{
+	return sme_me_mask;
+}
+
+static inline u64 sme_get_me_mask(void)
+{
+	return sme_me_mask;
+}
+
 #endif	/* __ASSEMBLY__ */
 
 #endif	/* __X86_MEM_ENCRYPT_H__ */
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index fece30ca8b0cb..94da5a88abe62 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -344,7 +344,6 @@ bool sme_active(void)
 {
 	return sme_me_mask && !sev_enabled;
 }
-EXPORT_SYMBOL(sme_active);
 
 bool sev_active(void)
 {
diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
index 470bd53a89df2..0c5b0ff9eb299 100644
--- a/include/linux/mem_encrypt.h
+++ b/include/linux/mem_encrypt.h
@@ -18,23 +18,11 @@
 
 #else	/* !CONFIG_ARCH_HAS_MEM_ENCRYPT */
 
-#define sme_me_mask	0ULL
-
-static inline bool sme_active(void) { return false; }
+static inline bool mem_encrypt_active(void) { return false; }
 static inline bool sev_active(void) { return false; }
 
 #endif	/* CONFIG_ARCH_HAS_MEM_ENCRYPT */
 
-static inline bool mem_encrypt_active(void)
-{
-	return sme_me_mask;
-}
-
-static inline u64 sme_get_me_mask(void)
-{
-	return sme_me_mask;
-}
-
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 /*
  * The __sme_set() and __sme_clr() macros are useful for adding or removing
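
After this patch the split is: generic code uses mem_encrypt_active(), while x86-only
code may still use the SME helpers. A hypothetical x86-only fragment (not from this
series; the function name is invented) using the __sme_set() macro mentioned in the hunk
above:

	#include <linux/types.h>
	#include <linux/mem_encrypt.h>	/* __sme_set(); pulls in <asm/mem_encrypt.h> on x86 */

	/*
	 * Tag a physical address as encrypted by ORing in the SME mask.
	 * A no-op when SME is inactive, because sme_me_mask is then zero.
	 */
	static u64 myarch_encrypted_pa(u64 pa)
	{
		return __sme_set(pa);
	}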
From ae7eb82a92fae4d255a0caa9f7c0f99e3babfec1 Mon Sep 17 00:00:00 2001
From: Thiago Jung Bauermann <bauerman@linux.ibm.com>
Date: Tue, 6 Aug 2019 01:49:18 -0300
Subject: [PATCH 5/6] fs/core/vmcore: Move sev_active() reference to x86 arch
 code

Secure Encrypted Virtualization is an x86-specific feature, so it
shouldn't appear in generic kernel code because it forces non-x86
architectures to define the sev_active() function, which doesn't make
a lot of sense.

To solve this problem, add an x86 elfcorehdr_read() function to override
the generic weak implementation. To do that, it's necessary to make
read_from_oldmem() public so that it can be used outside of vmcore.c.

Also, remove the export for sev_active() since it's only used in files
that won't be built as modules.

Signed-off-by: Thiago Jung Bauermann <bauerman@linux.ibm.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Lianbo Jiang <lijiang@redhat.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190806044919.10622-6-bauerman@linux.ibm.com
---
 arch/x86/kernel/crash_dump_64.c |  5 +++++
 arch/x86/mm/mem_encrypt.c       |  1 -
 fs/proc/vmcore.c                |  8 ++++----
 include/linux/crash_dump.h      | 14 ++++++++++++++
 include/linux/mem_encrypt.h     |  1 -
 5 files changed, 23 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
index 22369dd5de3b8..045e82e8945b3 100644
--- a/arch/x86/kernel/crash_dump_64.c
+++ b/arch/x86/kernel/crash_dump_64.c
@@ -70,3 +70,8 @@ ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
 {
 	return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, true);
 }
+
+ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
+{
+	return read_from_oldmem(buf, count, ppos, 0, sev_active());
+}
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 94da5a88abe62..9268c12458c84 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -349,7 +349,6 @@ bool sev_active(void)
 {
 	return sme_me_mask && sev_enabled;
 }
-EXPORT_SYMBOL(sev_active);
 
 /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
 bool force_dma_unencrypted(struct device *dev)
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 7bcc92add72c1..7b13988796e17 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -104,9 +104,9 @@ static int pfn_is_ram(unsigned long pfn)
 }
 
 /* Reads a page from the oldmem device from given offset. */
-static ssize_t read_from_oldmem(char *buf, size_t count,
-				u64 *ppos, int userbuf,
-				bool encrypted)
+ssize_t read_from_oldmem(char *buf, size_t count,
+			 u64 *ppos, int userbuf,
+			 bool encrypted)
 {
 	unsigned long pfn, offset;
 	size_t nr_bytes;
@@ -170,7 +170,7 @@ void __weak elfcorehdr_free(unsigned long long addr)
  */
 ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
 {
-	return read_from_oldmem(buf, count, ppos, 0, sev_active());
+	return read_from_oldmem(buf, count, ppos, 0, false);
 }
 
 /*
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index f774c5eb9e3cc..4664fc1871de6 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -115,4 +115,18 @@ static inline int vmcore_add_device_dump(struct vmcoredd_data *data)
 	return -EOPNOTSUPP;
 }
 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
+
+#ifdef CONFIG_PROC_VMCORE
+ssize_t read_from_oldmem(char *buf, size_t count,
+			 u64 *ppos, int userbuf,
+			 bool encrypted);
+#else
+static inline ssize_t read_from_oldmem(char *buf, size_t count,
+				       u64 *ppos, int userbuf,
+				       bool encrypted)
+{
+	return -EOPNOTSUPP;
+}
+#endif /* CONFIG_PROC_VMCORE */
+
 #endif /* LINUX_CRASHDUMP_H */
diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
index 0c5b0ff9eb299..5c4a18a91f898 100644
--- a/include/linux/mem_encrypt.h
+++ b/include/linux/mem_encrypt.h
@@ -19,7 +19,6 @@
 #else	/* !CONFIG_ARCH_HAS_MEM_ENCRYPT */
 
 static inline bool mem_encrypt_active(void) { return false; }
-static inline bool sev_active(void) { return false; }
 
 #endif	/* CONFIG_ARCH_HAS_MEM_ENCRYPT */
 
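
The mechanism here is the linker's weak-symbol override: fs/proc/vmcore.c keeps a __weak
default that assumes an unencrypted old memory image, and the strong x86 definition added
above wins at link time and passes sev_active() instead. Side by side (both taken from
the hunks above):

	#include <linux/crash_dump.h>	/* read_from_oldmem(), made public by this patch */

	/* Generic weak fallback (fs/proc/vmcore.c): no encryption assumed. */
	ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
	{
		return read_from_oldmem(buf, count, ppos, 0, false);
	}

	/* Strong x86 override (arch/x86/kernel/crash_dump_64.c): the ELF
	 * core header was written by the old SEV-encrypted kernel, so it
	 * must be read through the encrypted mapping when SEV is active. */
	ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
	{
		return read_from_oldmem(buf, count, ppos, 0, sev_active());
	}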
From 5cbdaeefb655072d304744812708b3f3a31c6b51 Mon Sep 17 00:00:00 2001
From: Thiago Jung Bauermann <bauerman@linux.ibm.com>
Date: Tue, 6 Aug 2019 01:49:19 -0300
Subject: [PATCH 6/6] s390/mm: Remove sev_active() function

All references to sev_active() were moved to arch/x86, so we don't need
to define it for s390 anymore.

Signed-off-by: Thiago Jung Bauermann <bauerman@linux.ibm.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Halil Pasic <pasic@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190806044919.10622-7-bauerman@linux.ibm.com
---
 arch/s390/include/asm/mem_encrypt.h | 1 -
 arch/s390/mm/init.c                 | 7 +------
 2 files changed, 1 insertion(+), 7 deletions(-)

diff --git a/arch/s390/include/asm/mem_encrypt.h b/arch/s390/include/asm/mem_encrypt.h
index ff813a56bc300..2542cbf7e2d18 100644
--- a/arch/s390/include/asm/mem_encrypt.h
+++ b/arch/s390/include/asm/mem_encrypt.h
@@ -5,7 +5,6 @@
 #ifndef __ASSEMBLY__
 
 static inline bool mem_encrypt_active(void) { return false; }
-extern bool sev_active(void);
 
 int set_memory_encrypted(unsigned long addr, int numpages);
 int set_memory_decrypted(unsigned long addr, int numpages);
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 20340a03ad90c..a124f19f7b3cc 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -156,14 +156,9 @@ int set_memory_decrypted(unsigned long addr, int numpages)
 }
 
 /* are we a protected virtualization guest? */
-bool sev_active(void)
-{
-	return is_prot_virt_guest();
-}
-
 bool force_dma_unencrypted(struct device *dev)
 {
-	return sev_active();
+	return is_prot_virt_guest();
 }
 
 /* protected virtualization */
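
For context (a simplified sketch, not this series' code, under the assumption that the
dma-direct allocator of this era consults the hook as shown): force_dma_unencrypted() is
the arch boundary the series converges on. Each architecture answers it in its own terms,
is_prot_virt_guest() on s390 and sev_active() on x86, while the generic consumer stays
arch-neutral, roughly:

	#include <linux/dma-direct.h>	/* force_dma_unencrypted() */
	#include <linux/set_memory.h>	/* set_memory_decrypted() */

	/*
	 * Hypothetical helper mirroring what the dma-direct allocator
	 * does: if the arch hook says the device cannot reach encrypted
	 * memory, remap the freshly allocated buffer decrypted.
	 */
	static void mydrv_mark_dma_buffer(struct device *dev, void *vaddr, size_t size)
	{
		if (force_dma_unencrypted(dev))
			set_memory_decrypted((unsigned long)vaddr,
					     PAGE_ALIGN(size) >> PAGE_SHIFT);
	}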