Skip to content

Commit

Permalink
powerpc/pseries/svm: Use shared memory for LPPACA structures
Browse files Browse the repository at this point in the history
LPPACA structures need to be shared with the host. Hence they need to be in
shared memory. Instead of allocating individual chunks of memory for a
given structure from memblock, a contiguous chunk of memory is allocated
and then converted into shared memory. Subsequent allocation requests will
come from the contiguous chunk which will be always shared memory for all
structures.

While we are able to use a kmem_cache constructor for the Debug Trace Log,
LPPACAs are allocated very early in the boot process (before SLUB is
available) so we need to use a simpler scheme here.

Introduce helper is_secure_guest() which uses the S bit of the MSR to tell
whether we're running as a secure guest.

Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Signed-off-by: Thiago Jung Bauermann <bauerman@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190820021326.6884-9-bauerman@linux.ibm.com
  • Loading branch information
Anshuman Khandual authored and Michael Ellerman committed Aug 29, 2019
1 parent e311a92 commit bd104e6
Show file tree
Hide file tree
Showing 2 changed files with 68 additions and 1 deletion.
26 changes: 26 additions & 0 deletions arch/powerpc/include/asm/svm.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * SVM helper functions
 *
 * Copyright 2018 Anshuman Khandual, IBM Corporation.
 */

#ifndef _ASM_POWERPC_SVM_H
#define _ASM_POWERPC_SVM_H

#ifdef CONFIG_PPC_SVM

/* For mfmsr() and MSR_S; makes this header self-contained. */
#include <asm/reg.h>

/*
 * is_secure_guest() - detect whether we are running as a secure guest.
 *
 * A secure virtual machine runs with the S (secure) bit set in the MSR,
 * so reading the MSR is sufficient to tell; no hypervisor call is needed.
 * Safe to use very early in boot.
 */
static inline bool is_secure_guest(void)
{
	return mfmsr() & MSR_S;
}

#else /* CONFIG_PPC_SVM */

/* Without CONFIG_PPC_SVM the kernel can never be a secure guest. */
static inline bool is_secure_guest(void)
{
	return false;
}

#endif /* CONFIG_PPC_SVM */
#endif /* _ASM_POWERPC_SVM_H */
43 changes: 42 additions & 1 deletion arch/powerpc/kernel/paca.c
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,8 @@
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/kexec.h>
#include <asm/svm.h>
#include <asm/ultravisor.h>

#include "setup.h"

Expand Down Expand Up @@ -54,6 +56,41 @@ static void *__init alloc_paca_data(unsigned long size, unsigned long align,

#define LPPACA_SIZE 0x400

/*
 * Allocate space for an LPPACA from a single contiguous chunk of memory
 * that has been shared with the Ultravisor.
 *
 * On the first call the whole chunk (one LPPACA slot per possible CPU,
 * page-aligned) is allocated bottom-up from memblock and shared page by
 * page via uv_share_page(); subsequent calls simply carve the next @size
 * bytes out of that chunk.
 *
 * NOTE(review): @align and @cpu are currently unused; the chunk itself is
 * PAGE_SIZE-aligned and LPPACA_SIZE (0x400) keeps every slot naturally
 * aligned within it.
 */
static void *__init alloc_shared_lppaca(unsigned long size, unsigned long align,
					unsigned long limit, int cpu)
{
	size_t shared_lppaca_total_size = PAGE_ALIGN(nr_cpu_ids * LPPACA_SIZE);
	static unsigned long shared_lppaca_size;
	static void *shared_lppaca;
	void *ptr;

	if (!shared_lppaca) {
		memblock_set_bottom_up(true);

		shared_lppaca =
			memblock_alloc_try_nid(shared_lppaca_total_size,
					       PAGE_SIZE, MEMBLOCK_LOW_LIMIT,
					       limit, NUMA_NO_NODE);
		if (!shared_lppaca)
			panic("cannot allocate shared data");

		memblock_set_bottom_up(false);
		uv_share_page(PHYS_PFN(__pa(shared_lppaca)),
			      shared_lppaca_total_size >> PAGE_SHIFT);
	}

	ptr = shared_lppaca + shared_lppaca_size;
	shared_lppaca_size += size;

	/*
	 * This is very early in boot, so no harm done if the kernel crashes at
	 * this point.
	 *
	 * Use '>' rather than '>=': an allocation that consumes the pool
	 * exactly (shared_lppaca_size == shared_lppaca_total_size) is valid —
	 * e.g. when nr_cpu_ids * LPPACA_SIZE is already page-aligned the last
	 * CPU's LPPACA fills the chunk completely and must not trigger BUG().
	 */
	BUG_ON(shared_lppaca_size > shared_lppaca_total_size);

	return ptr;
}

/*
* See asm/lppaca.h for more detail.
*
Expand Down Expand Up @@ -83,7 +120,11 @@ static struct lppaca * __init new_lppaca(int cpu, unsigned long limit)
if (early_cpu_has_feature(CPU_FTR_HVMODE))
return NULL;

lp = alloc_paca_data(LPPACA_SIZE, 0x400, limit, cpu);
if (is_secure_guest())
lp = alloc_shared_lppaca(LPPACA_SIZE, 0x400, limit, cpu);
else
lp = alloc_paca_data(LPPACA_SIZE, 0x400, limit, cpu);

init_lppaca(lp);

return lp;
Expand Down

0 comments on commit bd104e6

Please sign in to comment.