x86/sev: Use kernel provided SVSM Calling Areas
The SVSM Calling Area (CA) is used to communicate between Linux and the
SVSM. Since the firmware-supplied CA for the BSP is likely to be in
reserved memory, switch from that CA to a kernel-provided CA so that the
CA can be accessed and used throughout boot. The CA switch is done using
the SVSM core protocol SVSM_CORE_REMAP_CA call.
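
A minimal sketch of what that remap request looks like from the guest side,
using the struct svsm_call, SVSM_CORE_CALL() and SVSM_CORE_REMAP_CA
definitions this patch adds; the wrapper function itself is illustrative
only, and placing the new CA's physical address in RCX follows the SVSM
spec's description of the call:

	/*
	 * Illustrative sketch (not part of this patch): ask the SVSM to
	 * switch to a kernel-provided Calling Area. Per the SVSM core
	 * protocol, RCX carries the physical address of the new CA.
	 */
	static int svsm_remap_ca_sketch(struct svsm_ca *cur_ca, phys_addr_t new_ca_pa)
	{
		struct svsm_call call = {};

		call.caa = cur_ca;				/* CA used to issue this call */
		call.rax = SVSM_CORE_CALL(SVSM_CORE_REMAP_CA);	/* protocol 0, call 0 */
		call.rcx = new_ca_pa;				/* gPA of the new CA */

		/* Very early in boot there is no GHCB yet, so use the MSR protocol. */
		return svsm_perform_msr_protocol(&call);
	}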

An SVSM call is executed by filling out the SVSM CA and setting the proper
register state as documented by the SVSM protocol. The SVSM is invoked by
requesting the hypervisor to run VMPL0.
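
Concretely, "run VMPL0" over the GHCB MSR protocol is the short sequence
below; this is a condensed, annotated excerpt of the
svsm_perform_msr_protocol() helper added later in this diff, with call,
pending and resp as declared there:

	/* GHCBData[11:0] = 0x016 (run-at-VMPL request), GHCBData[39:32] = target VMPL (0 = SVSM) */
	sev_es_wr_ghcb_msr(GHCB_MSR_VMPL_REQ_LEVEL(0));

	svsm_issue_call(call, &pending);	/* load RAX/RCX/RDX/R8/R9 from the request, rep; vmmcall */

	resp = sev_es_rd_ghcb_msr();

	/* GHCBData[11:0] must be 0x017 (response) and GHCBData[63:32] must be 0 (no error) */
	if (GHCB_RESP_CODE(resp) != GHCB_MSR_VMPL_RESP || GHCB_MSR_VMPL_RESP_VAL(resp))
		return -EINVAL;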

Once it is safe to allocate/reserve memory, allocate a CA for each CPU.
After allocating the new CAs, the BSP will switch from the boot CA to the
per-CPU CA. The CA for an AP is identified to the SVSM when creating the
VMSA in preparation for booting the AP.
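
A hedged sketch of the per-CPU allocation step described above; the per-CPU
variable and function names below are illustrative placeholders, not the
names used elsewhere in this patch:

	/* Illustrative only: one zeroed, page-sized CA per possible CPU. */
	static DEFINE_PER_CPU(struct svsm_ca *, example_svsm_caa);
	static DEFINE_PER_CPU(u64, example_svsm_caa_pa);

	static int __init svsm_alloc_percpu_ca_sketch(void)
	{
		int cpu;

		for_each_possible_cpu(cpu) {
			struct svsm_ca *ca = (struct svsm_ca *)get_zeroed_page(GFP_KERNEL);

			if (!ca)
				return -ENOMEM;

			per_cpu(example_svsm_caa, cpu) = ca;
			per_cpu(example_svsm_caa_pa, cpu) = __pa(ca);
		}

		/*
		 * The BSP would then issue SVSM_CORE_REMAP_CA (see the sketch
		 * above) with its own per-CPU CA address to move off the boot CA.
		 */
		return 0;
	}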

  [ bp: Heavily simplify svsm_issue_call() asm, other touchups. ]

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/fa8021130bcc3bcf14d722a25548cb0cdf325456.1717600736.git.thomas.lendacky@amd.com
Tom Lendacky authored and Borislav Petkov (AMD) committed Jun 11, 2024
1 parent 878e70d commit 34ff659
Showing 6 changed files with 362 additions and 39 deletions.
13 changes: 13 additions & 0 deletions arch/x86/include/asm/sev-common.h
@@ -98,6 +98,19 @@ enum psc_op {
/* GHCBData[63:32] */ \
(((u64)(val) & GENMASK_ULL(63, 32)) >> 32)

/* GHCB Run at VMPL Request/Response */
#define GHCB_MSR_VMPL_REQ 0x016
#define GHCB_MSR_VMPL_REQ_LEVEL(v)			\
	/* GHCBData[39:32] */				\
	((((u64)(v) & GENMASK_ULL(7, 0)) << 32) |	\
	/* GHCBData[11:0] */				\
	GHCB_MSR_VMPL_REQ)

#define GHCB_MSR_VMPL_RESP 0x017
#define GHCB_MSR_VMPL_RESP_VAL(v) \
/* GHCBData[63:32] */ \
(((u64)(v) & GENMASK_ULL(63, 32)) >> 32)

/* GHCB Hypervisor Feature Request/Response */
#define GHCB_MSR_HV_FT_REQ 0x080
#define GHCB_MSR_HV_FT_RESP 0x081
32 changes: 32 additions & 0 deletions arch/x86/include/asm/sev.h
@@ -178,6 +178,36 @@ struct svsm_ca {
u8 svsm_buffer[PAGE_SIZE - 8];
};

#define SVSM_SUCCESS 0
#define SVSM_ERR_INCOMPLETE 0x80000000
#define SVSM_ERR_UNSUPPORTED_PROTOCOL 0x80000001
#define SVSM_ERR_UNSUPPORTED_CALL 0x80000002
#define SVSM_ERR_INVALID_ADDRESS 0x80000003
#define SVSM_ERR_INVALID_FORMAT 0x80000004
#define SVSM_ERR_INVALID_PARAMETER 0x80000005
#define SVSM_ERR_INVALID_REQUEST 0x80000006
#define SVSM_ERR_BUSY 0x80000007

/*
* SVSM protocol structure
*/
struct svsm_call {
struct svsm_ca *caa;
u64 rax;
u64 rcx;
u64 rdx;
u64 r8;
u64 r9;
u64 rax_out;
u64 rcx_out;
u64 rdx_out;
u64 r8_out;
u64 r9_out;
};

#define SVSM_CORE_CALL(x) ((0ULL << 32) | (x))
#define SVSM_CORE_REMAP_CA 0

#ifdef CONFIG_AMD_MEM_ENCRYPT
extern void __sev_es_ist_enter(struct pt_regs *regs);
extern void __sev_es_ist_exit(void);
@@ -260,6 +290,7 @@ void snp_accept_memory(phys_addr_t start, phys_addr_t end);
u64 snp_get_unsupported_features(u64 status);
u64 sev_get_status(void);
void sev_show_status(void);
void snp_update_svsm_ca(void);
#else
static inline void sev_es_ist_enter(struct pt_regs *regs) { }
static inline void sev_es_ist_exit(void) { }
@@ -289,6 +320,7 @@ static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
static inline u64 snp_get_unsupported_features(u64 status) { return 0; }
static inline u64 sev_get_status(void) { return 0; }
static inline void sev_show_status(void) { }
static inline void snp_update_svsm_ca(void) { }
#endif

#ifdef CONFIG_KVM_AMD_SEV
1 change: 1 addition & 0 deletions arch/x86/include/uapi/asm/svm.h
@@ -115,6 +115,7 @@
#define SVM_VMGEXIT_AP_CREATE_ON_INIT 0
#define SVM_VMGEXIT_AP_CREATE 1
#define SVM_VMGEXIT_AP_DESTROY 2
#define SVM_VMGEXIT_SNP_RUN_VMPL 0x80000018
#define SVM_VMGEXIT_HV_FEATURES 0x8000fffd
#define SVM_VMGEXIT_TERM_REQUEST 0x8000fffe
#define SVM_VMGEXIT_TERM_REASON(reason_set, reason_code) \
128 changes: 126 additions & 2 deletions arch/x86/kernel/sev-shared.c
@@ -21,6 +21,8 @@
#define WARN(condition, format...) (!!(condition))
#define sev_printk(fmt, ...)
#define sev_printk_rtl(fmt, ...)
#undef vc_forward_exception
#define vc_forward_exception(c) panic("SNP: Hypervisor requested exception\n")
#endif

/*
@@ -244,6 +246,126 @@ static enum es_result verify_exception_info(struct ghcb *ghcb, struct es_em_ctxt
return ES_VMM_ERROR;
}

static inline int svsm_process_result_codes(struct svsm_call *call)
{
switch (call->rax_out) {
case SVSM_SUCCESS:
return 0;
case SVSM_ERR_INCOMPLETE:
case SVSM_ERR_BUSY:
return -EAGAIN;
default:
return -EINVAL;
}
}

/*
* Issue a VMGEXIT to call the SVSM:
* - Load the SVSM register state (RAX, RCX, RDX, R8 and R9)
* - Set the CA call pending field to 1
* - Issue VMGEXIT
* - Save the SVSM return register state (RAX, RCX, RDX, R8 and R9)
* - Perform atomic exchange of the CA call pending field
*
* - See the "Secure VM Service Module for SEV-SNP Guests" specification for
* details on the calling convention.
* - The calling convention loosely follows the Microsoft X64 calling
* convention by putting arguments in RCX, RDX, R8 and R9.
* - RAX specifies the SVSM protocol/callid as input and the return code
* as output.
*/
static __always_inline void svsm_issue_call(struct svsm_call *call, u8 *pending)
{
register unsigned long rax asm("rax") = call->rax;
register unsigned long rcx asm("rcx") = call->rcx;
register unsigned long rdx asm("rdx") = call->rdx;
register unsigned long r8 asm("r8") = call->r8;
register unsigned long r9 asm("r9") = call->r9;

call->caa->call_pending = 1;

asm volatile("rep; vmmcall\n\t"
: "+r" (rax), "+r" (rcx), "+r" (rdx), "+r" (r8), "+r" (r9)
: : "memory");

*pending = xchg(&call->caa->call_pending, *pending);

call->rax_out = rax;
call->rcx_out = rcx;
call->rdx_out = rdx;
call->r8_out = r8;
call->r9_out = r9;
}

static int svsm_perform_msr_protocol(struct svsm_call *call)
{
u8 pending = 0;
u64 val, resp;

/*
* When using the MSR protocol, be sure to save and restore
* the current MSR value.
*/
val = sev_es_rd_ghcb_msr();

sev_es_wr_ghcb_msr(GHCB_MSR_VMPL_REQ_LEVEL(0));

svsm_issue_call(call, &pending);

resp = sev_es_rd_ghcb_msr();

sev_es_wr_ghcb_msr(val);

if (pending)
return -EINVAL;

if (GHCB_RESP_CODE(resp) != GHCB_MSR_VMPL_RESP)
return -EINVAL;

if (GHCB_MSR_VMPL_RESP_VAL(resp))
return -EINVAL;

return svsm_process_result_codes(call);
}

static int svsm_perform_ghcb_protocol(struct ghcb *ghcb, struct svsm_call *call)
{
struct es_em_ctxt ctxt;
u8 pending = 0;

vc_ghcb_invalidate(ghcb);

/*
* Fill in protocol and format specifiers. This can be called very early
* in the boot, so use rip-relative references as needed.
*/
ghcb->protocol_version = RIP_REL_REF(ghcb_version);
ghcb->ghcb_usage = GHCB_DEFAULT_USAGE;

ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_SNP_RUN_VMPL);
ghcb_set_sw_exit_info_1(ghcb, 0);
ghcb_set_sw_exit_info_2(ghcb, 0);

sev_es_wr_ghcb_msr(__pa(ghcb));

svsm_issue_call(call, &pending);

if (pending)
return -EINVAL;

switch (verify_exception_info(ghcb, &ctxt)) {
case ES_OK:
break;
case ES_EXCEPTION:
vc_forward_exception(&ctxt);
fallthrough;
default:
return -EINVAL;
}

return svsm_process_result_codes(call);
}

static enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
struct es_em_ctxt *ctxt,
u64 exit_code, u64 exit_info_1,
@@ -1289,7 +1411,7 @@ static enum es_result vc_check_opcode_bytes(struct es_em_ctxt *ctxt,
* Maintain the GPA of the SVSM Calling Area (CA) in order to utilize the SVSM
* services needed when not running in VMPL0.
*/
static void __head svsm_setup_ca(const struct cc_blob_sev_info *cc_info)
static bool __head svsm_setup_ca(const struct cc_blob_sev_info *cc_info)
{
struct snp_secrets_page *secrets_page;
u64 caa;
@@ -1311,7 +1433,7 @@ static void __head svsm_setup_ca(const struct cc_blob_sev_info *cc_info)
* code and the early kernel code.
*/
if (!rmpadjust((unsigned long)&RIP_REL_REF(boot_ghcb_page), RMP_PG_SIZE_4K, 1))
return;
return false;

/*
* Not running at VMPL0, ensure everything has been properly supplied
@@ -1344,4 +1466,6 @@ static void __head svsm_setup_ca(const struct cc_blob_sev_info *cc_info)
*/
RIP_REL_REF(boot_svsm_caa) = (struct svsm_ca *)caa;
RIP_REL_REF(boot_svsm_caa_pa) = caa;

return true;
}
