x86/sev: Use the SVSM to create a vCPU when not in VMPL0
The RMPADJUST instruction can only change the VMSA attribute of a page when
running at VMPL0. An SVSM will be present when running at VMPL1 or a lower
privilege level.

In that case, use the SVSM_CORE_CREATE_VCPU call or the
SVSM_CORE_DELETE_VCPU call to perform VMSA attribute changes. Use the
VMPL level supplied by the SVSM for the VMSA when starting the AP.

  [ bp: Fix typo + touchups. ]

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/bcdd95ecabe9723673b9693c7f1533a2b8f17781.1717600736.git.thomas.lendacky@amd.com
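
[ Editor's illustration, not part of the patch: the SVSM core protocol encodes its
  request number with the protocol ID in the upper 32 bits of RAX and the call ID
  in the lower 32 bits, which is what the SVSM_CORE_CALL() macro added below
  expresses. A minimal user-space sketch of that encoding, reusing the constants
  added in sev.h:

    /* Illustration: how an SVSM core-protocol request number is composed. */
    #include <stdint.h>
    #include <stdio.h>

    #define SVSM_CORE_CALL(x)       ((0ULL << 32) | (x))  /* core protocol is protocol ID 0 */
    #define SVSM_CORE_CREATE_VCPU   2
    #define SVSM_CORE_DELETE_VCPU   3

    int main(void)
    {
            uint64_t create_req = SVSM_CORE_CALL(SVSM_CORE_CREATE_VCPU);
            uint64_t delete_req = SVSM_CORE_CALL(SVSM_CORE_DELETE_VCPU);

            /* RAX = protocol ID (bits 63:32) | call ID (bits 31:0) */
            printf("CREATE_VCPU request: %#llx\n", (unsigned long long)create_req);
            printf("DELETE_VCPU request: %#llx\n", (unsigned long long)delete_req);
            return 0;
    }

  In the patch, snp_set_vmsa() places this value in call.rax, the VMSA address in
  RCX and, for CREATE_VCPU, the target CAA in RDX and the APIC ID in R8 before
  issuing the request through svsm_perform_call_protocol(). ]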
Tom Lendacky authored and Borislav Petkov (AMD) committed Jun 17, 2024
1 parent fcd042e commit d2b2931
Showing 2 changed files with 56 additions and 20 deletions.
2 changes: 2 additions & 0 deletions arch/x86/include/asm/sev.h
@@ -233,6 +233,8 @@ struct svsm_call {
 #define SVSM_CORE_CALL(x)		((0ULL << 32) | (x))
 #define SVSM_CORE_REMAP_CA		0
 #define SVSM_CORE_PVALIDATE		1
+#define SVSM_CORE_CREATE_VCPU		2
+#define SVSM_CORE_DELETE_VCPU		3
 
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 extern void __sev_es_ist_enter(struct pt_regs *regs);
74 changes: 54 additions & 20 deletions arch/x86/kernel/sev.c
@@ -1010,22 +1010,49 @@ void snp_accept_memory(phys_addr_t start, phys_addr_t end)
 	set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);
 }
 
-static int snp_set_vmsa(void *va, bool vmsa)
+static int snp_set_vmsa(void *va, void *caa, int apic_id, bool make_vmsa)
 {
-	u64 attrs;
+	int ret;
 
-	/*
-	 * Running at VMPL0 allows the kernel to change the VMSA bit for a page
-	 * using the RMPADJUST instruction. However, for the instruction to
-	 * succeed it must target the permissions of a lesser privileged
-	 * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
-	 * instruction in the AMD64 APM Volume 3).
-	 */
-	attrs = 1;
-	if (vmsa)
-		attrs |= RMPADJUST_VMSA_PAGE_BIT;
+	if (snp_vmpl) {
+		struct svsm_call call = {};
+		unsigned long flags;
+
+		local_irq_save(flags);
+
+		call.caa = this_cpu_read(svsm_caa);
+		call.rcx = __pa(va);
+
+		if (make_vmsa) {
+			/* Protocol 0, Call ID 2 */
+			call.rax = SVSM_CORE_CALL(SVSM_CORE_CREATE_VCPU);
+			call.rdx = __pa(caa);
+			call.r8  = apic_id;
+		} else {
+			/* Protocol 0, Call ID 3 */
+			call.rax = SVSM_CORE_CALL(SVSM_CORE_DELETE_VCPU);
+		}
+
+		ret = svsm_perform_call_protocol(&call);
+
+		local_irq_restore(flags);
+	} else {
+		/*
+		 * If the kernel runs at VMPL0, it can change the VMSA
+		 * bit for a page using the RMPADJUST instruction.
+		 * However, for the instruction to succeed it must
+		 * target the permissions of a lesser privileged (higher
+		 * numbered) VMPL level, so use VMPL1.
+		 */
+		u64 attrs = 1;
+
+		if (make_vmsa)
+			attrs |= RMPADJUST_VMSA_PAGE_BIT;
+
+		ret = rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
+	}
 
-	return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
+	return ret;
 }
 
 #define __ATTR_BASE		(SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK)
@@ -1059,11 +1086,11 @@ static void *snp_alloc_vmsa_page(int cpu)
 	return page_address(p + 1);
 }
 
-static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
+static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa, int apic_id)
 {
 	int err;
 
-	err = snp_set_vmsa(vmsa, false);
+	err = snp_set_vmsa(vmsa, NULL, apic_id, false);
 	if (err)
 		pr_err("clear VMSA page failed (%u), leaking page\n", err);
 	else
@@ -1074,6 +1101,7 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
 {
 	struct sev_es_save_area *cur_vmsa, *vmsa;
 	struct ghcb_state state;
+	struct svsm_ca *caa;
 	unsigned long flags;
 	struct ghcb *ghcb;
 	u8 sipi_vector;
@@ -1120,6 +1148,9 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
 	if (!vmsa)
 		return -ENOMEM;
 
+	/* If an SVSM is present, the SVSM per-CPU CAA will be !NULL */
+	caa = per_cpu(svsm_caa, cpu);
+
 	/* CR4 should maintain the MCE value */
 	cr4 = native_read_cr4() & X86_CR4_MCE;
 
@@ -1167,11 +1198,11 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
 	 * VMPL level
 	 * SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
 	 */
-	vmsa->vmpl = 0;
+	vmsa->vmpl = snp_vmpl;
 	vmsa->sev_features = sev_status >> 2;
 
 	/* Switch the page over to a VMSA page now that it is initialized */
-	ret = snp_set_vmsa(vmsa, true);
+	ret = snp_set_vmsa(vmsa, caa, apic_id, true);
 	if (ret) {
 		pr_err("set VMSA page failed (%u)\n", ret);
 		free_page((unsigned long)vmsa);
@@ -1187,7 +1218,10 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
 	vc_ghcb_invalidate(ghcb);
 	ghcb_set_rax(ghcb, vmsa->sev_features);
 	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_CREATION);
-	ghcb_set_sw_exit_info_1(ghcb, ((u64)apic_id << 32) | SVM_VMGEXIT_AP_CREATE);
+	ghcb_set_sw_exit_info_1(ghcb,
+				((u64)apic_id << 32) |
+				((u64)snp_vmpl << 16) |
+				SVM_VMGEXIT_AP_CREATE);
 	ghcb_set_sw_exit_info_2(ghcb, __pa(vmsa));
 
 	sev_es_wr_ghcb_msr(__pa(ghcb));
@@ -1205,13 +1239,13 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
 
 	/* Perform cleanup if there was an error */
 	if (ret) {
-		snp_cleanup_vmsa(vmsa);
+		snp_cleanup_vmsa(vmsa, apic_id);
 		vmsa = NULL;
 	}
 
 	/* Free up any previous VMSA page */
 	if (cur_vmsa)
-		snp_cleanup_vmsa(cur_vmsa);
+		snp_cleanup_vmsa(cur_vmsa, apic_id);
 
 	/* Record the current VMSA page */
 	per_cpu(sev_vmsa, cpu) = vmsa;
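
[ Editor's illustration, not part of the commit: with this change, the AP
  creation VMGEXIT carries the VMPL of the new VMSA in SW_EXITINFO1, alongside
  the APIC ID in the upper 32 bits and the request type in the low bits. A
  minimal sketch of that composition, assuming SVM_VMGEXIT_AP_CREATE has the
  value 1 (example values only):

    /* Illustration: composing SW_EXITINFO1 for the AP creation VMGEXIT. */
    #include <stdint.h>
    #include <stdio.h>

    #define SVM_VMGEXIT_AP_CREATE   1       /* assumed value, for this example only */

    int main(void)
    {
            uint32_t apic_id = 5;           /* example APIC ID of the AP being started */
            uint64_t snp_vmpl = 1;          /* example: kernel runs at VMPL1 under an SVSM */

            uint64_t exit_info_1 = ((uint64_t)apic_id << 32) |
                                   (snp_vmpl << 16) |
                                   SVM_VMGEXIT_AP_CREATE;

            /* APIC ID in bits 63:32, VMPL starting at bit 16, request type in bits 15:0 */
            printf("SW_EXITINFO1 = %#llx\n", (unsigned long long)exit_info_1);
            return 0;
    }

  Before this patch the VMPL field was always zero; passing snp_vmpl here asks
  the hypervisor to start the AP at the VMPL at which the guest kernel is
  running, matching the vmsa->vmpl value set above. ]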
