x86/sev: Allow for use of the early boot GHCB for PSC requests
Using a GHCB for a page state change (as opposed to the MSR protocol)
allows multiple pages to be processed in a single request. In preparation
for early PSC requests in support of unaccepted memory, update the
invocation of vmgexit_psc() so that it can use the early boot GHCB and not
just the per-CPU GHCB structure.
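
For context, the reason a GHCB-based PSC request scales better than the MSR
protocol is that the GHCB shared buffer carries a whole descriptor of entries,
while the MSR protocol encodes exactly one GFN and operation per VMGEXIT. The
sketch below is illustrative only: the structures are simplified stand-ins for
the real definitions in arch/x86/include/asm/sev.h, and the 253-entry limit is
an assumption matching VMGEXIT_PSC_MAX_ENTRY.

    /*
     * Illustrative sketch, not kernel code: simplified PSC structures and a
     * helper that batches a run of 4K pages into one descriptor. With the MSR
     * protocol, each page would cost its own VMGEXIT round trip instead.
     */
    #include <stddef.h>
    #include <stdint.h>

    #define PSC_MAX_ENTRY   253     /* assumed to match VMGEXIT_PSC_MAX_ENTRY */
    #define PSC_OP_PRIVATE  1
    #define PSC_OP_SHARED   2

    struct psc_entry_sketch {
            uint64_t gfn;           /* guest frame number of the page */
            uint8_t  operation;     /* PSC_OP_PRIVATE or PSC_OP_SHARED */
            uint8_t  pagesize;      /* 0 = 4K, 1 = 2M */
    };

    struct psc_desc_sketch {
            uint16_t cur_entry;
            uint16_t end_entry;
            struct psc_entry_sketch entries[PSC_MAX_ENTRY];
    };

    /* Fill as many entries as fit; the caller loops over any remainder. */
    static size_t psc_fill(struct psc_desc_sketch *d, uint64_t gfn,
                           size_t npages, uint8_t op)
    {
            size_t i, n = npages < PSC_MAX_ENTRY ? npages : PSC_MAX_ENTRY;

            if (!n)
                    return 0;

            for (i = 0; i < n; i++) {
                    d->entries[i].gfn       = gfn + i;
                    d->entries[i].operation = op;
                    d->entries[i].pagesize  = 0;
            }
            d->cur_entry = 0;
            d->end_entry = (uint16_t)(n - 1);

            return n;       /* pages consumed in this request */
    }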

In order to use the proper GHCB (early boot vs per-CPU), set a flag that
indicates when the per-CPU GHCBs are available and registered. For APs,
the per-CPU GHCBs are created before they are started and registered upon
startup, so this flag can be used globally for the BSP and APs instead of
creating a per-CPU flag. This will allow for a significant reduction in
the number of MSR protocol page state change requests when accepting
memory.
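
Condensed, the selection logic the diff below adds to __set_pages_state()
follows the pattern in this stand-alone sketch. The stubs are placeholders so
the sketch compiles on its own; in the kernel they correspond to
__sev_get_ghcb()/__sev_put_ghcb(), boot_ghcb and sev_es_terminate(), as shown
in the diff.

    /*
     * Stand-alone sketch of the flag-gated GHCB selection; stubs replace the
     * kernel helpers so this compiles outside the kernel.
     */
    #include <stdbool.h>
    #include <stddef.h>

    struct ghcb;                            /* opaque for this sketch */

    static bool ghcbs_initialized;          /* set once per-CPU GHCBs are registered */
    static struct ghcb *boot_ghcb;          /* early boot GHCB used before that point */

    static struct ghcb *get_percpu_ghcb(void) { return NULL; }             /* stub */
    static void put_percpu_ghcb(struct ghcb *ghcb) { (void)ghcb; }         /* stub */
    static int psc_vmgexit(struct ghcb *ghcb) { (void)ghcb; return -1; }   /* stub */
    static void psc_terminate(void) { }                                    /* stub */

    static void issue_psc(void)
    {
            struct ghcb *ghcb;

            /* The real code disables IRQs around per-CPU GHCB use. */
            ghcb = ghcbs_initialized ? get_percpu_ghcb() : boot_ghcb;

            /* No usable GHCB, or a failed request, is unrecoverable. */
            if (!ghcb || psc_vmgexit(ghcb))
                    psc_terminate();

            if (ghcbs_initialized)
                    put_percpu_ghcb(ghcb);
    }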

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/d6cbb21f87f81eb8282dd3bf6c34d9698c8a4bbc.1686063086.git.thomas.lendacky@amd.com
Tom Lendacky authored and Borislav Petkov (AMD) committed Jun 6, 2023
1 parent 69dcb1e commit 7006b75
Showing 1 changed file with 38 additions and 23 deletions.
61 changes: 38 additions & 23 deletions arch/x86/kernel/sev.c
@@ -119,7 +119,19 @@ static DEFINE_PER_CPU(struct sev_es_save_area *, sev_vmsa);

struct sev_config {
__u64 debug : 1,
__reserved : 63;

/*
* A flag used by __set_pages_state() that indicates when the
* per-CPU GHCB has been created and registered and thus can be
* used by the BSP instead of the early boot GHCB.
*
* For APs, the per-CPU GHCB is created before they are started
* and registered upon startup, so this flag can be used globally
* for the BSP and APs.
*/
ghcbs_initialized : 1,

__reserved : 62;
};

static struct sev_config sev_cfg __read_mostly;
@@ -662,7 +674,7 @@ static void pvalidate_pages(unsigned long vaddr, unsigned long npages, bool vali
}
}

static void __init early_set_pages_state(unsigned long paddr, unsigned long npages, enum psc_op op)
static void early_set_pages_state(unsigned long paddr, unsigned long npages, enum psc_op op)
{
unsigned long paddr_end;
u64 val;
@@ -756,26 +768,13 @@ void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op
WARN(1, "invalid memory op %d\n", op);
}

static int vmgexit_psc(struct snp_psc_desc *desc)
static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc)
{
int cur_entry, end_entry, ret = 0;
struct snp_psc_desc *data;
struct ghcb_state state;
struct es_em_ctxt ctxt;
unsigned long flags;
struct ghcb *ghcb;

/*
* __sev_get_ghcb() needs to run with IRQs disabled because it is using
* a per-CPU GHCB.
*/
local_irq_save(flags);

ghcb = __sev_get_ghcb(&state);
if (!ghcb) {
ret = 1;
goto out_unlock;
}
vc_ghcb_invalidate(ghcb);

/* Copy the input desc into GHCB shared buffer */
data = (struct snp_psc_desc *)ghcb->shared_buffer;
@@ -832,20 +831,18 @@ static int vmgexit_psc(struct snp_psc_desc *desc)
}

out:
__sev_put_ghcb(&state);

out_unlock:
local_irq_restore(flags);

return ret;
}

static void __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
unsigned long vaddr_end, int op)
{
struct ghcb_state state;
struct psc_hdr *hdr;
struct psc_entry *e;
unsigned long flags;
unsigned long pfn;
struct ghcb *ghcb;
int i;

hdr = &data->hdr;
@@ -875,15 +872,31 @@ static void __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
i++;
}

if (vmgexit_psc(data))
local_irq_save(flags);

if (sev_cfg.ghcbs_initialized)
ghcb = __sev_get_ghcb(&state);
else
ghcb = boot_ghcb;

if (!ghcb || vmgexit_psc(ghcb, data))
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);

if (sev_cfg.ghcbs_initialized)
__sev_put_ghcb(&state);

local_irq_restore(flags);
}

static void set_pages_state(unsigned long vaddr, unsigned long npages, int op)
{
unsigned long vaddr_end, next_vaddr;
struct snp_psc_desc desc;

/* Use the MSR protocol when a GHCB is not available. */
if (!boot_ghcb)
return early_set_pages_state(__pa(vaddr), npages, op);

vaddr = vaddr & PAGE_MASK;
vaddr_end = vaddr + (npages << PAGE_SHIFT);

@@ -1261,6 +1274,8 @@ void setup_ghcb(void)
if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
snp_register_per_cpu_ghcb();

sev_cfg.ghcbs_initialized = true;

return;
}
