Skip to content

Commit

Permalink
Merge tag 'kvm-s390-next-6.2-1' of https://git.kernel.org/pub/scm/lin…
Browse files Browse the repository at this point in the history
…ux/kernel/git/kvms390/linux into HEAD

- Second batch of the lazy destroy patches
- First batch of KVM changes for kernel virtual != physical address support
- Removal of an unused function
  • Loading branch information
Paolo Bonzini committed Nov 28, 2022
2 parents 29c4697 + 99b63f5 commit 1e79a9e
Show file tree
Hide file tree
Showing 19 changed files with 603 additions and 162 deletions.
41 changes: 37 additions & 4 deletions Documentation/virt/kvm/api.rst
Original file line number Diff line number Diff line change
Expand Up @@ -5163,10 +5163,13 @@ KVM_PV_ENABLE
===== =============================

KVM_PV_DISABLE
Deregister the VM from the Ultravisor and reclaim the memory that
had been donated to the Ultravisor, making it usable by the kernel
again. All registered VCPUs are converted back to non-protected
ones.
Deregister the VM from the Ultravisor and reclaim the memory that had
been donated to the Ultravisor, making it usable by the kernel again.
All registered VCPUs are converted back to non-protected ones. If a
previous protected VM had been prepared for asynchronous teardown with
KVM_PV_ASYNC_CLEANUP_PREPARE and not subsequently torn down with
KVM_PV_ASYNC_CLEANUP_PERFORM, it will be torn down in this call
together with the current protected VM.

KVM_PV_VM_SET_SEC_PARMS
Pass the image header from VM memory to the Ultravisor in
Expand Down Expand Up @@ -5289,6 +5292,36 @@ KVM_PV_DUMP
authentication tag all of which are needed to decrypt the dump at a
later time.

KVM_PV_ASYNC_CLEANUP_PREPARE
:Capability: KVM_CAP_S390_PROTECTED_ASYNC_DISABLE

Prepare the current protected VM for asynchronous teardown. Most
resources used by the current protected VM will be set aside for a
subsequent asynchronous teardown. The current protected VM will then
resume execution immediately as non-protected. There can be at most
one protected VM prepared for asynchronous teardown at any time. If
a protected VM had already been prepared for teardown without
subsequently calling KVM_PV_ASYNC_CLEANUP_PERFORM, this call will
fail. In that case, the userspace process should issue a normal
KVM_PV_DISABLE. The resources set aside with this call will need to
be cleaned up with a subsequent call to KVM_PV_ASYNC_CLEANUP_PERFORM
or KVM_PV_DISABLE, otherwise they will be cleaned up when KVM
terminates. KVM_PV_ASYNC_CLEANUP_PREPARE can be called again as soon
as cleanup starts, i.e. before KVM_PV_ASYNC_CLEANUP_PERFORM finishes.

KVM_PV_ASYNC_CLEANUP_PERFORM
:Capability: KVM_CAP_S390_PROTECTED_ASYNC_DISABLE

Tear down the protected VM previously prepared for teardown with
KVM_PV_ASYNC_CLEANUP_PREPARE. The resources that had been set aside
will be freed during the execution of this command. This PV command
should ideally be issued by userspace from a separate thread. If a
fatal signal is received (or the process terminates naturally), the
command will terminate immediately without completing, and the normal
KVM shutdown procedure will take care of cleaning up all remaining
protected VMs, including the ones whose teardown was interrupted by
process termination.

4.126 KVM_XEN_HVM_SET_ATTR
--------------------------

Expand Down
14 changes: 11 additions & 3 deletions arch/s390/include/asm/kvm_host.h
Original file line number Diff line number Diff line change
Expand Up @@ -142,8 +142,7 @@ struct mcck_volatile_info {
CR14_EXTERNAL_DAMAGE_SUBMASK)

#define SIDAD_SIZE_MASK 0xff
#define sida_origin(sie_block) \
((sie_block)->sidad & PAGE_MASK)
#define sida_addr(sie_block) phys_to_virt((sie_block)->sidad & PAGE_MASK)
#define sida_size(sie_block) \
((((sie_block)->sidad & SIDAD_SIZE_MASK) + 1) * PAGE_SIZE)

Expand Down Expand Up @@ -276,6 +275,7 @@ struct kvm_s390_sie_block {
#define ECB3_AES 0x04
#define ECB3_RI 0x01
__u8 ecb3; /* 0x0063 */
#define ESCA_SCAOL_MASK ~0x3fU
__u32 scaol; /* 0x0064 */
__u8 sdf; /* 0x0068 */
__u8 epdx; /* 0x0069 */
Expand Down Expand Up @@ -942,6 +942,8 @@ struct kvm_s390_pv {
unsigned long stor_base;
void *stor_var;
bool dumping;
void *set_aside;
struct list_head need_cleanup;
struct mmu_notifier mmu_notifier;
};

Expand Down Expand Up @@ -1017,7 +1019,13 @@ void kvm_arch_crypto_clear_masks(struct kvm *kvm);
void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
unsigned long *aqm, unsigned long *adm);

extern int sie64a(struct kvm_s390_sie_block *, u64 *);
/* Low-level SIE entry point (assembly); takes both the physical and the
 * virtual address of the SIE control block plus the guest register save
 * area. The physical address is needed because the SIE instruction in
 * entry.S operates on the real/physical block address, while C code and
 * the exit path use the virtual one.
 */
int __sie64a(phys_addr_t sie_block_phys, struct kvm_s390_sie_block *sie_block, u64 *rsa);

/*
 * sie64a - enter SIE for the given control block.
 * Convenience wrapper that derives the physical address of @sie_block
 * via virt_to_phys() and forwards to __sie64a().
 * Returns the value produced by __sie64a (SIE exit reason code).
 */
static inline int sie64a(struct kvm_s390_sie_block *sie_block, u64 *rsa)
{
return __sie64a(virt_to_phys(sie_block), sie_block, rsa);
}

extern char sie_exit;

extern int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc);
Expand Down
4 changes: 2 additions & 2 deletions arch/s390/include/asm/mem_encrypt.h
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,8 @@

#ifndef __ASSEMBLY__

int set_memory_encrypted(unsigned long addr, int numpages);
int set_memory_decrypted(unsigned long addr, int numpages);
int set_memory_encrypted(unsigned long vaddr, int numpages);
int set_memory_decrypted(unsigned long vaddr, int numpages);

#endif /* __ASSEMBLY__ */

Expand Down
1 change: 1 addition & 0 deletions arch/s390/include/asm/stacktrace.h
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,7 @@ struct stack_frame {
unsigned long sie_savearea;
unsigned long sie_reason;
unsigned long sie_flags;
unsigned long sie_control_block_phys;
};
};
unsigned long gprs[10];
Expand Down
10 changes: 10 additions & 0 deletions arch/s390/include/asm/uv.h
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@
#define UVC_CMD_INIT_UV 0x000f
#define UVC_CMD_CREATE_SEC_CONF 0x0100
#define UVC_CMD_DESTROY_SEC_CONF 0x0101
#define UVC_CMD_DESTROY_SEC_CONF_FAST 0x0102
#define UVC_CMD_CREATE_SEC_CPU 0x0120
#define UVC_CMD_DESTROY_SEC_CPU 0x0121
#define UVC_CMD_CONV_TO_SEC_STOR 0x0200
Expand Down Expand Up @@ -81,6 +82,7 @@ enum uv_cmds_inst {
BIT_UVC_CMD_UNSHARE_ALL = 20,
BIT_UVC_CMD_PIN_PAGE_SHARED = 21,
BIT_UVC_CMD_UNPIN_PAGE_SHARED = 22,
BIT_UVC_CMD_DESTROY_SEC_CONF_FAST = 23,
BIT_UVC_CMD_DUMP_INIT = 24,
BIT_UVC_CMD_DUMP_CONFIG_STOR_STATE = 25,
BIT_UVC_CMD_DUMP_CPU = 26,
Expand Down Expand Up @@ -230,6 +232,14 @@ struct uv_cb_nodata {
u64 reserved20[4];
} __packed __aligned(8);

/*
 * Destroy Configuration Fast (UVC_CMD_DESTROY_SEC_CONF_FAST) control block.
 * Used to tear down a protected configuration quickly; see the matching
 * command code and instruction bit (BIT_UVC_CMD_DESTROY_SEC_CONF_FAST).
 */
struct uv_cb_destroy_fast {
struct uv_cb_header header;
u64 reserved08[2];
u64 handle; /* presumably the handle of the protected configuration to destroy -- confirm against UV spec */
u64 reserved20[5];
} __packed __aligned(8);

/* Set Shared Access */
struct uv_cb_share {
struct uv_cb_header header;
Expand Down
1 change: 1 addition & 0 deletions arch/s390/kernel/asm-offsets.c
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,7 @@ int main(void)
OFFSET(__SF_SIE_SAVEAREA, stack_frame, sie_savearea);
OFFSET(__SF_SIE_REASON, stack_frame, sie_reason);
OFFSET(__SF_SIE_FLAGS, stack_frame, sie_flags);
OFFSET(__SF_SIE_CONTROL_PHYS, stack_frame, sie_control_block_phys);
DEFINE(STACK_FRAME_OVERHEAD, sizeof(struct stack_frame));
BLANK();
/* idle data offsets */
Expand Down
26 changes: 15 additions & 11 deletions arch/s390/kernel/entry.S
Original file line number Diff line number Diff line change
Expand Up @@ -225,18 +225,20 @@ ENDPROC(__switch_to)

#if IS_ENABLED(CONFIG_KVM)
/*
* sie64a calling convention:
* %r2 pointer to sie control block
* %r3 guest register save area
* __sie64a calling convention:
* %r2 pointer to sie control block phys
* %r3 pointer to sie control block virt
* %r4 guest register save area
*/
ENTRY(sie64a)
ENTRY(__sie64a)
stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers
lg %r12,__LC_CURRENT
stg %r2,__SF_SIE_CONTROL(%r15) # save control block pointer
stg %r3,__SF_SIE_SAVEAREA(%r15) # save guest register save area
stg %r2,__SF_SIE_CONTROL_PHYS(%r15) # save sie block physical..
stg %r3,__SF_SIE_CONTROL(%r15) # ...and virtual addresses
stg %r4,__SF_SIE_SAVEAREA(%r15) # save guest register save area
xc __SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
mvc __SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
lmg %r0,%r13,0(%r3) # load guest gprs 0-13
lmg %r0,%r13,0(%r4) # load guest gprs 0-13
lg %r14,__LC_GMAP # get gmap pointer
ltgr %r14,%r14
jz .Lsie_gmap
Expand All @@ -248,6 +250,7 @@ ENTRY(sie64a)
jnz .Lsie_skip
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
jo .Lsie_skip # exit if fp/vx regs changed
lg %r14,__SF_SIE_CONTROL_PHYS(%r15) # get sie block phys addr
BPEXIT __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_entry:
sie 0(%r14)
Expand All @@ -258,13 +261,14 @@ ENTRY(sie64a)
BPOFF
BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer
ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
lctlg %c1,%c1,__LC_KERNEL_ASCE # load primary asce
.Lsie_done:
# some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
# Other instructions between sie64a and .Lsie_done should not cause program
# Other instructions between __sie64a and .Lsie_done should not cause program
# interrupts. So lets use 3 nops as a landing pad for all possible rewinds.
.Lrewind_pad6:
nopr 7
Expand Down Expand Up @@ -293,8 +297,8 @@ sie_exit:
EX_TABLE(.Lrewind_pad4,.Lsie_fault)
EX_TABLE(.Lrewind_pad2,.Lsie_fault)
EX_TABLE(sie_exit,.Lsie_fault)
ENDPROC(sie64a)
EXPORT_SYMBOL(sie64a)
ENDPROC(__sie64a)
EXPORT_SYMBOL(__sie64a)
EXPORT_SYMBOL(sie_exit)
#endif

Expand Down Expand Up @@ -373,7 +377,7 @@ ENTRY(pgm_check_handler)
j 3f # -> fault in user space
.Lpgm_skip_asce:
#if IS_ENABLED(CONFIG_KVM)
# cleanup critical section for program checks in sie64a
# cleanup critical section for program checks in __sie64a
OUTSIDE %r9,.Lsie_gmap,.Lsie_done,1f
SIEEXIT
lghi %r10,_PIF_GUEST_FAULT
Expand Down
7 changes: 7 additions & 0 deletions arch/s390/kernel/uv.c
Original file line number Diff line number Diff line change
Expand Up @@ -255,6 +255,13 @@ static int make_secure_pte(pte_t *ptep, unsigned long addr,
*/
static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
{
/*
* The misc feature indicates, among other things, that importing a
* shared page from a different protected VM will automatically also
* transfer its ownership.
*/
if (test_bit_inv(BIT_UV_FEAT_MISC, &uv_info.uv_feature_indications))
return false;
if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
return false;
return atomic_read(&mm->context.protected_count) > 1;
Expand Down
9 changes: 4 additions & 5 deletions arch/s390/kvm/intercept.c
Original file line number Diff line number Diff line change
Expand Up @@ -217,7 +217,7 @@ static int handle_itdb(struct kvm_vcpu *vcpu)
return 0;
if (current->thread.per_flags & PER_FLAG_NO_TE)
return 0;
itdb = (struct kvm_s390_itdb *)vcpu->arch.sie_block->itdba;
itdb = phys_to_virt(vcpu->arch.sie_block->itdba);
rc = write_guest_lc(vcpu, __LC_PGM_TDB, itdb, sizeof(*itdb));
if (rc)
return rc;
Expand Down Expand Up @@ -409,8 +409,7 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
out:
if (!cc) {
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
memcpy((void *)(sida_origin(vcpu->arch.sie_block)),
sctns, PAGE_SIZE);
memcpy(sida_addr(vcpu->arch.sie_block), sctns, PAGE_SIZE);
} else {
r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
if (r) {
Expand Down Expand Up @@ -464,7 +463,7 @@ static int handle_operexc(struct kvm_vcpu *vcpu)

static int handle_pv_spx(struct kvm_vcpu *vcpu)
{
u32 pref = *(u32 *)vcpu->arch.sie_block->sidad;
u32 pref = *(u32 *)sida_addr(vcpu->arch.sie_block);

kvm_s390_set_prefix(vcpu, pref);
trace_kvm_s390_handle_prefix(vcpu, 1, pref);
Expand Down Expand Up @@ -497,7 +496,7 @@ static int handle_pv_sclp(struct kvm_vcpu *vcpu)

static int handle_pv_uvc(struct kvm_vcpu *vcpu)
{
struct uv_cb_share *guest_uvcb = (void *)vcpu->arch.sie_block->sidad;
struct uv_cb_share *guest_uvcb = sida_addr(vcpu->arch.sie_block);
struct uv_cb_cts uvcb = {
.header.cmd = UVC_CMD_UNPIN_PAGE_SHARED,
.header.len = sizeof(uvcb),
Expand Down
5 changes: 0 additions & 5 deletions arch/s390/kvm/interrupt.c
Original file line number Diff line number Diff line change
Expand Up @@ -314,11 +314,6 @@ static inline u8 gisa_get_ipm(struct kvm_s390_gisa *gisa)
return READ_ONCE(gisa->ipm);
}

static inline void gisa_clear_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

static inline int gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
Expand Down
Loading

0 comments on commit 1e79a9e

Please sign in to comment.