Merge tag 'kvm-s390-20140530' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into kvm-next

Several minor fixes and cleanups for KVM:

1. Fix flag check for gdb support
2. Remove unnecessary vcpu start
3. Remove code duplication for sigp interrupts
4. Better DAT handling for the TPROT instruction
5. Correct addressing exception for standby memory
Paolo Bonzini committed May 30, 2014
2 parents: 356d4c2 + 5a5e653, commit 146b2cf
Showing 7 changed files with 116 additions and 66 deletions.
1 change: 1 addition & 0 deletions arch/s390/include/asm/kvm_host.h
@@ -110,6 +110,7 @@ struct kvm_s390_sie_block {
#define ICTL_ISKE 0x00004000
#define ICTL_SSKE 0x00002000
#define ICTL_RRBE 0x00001000
#define ICTL_TPROT 0x00000200
__u32 ictl; /* 0x0048 */
__u32 eca; /* 0x004c */
#define ICPT_INST 0x04
57 changes: 55 additions & 2 deletions arch/s390/kvm/gaccess.c
@@ -292,15 +292,15 @@ static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
wake_up(&vcpu->kvm->arch.ipte_wq);
}

static void ipte_lock(struct kvm_vcpu *vcpu)
void ipte_lock(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.sie_block->eca & 1)
ipte_lock_siif(vcpu);
else
ipte_lock_simple(vcpu);
}

static void ipte_unlock(struct kvm_vcpu *vcpu)
void ipte_unlock(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.sie_block->eca & 1)
ipte_unlock_siif(vcpu);
@@ -644,6 +644,59 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
return rc;
}

/**
* guest_translate_address - translate guest logical into guest absolute address
*
* Parameter semantics are the same as the ones from guest_translate.
* The memory contents at the guest address are not changed.
*
* Note: The IPTE lock is not taken during this function, so the caller
* has to take care of this.
*/
int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
unsigned long *gpa, int write)
{
struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
psw_t *psw = &vcpu->arch.sie_block->gpsw;
struct trans_exc_code_bits *tec;
union asce asce;
int rc;

/* Access register mode is not supported yet. */
if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG)
return -EOPNOTSUPP;

gva = kvm_s390_logical_to_effective(vcpu, gva);
memset(pgm, 0, sizeof(*pgm));
tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
tec->as = psw_bits(*psw).as;
tec->fsi = write ? FSI_STORE : FSI_FETCH;
tec->addr = gva >> PAGE_SHIFT;
if (is_low_address(gva) && low_address_protection_enabled(vcpu)) {
if (write) {
rc = pgm->code = PGM_PROTECTION;
return rc;
}
}

asce.val = get_vcpu_asce(vcpu);
if (psw_bits(*psw).t && !asce.r) { /* Use DAT? */
rc = guest_translate(vcpu, gva, gpa, write);
if (rc > 0) {
if (rc == PGM_PROTECTION)
tec->b61 = 1;
pgm->code = rc;
}
} else {
rc = 0;
*gpa = kvm_s390_real_to_abs(vcpu, gva);
if (kvm_is_error_gpa(vcpu->kvm, *gpa))
rc = pgm->code = PGM_ADDRESSING;
}

return rc;
}

/**
* kvm_s390_check_low_addr_protection - check for low-address protection
* @ga: Guest address
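The kernel-doc above is explicit that guest_translate_address() does not take the IPTE lock itself; the caller owns it. A minimal caller-side sketch follows (illustrative only, not part of this commit; it assumes the usual KVM/s390 headers and mirrors how the reworked handle_tprot() in priv.c uses the helper, and translate_for_store is a hypothetical name):

/* Hypothetical helper, for illustration only: translate a guest logical
 * address for a store access while honoring the caller-owned IPTE lock rule. */
static int translate_for_store(struct kvm_vcpu *vcpu, unsigned long gva,
			       unsigned long *gpa)
{
	int rc;

	/* The IPTE lock is only needed while DAT may be used. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_lock(vcpu);

	rc = guest_translate_address(vcpu, gva, gpa, 1);	/* 1 == write access */

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_unlock(vcpu);

	/* rc == 0: *gpa holds the guest absolute address;
	 * rc  > 0: program interruption code (e.g. PGM_PROTECTION);
	 * rc  < 0: e.g. -EOPNOTSUPP for access-register mode. */
	return rc;
}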
5 changes: 5 additions & 0 deletions arch/s390/kvm/gaccess.h
@@ -155,6 +155,9 @@ int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
return kvm_read_guest(vcpu->kvm, gpa, data, len);
}

int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
unsigned long *gpa, int write);

int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
unsigned long len, int write);

@@ -324,6 +327,8 @@ int read_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
return access_guest_real(vcpu, gra, data, len, 0);
}

void ipte_lock(struct kvm_vcpu *vcpu);
void ipte_unlock(struct kvm_vcpu *vcpu);
int ipte_lock_held(struct kvm_vcpu *vcpu);
int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga);

1 change: 0 additions & 1 deletion arch/s390/kvm/interrupt.c
@@ -442,7 +442,6 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
&vcpu->arch.sie_block->gpsw,
sizeof(psw_t));
kvm_s390_vcpu_start(vcpu);
break;
case KVM_S390_PROGRAM_INT:
VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
6 changes: 4 additions & 2 deletions arch/s390/kvm/kvm-s390.c
@@ -637,7 +637,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
if (sclp_has_siif())
vcpu->arch.sie_block->eca |= 1;
vcpu->arch.sie_block->fac = (int) (long) vfacilities;
vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
ICTL_TPROT;

if (kvm_s390_cmma_enabled(vcpu->kvm)) {
rc = kvm_s390_vcpu_setup_cmma(vcpu);
if (rc)
@@ -950,7 +952,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
vcpu->guest_debug = 0;
kvm_s390_clear_bp_data(vcpu);

if (vcpu->guest_debug & ~VALID_GUESTDBG_FLAGS)
if (dbg->control & ~VALID_GUESTDBG_FLAGS)
return -EINVAL;

if (dbg->control & KVM_GUESTDBG_ENABLE) {
56 changes: 33 additions & 23 deletions arch/s390/kvm/priv.c
@@ -930,8 +930,9 @@ int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
static int handle_tprot(struct kvm_vcpu *vcpu)
{
u64 address1, address2;
struct vm_area_struct *vma;
unsigned long user_address;
unsigned long hva, gpa;
int ret = 0, cc = 0;
bool writable;

vcpu->stat.instruction_tprot++;

@@ -942,32 +943,41 @@ static int handle_tprot(struct kvm_vcpu *vcpu)

/* we only handle the Linux memory detection case:
* access key == 0
* guest DAT == off
* everything else goes to userspace. */
if (address2 & 0xf0)
return -EOPNOTSUPP;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
return -EOPNOTSUPP;

down_read(&current->mm->mmap_sem);
user_address = __gmap_translate(address1, vcpu->arch.gmap);
if (IS_ERR_VALUE(user_address))
goto out_inject;
vma = find_vma(current->mm, user_address);
if (!vma)
goto out_inject;
vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);

up_read(&current->mm->mmap_sem);
return 0;
ipte_lock(vcpu);
ret = guest_translate_address(vcpu, address1, &gpa, 1);
if (ret == PGM_PROTECTION) {
/* Write protected? Try again with read-only... */
cc = 1;
ret = guest_translate_address(vcpu, address1, &gpa, 0);
}
if (ret) {
if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
ret = kvm_s390_inject_program_int(vcpu, ret);
} else if (ret > 0) {
/* Translation not available */
kvm_s390_set_psw_cc(vcpu, 3);
ret = 0;
}
goto out_unlock;
}

out_inject:
up_read(&current->mm->mmap_sem);
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
if (kvm_is_error_hva(hva)) {
ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
} else {
if (!writable)
cc = 1; /* Write not permitted ==> read-only */
kvm_s390_set_psw_cc(vcpu, cc);
/* Note: CC2 only occurs for storage keys (not supported yet) */
}
out_unlock:
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
ipte_unlock(vcpu);
return ret;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
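As a reading aid for the reworked handler, the condition codes it sets follow the architected TPROT semantics referenced by the comments above. The enum below is purely illustrative; the names are not kernel identifiers:

/* Illustrative summary of the condition codes set by handle_tprot():
 *   cc 0 - fetching and storing permitted
 *   cc 1 - fetching permitted, storing not permitted (read-only mapping)
 *   cc 2 - neither permitted (storage-key protection; not handled here yet)
 *   cc 3 - translation not available
 */
enum tprot_cc {
	TPROT_CC_FETCH_STORE	= 0,
	TPROT_CC_FETCH_ONLY	= 1,
	TPROT_CC_NO_ACCESS	= 2,
	TPROT_CC_NO_TRANSLATION	= 3,
};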
56 changes: 18 additions & 38 deletions arch/s390/kvm/sigp.c
@@ -54,33 +54,23 @@

static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
struct kvm_s390_local_interrupt *li;
struct kvm_s390_interrupt_info *inti;
struct kvm_s390_interrupt s390int = {
.type = KVM_S390_INT_EMERGENCY,
.parm = vcpu->vcpu_id,
};
struct kvm_vcpu *dst_vcpu = NULL;
int rc = 0;

if (cpu_addr < KVM_MAX_VCPUS)
dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
if (!dst_vcpu)
return SIGP_CC_NOT_OPERATIONAL;

inti = kzalloc(sizeof(*inti), GFP_KERNEL);
if (!inti)
return -ENOMEM;

inti->type = KVM_S390_INT_EMERGENCY;
inti->emerg.code = vcpu->vcpu_id;
rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int);
if (!rc)
VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);

li = &dst_vcpu->arch.local_int;
spin_lock_bh(&li->lock);
list_add_tail(&inti->list, &li->list);
atomic_set(&li->active, 1);
atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
if (waitqueue_active(li->wq))
wake_up_interruptible(li->wq);
spin_unlock_bh(&li->lock);
VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);

return SIGP_CC_ORDER_CODE_ACCEPTED;
return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}

static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
@@ -116,33 +106,23 @@ static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,

static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
struct kvm_s390_local_interrupt *li;
struct kvm_s390_interrupt_info *inti;
struct kvm_s390_interrupt s390int = {
.type = KVM_S390_INT_EXTERNAL_CALL,
.parm = vcpu->vcpu_id,
};
struct kvm_vcpu *dst_vcpu = NULL;
int rc;

if (cpu_addr < KVM_MAX_VCPUS)
dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
if (!dst_vcpu)
return SIGP_CC_NOT_OPERATIONAL;

inti = kzalloc(sizeof(*inti), GFP_KERNEL);
if (!inti)
return -ENOMEM;

inti->type = KVM_S390_INT_EXTERNAL_CALL;
inti->extcall.code = vcpu->vcpu_id;

li = &dst_vcpu->arch.local_int;
spin_lock_bh(&li->lock);
list_add_tail(&inti->list, &li->list);
atomic_set(&li->active, 1);
atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
if (waitqueue_active(li->wq))
wake_up_interruptible(li->wq);
spin_unlock_bh(&li->lock);
VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int);
if (!rc)
VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);

return SIGP_CC_ORDER_CODE_ACCEPTED;
return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}

static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
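Both SIGP handlers above now reduce to the same shape: build a kvm_s390_interrupt and let kvm_s390_inject_vcpu() do the queueing and wake-up that was previously open-coded. A condensed sketch of that shared pattern follows (illustrative only; sigp_inject_simple is a hypothetical name, not a kernel function):

/* Hypothetical helper showing the pattern shared by __sigp_emergency()
 * and __sigp_external_call() after this cleanup. */
static int sigp_inject_simple(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 type)
{
	struct kvm_s390_interrupt s390int = {
		.type = type,	/* KVM_S390_INT_EMERGENCY or KVM_S390_INT_EXTERNAL_CALL */
		.parm = vcpu->vcpu_id,
	};
	struct kvm_vcpu *dst_vcpu = NULL;
	int rc;

	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int);
	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}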
