KVM: arm64/sve: Make register ioctl access errors more consistent
Currently, the way error codes are generated when processing the
SVE register access ioctls is a bit haphazard.

This patch refactors the code so that the behaviour is more
consistent: now, -EINVAL should be returned only for unrecognised
register IDs or when some other runtime error occurs.  -ENOENT is
returned for register IDs that are recognised, but whose
corresponding register (or slice) does not exist for the vcpu.

To this end, in {get,set}_sve_reg() we now delegate the
vcpu_has_sve() check down into {get,set}_sve_vls() and
sve_reg_to_region().  The KVM_REG_ARM64_SVE_VLS special case is
picked off first, then sve_reg_to_region() plays the role of
exhaustively validating or rejecting the register ID and (where
accepted) computing the applicable register region as before.

sve_reg_to_region() is rearranged so that -ENOENT or -EPERM is not
returned prematurely, before checking whether reg->id is in a
recognised range.

-EPERM is now only returned when an attempt is made to access an
actually existing register slice on an unfinalized vcpu.
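
As an illustration only (not part of this patch): a minimal userspace
sketch of the resulting contract, assuming arm64 UAPI headers that define
the SVE register IDs (v5.1+) and an already-created vcpu file descriptor;
probe_sve_zreg() is a hypothetical helper name.

  #include <errno.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /* Hypothetical helper: read back slice 0 of SVE register Z<n> on an
   * existing vcpu fd and report which of the error codes above comes back. */
  static int probe_sve_zreg(int vcpu_fd, unsigned int n)
  {
      uint8_t buf[256];                  /* one 2048-bit Z-register slice */
      struct kvm_one_reg reg = {
          .id   = KVM_REG_ARM64_SVE_ZREG(n, 0),
          .addr = (uint64_t)(uintptr_t)buf,
      };

      if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) == 0)
          return 0;                      /* register exists and was read */

      switch (errno) {
      case EINVAL:      /* reg->id is not a recognised SVE register ID */
          fprintf(stderr, "Z%u: unrecognised register ID\n", n);
          break;
      case ENOENT:      /* recognised ID, but no such register on this vcpu */
          fprintf(stderr, "Z%u: register does not exist for this vcpu\n", n);
          break;
      case EPERM:       /* register exists, but the vcpu is not finalized */
          fprintf(stderr, "Z%u: run KVM_ARM_VCPU_FINALIZE first\n", n);
          break;
      }
      return -errno;
  }

With this scheme, a vcpu created without the KVM_ARM_VCPU_SVE feature
reports ENOENT for a Z-register ID, an SVE-enabled but not-yet-finalized
vcpu reports EPERM, and only a genuinely malformed ID in the SVE register
space yields EINVAL.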

Fixes: e1c9c98 ("KVM: arm64/sve: Add SVE support to register access ioctl interface")
Fixes: 9033bba ("KVM: arm64/sve: Add pseudo-register for the guest's vector lengths")
Suggested-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Dave Martin <Dave.Martin@arm.com>
Reviewed-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Dave Martin authored and Marc Zyngier committed Apr 18, 2019
1 parent f8d4635 commit 52110aa
Showing 1 changed file with 31 additions and 21 deletions.
52 changes: 31 additions & 21 deletions arch/arm64/kvm/guest.c
@@ -221,6 +221,9 @@ static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	unsigned int max_vq, vq;
 	u64 vqs[DIV_ROUND_UP(SVE_VQ_MAX - SVE_VQ_MIN + 1, 64)];
 
+	if (!vcpu_has_sve(vcpu))
+		return -ENOENT;
+
 	if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
 		return -EINVAL;
 
@@ -242,6 +245,9 @@ static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	unsigned int max_vq, vq;
 	u64 vqs[DIV_ROUND_UP(SVE_VQ_MAX - SVE_VQ_MIN + 1, 64)];
 
+	if (!vcpu_has_sve(vcpu))
+		return -ENOENT;
+
 	if (kvm_arm_vcpu_sve_finalized(vcpu))
 		return -EPERM; /* too late! */
 
@@ -304,7 +310,10 @@ struct sve_state_reg_region {
 	unsigned int upad;	/* extra trailing padding in user memory */
 };
 
-/* Get sanitised bounds for user/kernel SVE register copy */
+/*
+ * Validate SVE register ID and get sanitised bounds for user/kernel SVE
+ * register copy
+ */
 static int sve_reg_to_region(struct sve_state_reg_region *region,
 			     struct kvm_vcpu *vcpu,
 			     const struct kvm_one_reg *reg)
@@ -335,25 +344,30 @@ static int sve_reg_to_region(struct sve_state_reg_region *region,
 	/* Verify that we match the UAPI header: */
 	BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);
 
-	if ((reg->id & SVE_REG_SLICE_MASK) > 0)
-		return -ENOENT;
-
-	vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
-
 	reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;
 
 	if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
+		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
+			return -ENOENT;
+
+		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
+
 		reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
 				SVE_SIG_REGS_OFFSET;
 		reqlen = KVM_SVE_ZREG_SIZE;
 		maxlen = SVE_SIG_ZREG_SIZE(vq);
 	} else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
+		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
+			return -ENOENT;
+
+		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
+
 		reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
 				SVE_SIG_REGS_OFFSET;
 		reqlen = KVM_SVE_PREG_SIZE;
 		maxlen = SVE_SIG_PREG_SIZE(vq);
 	} else {
-		return -ENOENT;
+		return -EINVAL;
 	}
 
 	sve_state_size = vcpu_sve_state_size(vcpu);
@@ -369,24 +383,22 @@ static int sve_reg_to_region(struct sve_state_reg_region *region,
 
 static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
+	int ret;
 	struct sve_state_reg_region region;
 	char __user *uptr = (char __user *)reg->addr;
 
-	if (!vcpu_has_sve(vcpu))
-		return -ENOENT;
-
 	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
 	if (reg->id == KVM_REG_ARM64_SVE_VLS)
 		return get_sve_vls(vcpu, reg);
 
-	/* Otherwise, reg is an architectural SVE register... */
+	/* Try to interpret reg ID as an architectural SVE register... */
+	ret = sve_reg_to_region(&region, vcpu, reg);
+	if (ret)
+		return ret;
 
 	if (!kvm_arm_vcpu_sve_finalized(vcpu))
 		return -EPERM;
 
-	if (sve_reg_to_region(&region, vcpu, reg))
-		return -ENOENT;
-
 	if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
 			 region.klen) ||
 	    clear_user(uptr + region.klen, region.upad))
@@ -397,24 +409,22 @@ static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 
 static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
+	int ret;
 	struct sve_state_reg_region region;
 	const char __user *uptr = (const char __user *)reg->addr;
 
-	if (!vcpu_has_sve(vcpu))
-		return -ENOENT;
-
 	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
 	if (reg->id == KVM_REG_ARM64_SVE_VLS)
 		return set_sve_vls(vcpu, reg);
 
-	/* Otherwise, reg is an architectural SVE register... */
+	/* Try to interpret reg ID as an architectural SVE register... */
+	ret = sve_reg_to_region(&region, vcpu, reg);
+	if (ret)
+		return ret;
 
 	if (!kvm_arm_vcpu_sve_finalized(vcpu))
 		return -EPERM;
 
-	if (sve_reg_to_region(&region, vcpu, reg))
-		return -ENOENT;
-
 	if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
 			   region.klen))
 		return -EFAULT;