KVM: x86: introduce ISA specific smi_allowed callback
Similar to NMI, there may be ISA specific reasons why an SMI cannot be
injected into the guest. This commit adds a new smi_allowed callback to
be implemented in following commits.

Signed-off-by: Ladi Prosek <lprosek@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Ladi Prosek authored and Paolo Bonzini committed Oct 12, 2017
1 parent 0234bf8 commit 72d7b37
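
The new callback is deliberately a stub in this commit: both vendor implementations below simply return 1. For illustration only, a later ISA-specific implementation might look roughly like the following sketch (the condition shown is an assumption, not part of this commit), where an SMI is not injected directly while a nested guest is running:

static int vmx_smi_allowed(struct kvm_vcpu *vcpu)
{
	/*
	 * Hypothetical condition: while L2 is active, an SMI should
	 * first force a transition back to L1 instead of being
	 * injected directly into the nested guest.
	 */
	if (is_guest_mode(vcpu))
		return 0;

	return 1;
}

The actual conditions are left to the follow-up patches referenced by the commit message.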
Showing 4 changed files with 14 additions and 1 deletion.
1 change: 1 addition & 0 deletions arch/x86/include/asm/kvm_host.h
@@ -1062,6 +1062,7 @@ struct kvm_x86_ops {
 
 	void (*setup_mce)(struct kvm_vcpu *vcpu);
 
+	int (*smi_allowed)(struct kvm_vcpu *vcpu);
 	int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
 	int (*pre_leave_smm)(struct kvm_vcpu *vcpu, u64 smbase);
 };
6 changes: 6 additions & 0 deletions arch/x86/kvm/svm.c
@@ -5401,6 +5401,11 @@ static void svm_setup_mce(struct kvm_vcpu *vcpu)
 	vcpu->arch.mcg_cap &= 0x1ff;
 }
 
+static int svm_smi_allowed(struct kvm_vcpu *vcpu)
+{
+	return 1;
+}
+
 static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 {
 	/* TODO: Implement */
@@ -5524,6 +5529,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.update_pi_irte = svm_update_pi_irte,
 	.setup_mce = svm_setup_mce,
 
+	.smi_allowed = svm_smi_allowed,
 	.pre_enter_smm = svm_pre_enter_smm,
 	.pre_leave_smm = svm_pre_leave_smm,
 };
6 changes: 6 additions & 0 deletions arch/x86/kvm/vmx.c
@@ -11916,6 +11916,11 @@ static void vmx_setup_mce(struct kvm_vcpu *vcpu)
 			~FEATURE_CONTROL_LMCE;
 }
 
+static int vmx_smi_allowed(struct kvm_vcpu *vcpu)
+{
+	return 1;
+}
+
 static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 {
 	/* TODO: Implement */
@@ -12054,6 +12059,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 
 	.setup_mce = vmx_setup_mce,
 
+	.smi_allowed = vmx_smi_allowed,
 	.pre_enter_smm = vmx_pre_enter_smm,
 	.pre_leave_smm = vmx_pre_leave_smm,
 };
2 changes: 1 addition & 1 deletion arch/x86/kvm/x86.c
@@ -6438,7 +6438,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
 		}
 
 		kvm_x86_ops->queue_exception(vcpu);
-	} else if (vcpu->arch.smi_pending && !is_smm(vcpu)) {
+	} else if (vcpu->arch.smi_pending && !is_smm(vcpu) && kvm_x86_ops->smi_allowed(vcpu)) {
 		vcpu->arch.smi_pending = false;
 		enter_smm(vcpu);
 	} else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
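
The x86.c hunk above mirrors the existing nmi_allowed gate: a pending SMI is delivered only when the vCPU is not already in SMM and the vendor callback agrees. The standalone toy program below (illustrative names only, not kernel code) models that ops-table gating pattern:

#include <stdbool.h>
#include <stdio.h>

struct toy_vcpu {
	bool smi_pending;
	bool in_smm;
	bool in_guest_mode;	/* stand-in for an ISA-specific condition */
};

struct toy_ops {
	int (*smi_allowed)(struct toy_vcpu *v);
};

/* Per-"ISA" policy: refuse direct injection while a nested guest runs. */
static int toy_smi_allowed(struct toy_vcpu *v)
{
	return !v->in_guest_mode;
}

static const struct toy_ops ops = { .smi_allowed = toy_smi_allowed };

int main(void)
{
	struct toy_vcpu v = { .smi_pending = true, .in_guest_mode = true };

	/* Same shape as the inject_pending_event() condition above. */
	if (v.smi_pending && !v.in_smm && ops.smi_allowed(&v))
		printf("deliver SMI now\n");
	else
		printf("SMI stays pending\n");

	return 0;
}

With in_guest_mode set, the SMI stays pending, which is the behaviour the real callback can request once the following commits fill in the vendor-specific checks.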
