KVM: SVM: Add intercept check for emulated cr accesses
This patch adds all necessary intercept checks for
instructions that access the crX registers.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Joerg Roedel authored and Avi Kivity committed May 11, 2011
1 parent 8a76d7f commit cfec82c
Showing 5 changed files with 192 additions and 25 deletions.
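
For readers new to this machinery, here is a rough, self-contained sketch of the pattern the patch implements: the instruction decoder tags each opcode with an intercept identifier, and the emulator asks a vendor callback, at a defined stage, whether the nested hypervisor (L1) has intercepted the instruction. All names in this sketch are illustrative, not the kernel's API; in the patch itself the tag is an x86_intercept_* enum value and the callback is svm_check_intercept() below.

	/* Standalone sketch (illustrative names, not the kernel's API). */
	#include <stdio.h>
	#include <stdbool.h>

	enum intercept { ICPT_NONE, ICPT_CR_READ, ICPT_CR_WRITE };
	enum stage { PRE_EXCEPT, POST_EXCEPT };

	struct insn_info {
		enum intercept intercept; /* tag from the decode table */
		unsigned modrm_reg;       /* which crN the instruction names */
	};

	/* Vendor hook: true if L1 wants a #VMEXIT for this instruction. */
	static bool l1_intercepts(const struct insn_info *info, enum stage s)
	{
		/* Pretend L1 intercepts only writes to CR0. */
		return s == POST_EXCEPT &&
		       info->intercept == ICPT_CR_WRITE && info->modrm_reg == 0;
	}

	static const char *emulate(const struct insn_info *info)
	{
		if (l1_intercepts(info, POST_EXCEPT))
			return "#VMEXIT reflected to L1";
		return "emulated on behalf of L2";
	}

	int main(void)
	{
		struct insn_info rd = { ICPT_CR_READ, 0 }, wr = { ICPT_CR_WRITE, 0 };
		printf("mov from cr0: %s\n", emulate(&rd));
		printf("mov to cr0:   %s\n", emulate(&wr));
		return 0;
	}
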
3 changes: 3 additions & 0 deletions arch/x86/include/asm/kvm_emulate.h
@@ -304,6 +304,9 @@ enum x86_intercept_stage {

enum x86_intercept {
	x86_intercept_none,
+	x86_intercept_cr_read,
+	x86_intercept_cr_write,
+	x86_intercept_clts,
	x86_intercept_lmsw,
	x86_intercept_smsw,
	x86_intercept_lidt,
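
The new enum values double as array indices: svm.c (further down) builds its intercept map with designated initializers keyed by these constants, and any slot left out stays zeroed, i.e. "no intercept defined". A minimal sketch of that idiom, with made-up types and values:

	/* Designated-initializer idiom, illustrative values only. */
	#include <stdio.h>
	#include <stdbool.h>

	enum icpt { ICPT_NONE, ICPT_CR_READ, ICPT_CR_WRITE, ICPT_CLTS, NR_ICPT };

	struct entry { unsigned exit_code; bool valid; };

	static const struct entry map[NR_ICPT] = {
		[ICPT_CR_READ]  = { 0x000, true },
		[ICPT_CR_WRITE] = { 0x010, true },
		/* ICPT_CLTS left out: map[ICPT_CLTS].valid == false */
	};

	int main(void)
	{
		for (int i = 0; i < NR_ICPT; i++)
			printf("icpt %d: valid=%d exit=0x%03x\n",
			       i, map[i].valid, map[i].exit_code);
		return 0;
	}
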
15 changes: 15 additions & 0 deletions arch/x86/include/asm/kvm_host.h
@@ -35,10 +35,25 @@
#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2

#define CR0_RESERVED_BITS \
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))

#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \
				  0xFFFFFF0000000000ULL)
#define CR4_RESERVED_BITS \
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR \
			  | X86_CR4_OSXSAVE \
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)



#define INVALID_PAGE (~(hpa_t)0)
#define VALID_PAGE(x) ((x) != INVALID_PAGE)
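
The CR*_RESERVED_BITS macros above use an inverted mask: OR together every architecturally defined bit and complement the result, so a nonzero AND between the mask and a candidate value flags an attempt to set a reserved bit. A tiny standalone sketch of the idiom (bit values are made up):

	/* Inverted-mask reserved-bits check, illustrative values only. */
	#include <stdio.h>

	#define DEFINED_BITS  (0x1ul | 0x2ul | 0x8ul)       /* e.g. PE, MP, TS */
	#define RESERVED_BITS (~(unsigned long)DEFINED_BITS)

	int main(void)
	{
		unsigned long ok = 0x3, bad = 0x4; /* 0x4 is not a defined bit */
		printf("ok:  %s\n", (ok  & RESERVED_BITS) ? "#GP" : "accepted");
		printf("bad: %s\n", (bad & RESERVED_BITS) ? "#GP" : "accepted");
		return 0;
	}
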
105 changes: 94 additions & 11 deletions arch/x86/kvm/emulate.c
@@ -2445,6 +2445,95 @@ static int em_movdqu(struct x86_emulate_ctxt *ctxt)
	return X86EMUL_CONTINUE;
}

static bool valid_cr(int nr)
{
	switch (nr) {
	case 0:
	case 2 ... 4:
	case 8:
		return true;
	default:
		return false;
	}
}

static int check_cr_read(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	if (!valid_cr(c->modrm_reg))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_cr_write(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	u64 new_val = c->src.val64;
	int cr = c->modrm_reg;

	static u64 cr_reserved_bits[] = {
		0xffffffff00000000ULL,
		0, 0, 0, /* CR3 checked later */
		CR4_RESERVED_BITS,
		0, 0, 0,
		CR8_RESERVED_BITS,
	};

	if (!valid_cr(cr))
		return emulate_ud(ctxt);

	if (new_val & cr_reserved_bits[cr])
		return emulate_gp(ctxt, 0);

	switch (cr) {
	case 0: {
		u64 cr4, efer;
		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
			return emulate_gp(ctxt, 0);

		cr4 = ctxt->ops->get_cr(4, ctxt->vcpu);
		ctxt->ops->get_msr(ctxt->vcpu, MSR_EFER, &efer);

		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
		    !(cr4 & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
	}
	case 3: {
		u64 rsvd = 0;

		if (is_long_mode(ctxt->vcpu))
			rsvd = CR3_L_MODE_RESERVED_BITS;
		else if (is_pae(ctxt->vcpu))
			rsvd = CR3_PAE_RESERVED_BITS;
		else if (is_paging(ctxt->vcpu))
			rsvd = CR3_NONPAE_RESERVED_BITS;

		if (new_val & rsvd)
			return emulate_gp(ctxt, 0);

		break;
	}
	case 4: {
		u64 cr4, efer;

		cr4 = ctxt->ops->get_cr(4, ctxt->vcpu);
		ctxt->ops->get_msr(ctxt->vcpu, MSR_EFER, &efer);

		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
	}
	}

	return X86EMUL_CONTINUE;
}

#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
@@ -2632,14 +2721,16 @@ static struct opcode opcode_table[256] = {
static struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	N, GD(0, &group7), N, N,
-	N, D(ImplicitOps | VendorSpecific), D(ImplicitOps | Priv), N,
+	N, D(ImplicitOps | VendorSpecific), DI(ImplicitOps | Priv, clts), N,
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
	N, D(ImplicitOps | ModRM), N, N,
	/* 0x10 - 0x1F */
	N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
	/* 0x20 - 0x2F */
-	D(ModRM | DstMem | Priv | Op3264), D(ModRM | DstMem | Priv | Op3264),
-	D(ModRM | SrcMem | Priv | Op3264), D(ModRM | SrcMem | Priv | Op3264),
+	DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
+	D(ModRM | DstMem | Priv | Op3264),
+	DIP(ModRM | SrcMem | Priv | Op3264, cr_write, check_cr_write),
+	D(ModRM | SrcMem | Priv | Op3264),
	N, N, N, N,
	N, N, N, N, N, N, N, N,
	/* 0x30 - 0x3F */
@@ -3724,14 +3815,6 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
	case 0x18: /* Grp16 (prefetch/nop) */
		break;
	case 0x20: /* mov cr, reg */
-		switch (c->modrm_reg) {
-		case 1:
-		case 5 ... 7:
-		case 9 ... 15:
-			emulate_ud(ctxt);
-			rc = X86EMUL_PROPAGATE_FAULT;
-			goto done;
-		}
		c->dst.val = ops->get_cr(c->modrm_reg, ctxt->vcpu);
		break;
	case 0x21: /* mov from dr to reg */
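
As a sanity check on the CR0 rules that check_cr_write() enforces, here is a minimal user-space rendering of the two consistency checks (the bit positions are architectural; the helper name is invented):

	/* Sketch of the CR0 consistency rules, not kernel code. */
	#include <stdio.h>
	#include <stdbool.h>

	#define X86_CR0_PE (1ul << 0)   /* protected mode enable */
	#define X86_CR0_NW (1ul << 29)  /* not write-through */
	#define X86_CR0_CD (1ul << 30)  /* cache disable */
	#define X86_CR0_PG (1ul << 31)  /* paging enable */

	/* Returns true when a mov-to-CR0 of 'val' should raise #GP(0). */
	static bool cr0_write_faults(unsigned long val)
	{
		if ((val & X86_CR0_PG) && !(val & X86_CR0_PE))
			return true;  /* paging without protected mode */
		if ((val & X86_CR0_NW) && !(val & X86_CR0_CD))
			return true;  /* NW set while caching is enabled */
		return false;
	}

	int main(void)
	{
		printf("PG alone  -> #GP: %d\n", cr0_write_faults(X86_CR0_PG));
		printf("PE|PG     -> #GP: %d\n", cr0_write_faults(X86_CR0_PE | X86_CR0_PG));
		printf("NW w/o CD -> #GP: %d\n", cr0_write_faults(X86_CR0_NW));
		return 0;
	}
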
81 changes: 80 additions & 1 deletion arch/x86/kvm/svm.c
@@ -3868,11 +3868,90 @@ static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
	update_cr0_intercept(svm);
}

#define POST_EX(exit) { .exit_code = (exit), \
			.stage = X86_ICPT_POST_EXCEPT, \
			.valid = true }

static struct __x86_intercept {
	u32 exit_code;
	enum x86_intercept_stage stage;
	bool valid;
} x86_intercept_map[] = {
	[x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0),
	[x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0),
	[x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0),
	[x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0),
	[x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0),
};

#undef POST_EX

static int svm_check_intercept(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage)
{
-	return X86EMUL_CONTINUE;
	struct vcpu_svm *svm = to_svm(vcpu);
	int vmexit, ret = X86EMUL_CONTINUE;
	struct __x86_intercept icpt_info;
	struct vmcb *vmcb = svm->vmcb;

	if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
		goto out;

	icpt_info = x86_intercept_map[info->intercept];

	if (!icpt_info.valid || stage != icpt_info.stage)
		goto out;

	switch (icpt_info.exit_code) {
	case SVM_EXIT_READ_CR0:
		if (info->intercept == x86_intercept_cr_read)
			icpt_info.exit_code += info->modrm_reg;
		break;
	case SVM_EXIT_WRITE_CR0: {
		unsigned long cr0, val;
		u64 intercept;

		if (info->intercept == x86_intercept_cr_write)
			icpt_info.exit_code += info->modrm_reg;

		if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0)
			break;

		intercept = svm->nested.intercept;

		if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
			break;

		cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
		val = info->src_val & ~SVM_CR0_SELECTIVE_MASK;

		if (info->intercept == x86_intercept_lmsw) {
			cr0 &= 0xfUL;
			val &= 0xfUL;
			/* lmsw can't clear PE - catch this here */
			if (cr0 & X86_CR0_PE)
				val |= X86_CR0_PE;
		}

		if (cr0 ^ val)
			icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;

		break;
	}
	default:
		break;
	}

	vmcb->control.next_rip = info->next_rip;
	vmcb->control.exit_code = icpt_info.exit_code;
	vmexit = nested_svm_exit_handled(svm);

	ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
					   : X86EMUL_CONTINUE;

out:
	return ret;
}

static struct kvm_x86_ops svm_x86_ops = {
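
The `icpt_info.exit_code += info->modrm_reg` step in svm_check_intercept() relies on AMD's CR-access exit codes being consecutive, so the code for CRn is the CR0 code plus n. A short sketch that checks this property, using the constants from the AMD APM (mirrored in arch/x86/include/asm/svm.h):

	/* Exit-code arithmetic for CR accesses; constants per the AMD APM. */
	#include <assert.h>

	#define SVM_EXIT_READ_CR0  0x000
	#define SVM_EXIT_READ_CR4  0x004
	#define SVM_EXIT_WRITE_CR0 0x010
	#define SVM_EXIT_WRITE_CR8 0x018

	int main(void)
	{
		assert(SVM_EXIT_READ_CR0  + 4 == SVM_EXIT_READ_CR4);
		assert(SVM_EXIT_WRITE_CR0 + 8 == SVM_EXIT_WRITE_CR8);
		return 0;
	}
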
13 changes: 0 additions & 13 deletions arch/x86/kvm/x86.c
@@ -60,19 +60,6 @@
#include <asm/div64.h>

#define MAX_IO_MSRS 256
-#define CR0_RESERVED_BITS \
-	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
-			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
-			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
-#define CR4_RESERVED_BITS \
-	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
-			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
-			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR \
-			  | X86_CR4_OSXSAVE \
-			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
-
-#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
-
#define KVM_MAX_MCE_BANKS 32
#define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P)

