KVM: Add local get_mtrr_type() to support MTRR
For EPT memory type support.

Signed-off-by: Sheng Yang <sheng@linux.intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Sheng Yang authored and Avi Kivity committed Dec 31, 2008
1 parent 468d472 commit 74be52e
Showing 1 changed file with 104 additions and 0 deletions.
arch/x86/kvm/mmu.c: 104 additions & 0 deletions
@@ -1393,6 +1393,110 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
	return page;
}

/*
 * The function is based on mtrr_type_lookup() in
 * arch/x86/kernel/cpu/mtrr/generic.c
 */
static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
			 u64 start, u64 end)
{
	int i;
	u64 base, mask;
	u8 prev_match, curr_match;
	int num_var_ranges = KVM_NR_VAR_MTRR;

	if (!mtrr_state->enabled)
		return 0xFF;

	/* Make end inclusive instead of exclusive */
	end--;

	/* Look in fixed ranges. Just return the type as per start */
	if (mtrr_state->have_fixed && (start < 0x100000)) {
		int idx;

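		/*
		 * fixed_ranges[] covers the first 1MB as 8 x 64K entries
		 * (0x00000-0x7FFFF), 16 x 16K entries (0x80000-0xBFFFF) and
		 * 64 x 4K entries (0xC0000-0xFFFFF); the 1 * 8 and 3 * 8
		 * base indexes below follow from that layout.
		 */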
		if (start < 0x80000) {
			idx = 0;
			idx += (start >> 16);
			return mtrr_state->fixed_ranges[idx];
		} else if (start < 0xC0000) {
			idx = 1 * 8;
			idx += ((start - 0x80000) >> 14);
			return mtrr_state->fixed_ranges[idx];
		} else if (start < 0x1000000) {
			idx = 3 * 8;
			idx += ((start - 0xC0000) >> 12);
			return mtrr_state->fixed_ranges[idx];
		}
	}

	/*
	 * Look in variable ranges
	 * Look for multiple ranges matching this address and pick type
	 * as per MTRR precedence
	 */
	if (!(mtrr_state->enabled & 2))
		return mtrr_state->def_type;

	prev_match = 0xFF;
	for (i = 0; i < num_var_ranges; ++i) {
		unsigned short start_state, end_state;

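		/* Bit 11 of the mask MSR is the valid bit for this range */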
		if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11)))
			continue;

		base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) +
		       (mtrr_state->var_ranges[i].base_lo & PAGE_MASK);
		mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) +
		       (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK);

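		/*
		 * If start and end do not fall in the same variable range
		 * the effective type is ambiguous; 0xFE reports that.
		 */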
		start_state = ((start & mask) == (base & mask));
		end_state = ((end & mask) == (base & mask));
		if (start_state != end_state)
			return 0xFE;

		if ((start & mask) != (base & mask))
			continue;

		curr_match = mtrr_state->var_ranges[i].base_lo & 0xff;
		if (prev_match == 0xFF) {
			prev_match = curr_match;
			continue;
		}

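		/*
		 * MTRR precedence for overlapping ranges: UC always wins,
		 * and write-through wins over write-back.
		 */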
		if (prev_match == MTRR_TYPE_UNCACHABLE ||
		    curr_match == MTRR_TYPE_UNCACHABLE)
			return MTRR_TYPE_UNCACHABLE;

		if ((prev_match == MTRR_TYPE_WRBACK &&
		     curr_match == MTRR_TYPE_WRTHROUGH) ||
		    (prev_match == MTRR_TYPE_WRTHROUGH &&
		     curr_match == MTRR_TYPE_WRBACK)) {
			prev_match = MTRR_TYPE_WRTHROUGH;
			curr_match = MTRR_TYPE_WRTHROUGH;
		}

		if (prev_match != curr_match)
			return MTRR_TYPE_UNCACHABLE;
	}

	if (prev_match != 0xFF)
		return prev_match;

	return mtrr_state->def_type;
}

static u8 get_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u8 mtrr;

	mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
			     (gfn << PAGE_SHIFT) + PAGE_SIZE);
	/* Fall back to write-back if the type is undefined (0xff) or mixed (0xfe) */
	if (mtrr == 0xfe || mtrr == 0xff)
		mtrr = MTRR_TYPE_WRBACK;
	return mtrr;
}

static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	unsigned index;
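Not part of the patch itself, but a rough sketch of where this is headed: EPT leaf entries carry a memory type in bits 5:3 and an "ignore PAT" flag in bit 6, so a later caller could fold the MTRR-derived type from get_memory_type() into an EPT entry roughly as below. The macro names here (EPT_MT_SHIFT, EPT_IGNORE_PAT) are illustrative placeholders, not identifiers from the KVM source; the actual EPT wiring is not in this commit.

/*
 * Illustrative sketch only: encodes the MTRR-derived type into an EPT
 * leaf entry. EPT_MT_SHIFT and EPT_IGNORE_PAT are placeholder names.
 */
#define EPT_MT_SHIFT	3		/* EPT memory type lives in bits 5:3 */
#define EPT_IGNORE_PAT	(1ull << 6)	/* take the type from the EPT entry */

static u64 ept_memtype_bits(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u8 type = get_memory_type(vcpu, gfn);

	return ((u64)type << EPT_MT_SHIFT) | EPT_IGNORE_PAT;
}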
