Skip to content

Commit

Permalink
KVM: x86: Export the number of uret MSRs to vendor modules
Browse files Browse the repository at this point in the history
Split out and export the number of configured user return MSRs so that
VMX can iterate over the set of MSRs without having to do its own tracking.
Keep the list itself internal to x86 so that vendor code still has to go
through the "official" APIs to add/modify entries.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210504171734.1434054-13-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
  • Loading branch information
Sean Christopherson authored and Paolo Bonzini committed May 7, 2021
1 parent 5e17c62 commit 9cc39a5
Show file tree
Hide file tree
Showing 2 changed files with 14 additions and 16 deletions.
1 change: 1 addition & 0 deletions arch/x86/include/asm/kvm_host.h
Original file line number Diff line number Diff line change
Expand Up @@ -1418,6 +1418,7 @@ struct kvm_arch_async_pf {
bool direct_map;
};

extern u32 __read_mostly kvm_nr_uret_msrs;
extern u64 __read_mostly host_efer;
extern bool __read_mostly allow_smaller_maxphyaddr;
extern struct kvm_x86_ops kvm_x86_ops;
Expand Down
29 changes: 13 additions & 16 deletions arch/x86/kvm/x86.c
Original file line number Diff line number Diff line change
Expand Up @@ -184,11 +184,6 @@ module_param(pi_inject_timer, bint, S_IRUGO | S_IWUSR);
*/
#define KVM_MAX_NR_USER_RETURN_MSRS 16

struct kvm_user_return_msrs_global {
int nr;
u32 msrs[KVM_MAX_NR_USER_RETURN_MSRS];
};

struct kvm_user_return_msrs {
struct user_return_notifier urn;
bool registered;
Expand All @@ -198,7 +193,9 @@ struct kvm_user_return_msrs {
} values[KVM_MAX_NR_USER_RETURN_MSRS];
};

static struct kvm_user_return_msrs_global __read_mostly user_return_msrs_global;
u32 __read_mostly kvm_nr_uret_msrs;
EXPORT_SYMBOL_GPL(kvm_nr_uret_msrs);
static u32 __read_mostly kvm_uret_msrs_list[KVM_MAX_NR_USER_RETURN_MSRS];
static struct kvm_user_return_msrs __percpu *user_return_msrs;

#define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
Expand Down Expand Up @@ -330,10 +327,10 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
user_return_notifier_unregister(urn);
}
local_irq_restore(flags);
for (slot = 0; slot < user_return_msrs_global.nr; ++slot) {
for (slot = 0; slot < kvm_nr_uret_msrs; ++slot) {
values = &msrs->values[slot];
if (values->host != values->curr) {
wrmsrl(user_return_msrs_global.msrs[slot], values->host);
wrmsrl(kvm_uret_msrs_list[slot], values->host);
values->curr = values->host;
}
}
Expand All @@ -358,18 +355,18 @@ EXPORT_SYMBOL_GPL(kvm_probe_user_return_msr);
/*
 * Install @msr at index @slot in the user-return MSR list and, when the
 * slot lies past the current count, grow the count to include it.  The
 * BUG_ON rejects slots beyond the fixed-size backing array
 * (KVM_MAX_NR_USER_RETURN_MSRS).
 *
 * NOTE(review): this span is a scraped unified diff that lost its +/-
 * markers.  The first three body statements are the PRE-patch version
 * (writing the private user_return_msrs_global struct); the last three
 * are the POST-patch replacement (writing the exported kvm_nr_uret_msrs
 * counter and the kvm_uret_msrs_list array, per the commit message).
 * Only one of the two triplets exists in the real file at any revision.
 */
void kvm_define_user_return_msr(unsigned slot, u32 msr)
{
BUG_ON(slot >= KVM_MAX_NR_USER_RETURN_MSRS);
user_return_msrs_global.msrs[slot] = msr;
if (slot >= user_return_msrs_global.nr)
user_return_msrs_global.nr = slot + 1;
/* Post-patch: same logic against the split-out list/counter pair. */
kvm_uret_msrs_list[slot] = msr;
if (slot >= kvm_nr_uret_msrs)
kvm_nr_uret_msrs = slot + 1;
}
EXPORT_SYMBOL_GPL(kvm_define_user_return_msr);

int kvm_find_user_return_msr(u32 msr)
{
int i;

for (i = 0; i < user_return_msrs_global.nr; ++i) {
if (user_return_msrs_global.msrs[i] == msr)
for (i = 0; i < kvm_nr_uret_msrs; ++i) {
if (kvm_uret_msrs_list[i] == msr)
return i;
}
return -1;
Expand All @@ -383,8 +380,8 @@ static void kvm_user_return_msr_cpu_online(void)
u64 value;
int i;

for (i = 0; i < user_return_msrs_global.nr; ++i) {
rdmsrl_safe(user_return_msrs_global.msrs[i], &value);
for (i = 0; i < kvm_nr_uret_msrs; ++i) {
rdmsrl_safe(kvm_uret_msrs_list[i], &value);
msrs->values[i].host = value;
msrs->values[i].curr = value;
}
Expand All @@ -399,7 +396,7 @@ int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
value = (value & mask) | (msrs->values[slot].host & ~mask);
if (value == msrs->values[slot].curr)
return 0;
err = wrmsrl_safe(user_return_msrs_global.msrs[slot], value);
err = wrmsrl_safe(kvm_uret_msrs_list[slot], value);
if (err)
return 1;

Expand Down

0 comments on commit 9cc39a5

Please sign in to comment.