
Commit

Merge tag 'x86-urgent-2020-04-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:
 "A set of three patches to fix the fallout of the newly added split
  lock detection feature.

  It addresses the case where a KVM guest triggers a split lock #AC and
  KVM reinjects it into the guest, which is not prepared to handle it.

  Add proper sanity checks which prevent the unconditional injection
  into the guest and handle the #AC on the host side in the same way as
  user space detections are handled. Depending on the detection mode it
  either warns and disables detection for the task, or kills the task if
  the mode is set to fatal"

* tag 'x86-urgent-2020-04-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  KVM: VMX: Extend VMXs #AC interceptor to handle split lock #AC in guest
  KVM: x86: Emulate split-lock access as a write in emulator
  x86/split_lock: Provide handle_guest_split_lock()
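
For context on what is being fixed: a "split lock" is a LOCK-prefixed
(atomic) operation whose operand straddles two cache lines. With split
lock detection enabled, the CPU raises #AC instead of taking the
expensive system-wide bus lock. A minimal user-space reproducer,
assuming a 64-byte cache line (illustrative only, not part of this
commit):

#include <stdint.h>

/* aligned(64) starts the struct on a cache line; pad[62] places
 * val at bytes 62..65, straddling the 64-byte boundary. */
struct {
	char pad[62];
	uint32_t val;
} __attribute__((packed, aligned(64))) box;

int main(void)
{
	/* A LOCK-prefixed read-modify-write on a line-crossing
	 * operand, i.e. a split lock -> #AC when detection is on. */
	__atomic_fetch_add(&box.val, 1, __ATOMIC_SEQ_CST);
	return 0;
}

On a host booted with split_lock_detect=warn this triggers the
ratelimited "#AC: ... took a split_lock trap" warning seen in intel.c
below; with split_lock_detect=fatal the task is killed with SIGBUS.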
Linus Torvalds committed Apr 12, 2020
2 parents 0785249 + e6f8b6c commit 4f8a3cc
Showing 4 changed files with 79 additions and 9 deletions.
6 changes: 6 additions & 0 deletions arch/x86/include/asm/cpu.h
@@ -44,12 +44,18 @@ unsigned int x86_stepping(unsigned int sig);
 extern void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c);
 extern void switch_to_sld(unsigned long tifn);
 extern bool handle_user_split_lock(struct pt_regs *regs, long error_code);
+extern bool handle_guest_split_lock(unsigned long ip);
 #else
 static inline void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c) {}
 static inline void switch_to_sld(unsigned long tifn) {}
 static inline bool handle_user_split_lock(struct pt_regs *regs, long error_code)
 {
 	return false;
 }
+
+static inline bool handle_guest_split_lock(unsigned long ip)
+{
+	return false;
+}
 #endif
 #endif /* _ASM_X86_CPU_H */
33 changes: 28 additions & 5 deletions arch/x86/kernel/cpu/intel.c
@@ -21,6 +21,7 @@
 #include <asm/elf.h>
 #include <asm/cpu_device_id.h>
 #include <asm/cmdline.h>
+#include <asm/traps.h>
 
 #ifdef CONFIG_X86_64
 #include <linux/topology.h>
@@ -1066,13 +1067,10 @@ static void split_lock_init(void)
 	split_lock_verify_msr(sld_state != sld_off);
 }
 
-bool handle_user_split_lock(struct pt_regs *regs, long error_code)
+static void split_lock_warn(unsigned long ip)
 {
-	if ((regs->flags & X86_EFLAGS_AC) || sld_state == sld_fatal)
-		return false;
-
 	pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n",
-			    current->comm, current->pid, regs->ip);
+			    current->comm, current->pid, ip);
 
 	/*
 	 * Disable the split lock detection for this task so it can make
@@ -1081,6 +1079,31 @@ bool handle_user_split_lock(struct pt_regs *regs, long error_code)
 	 */
 	sld_update_msr(false);
 	set_tsk_thread_flag(current, TIF_SLD);
+}
+
+bool handle_guest_split_lock(unsigned long ip)
+{
+	if (sld_state == sld_warn) {
+		split_lock_warn(ip);
+		return true;
+	}
+
+	pr_warn_once("#AC: %s/%d %s split_lock trap at address: 0x%lx\n",
+		     current->comm, current->pid,
+		     sld_state == sld_fatal ? "fatal" : "bogus", ip);
+
+	current->thread.error_code = 0;
+	current->thread.trap_nr = X86_TRAP_AC;
+	force_sig_fault(SIGBUS, BUS_ADRALN, NULL);
+	return false;
+}
+EXPORT_SYMBOL_GPL(handle_guest_split_lock);
+
+bool handle_user_split_lock(struct pt_regs *regs, long error_code)
+{
+	if ((regs->flags & X86_EFLAGS_AC) || sld_state == sld_fatal)
+		return false;
+	split_lock_warn(regs->ip);
 	return true;
 }
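
The intel.c change in practice: in warn mode a guest-triggered split
lock is handled like a user-space one (ratelimited warning, detection
disabled for the task via sld_update_msr(false), TIF_SLD set so it is
re-enabled when the task schedules out). Anything else is either fatal
mode or a "bogus" #AC that fired while detection was nominally off; in
both cases the code warns once and forces SIGBUS on the vCPU thread
instead of letting KVM reinject an exception the guest cannot handle.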
37 changes: 34 additions & 3 deletions arch/x86/kvm/vmx/vmx.c
@@ -4588,6 +4588,26 @@ static int handle_machine_check(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
+/*
+ * If the host has split lock detection disabled, then #AC is
+ * unconditionally injected into the guest, which is the pre split lock
+ * detection behaviour.
+ *
+ * If the host has split lock detection enabled then #AC is
+ * only injected into the guest when:
+ *  - Guest CPL == 3 (user mode)
+ *  - Guest has #AC detection enabled in CR0
+ *  - Guest EFLAGS has AC bit set
+ */
+static inline bool guest_inject_ac(struct kvm_vcpu *vcpu)
+{
+	if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
+		return true;
+
+	return vmx_get_cpl(vcpu) == 3 && kvm_read_cr0_bits(vcpu, X86_CR0_AM) &&
+	       (kvm_get_rflags(vcpu) & X86_EFLAGS_AC);
+}
+
 static int handle_exception_nmi(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -4653,9 +4673,6 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
 		return handle_rmode_exception(vcpu, ex_no, error_code);
 
 	switch (ex_no) {
-	case AC_VECTOR:
-		kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
-		return 1;
 	case DB_VECTOR:
 		dr6 = vmcs_readl(EXIT_QUALIFICATION);
 		if (!(vcpu->guest_debug &
@@ -4684,6 +4701,20 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
 		kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
 		kvm_run->debug.arch.exception = ex_no;
 		break;
+	case AC_VECTOR:
+		if (guest_inject_ac(vcpu)) {
+			kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
+			return 1;
+		}
+
+		/*
+		 * Handle split lock. Depending on detection mode this will
+		 * either warn and disable split lock detection for this
+		 * task or force SIGBUS on it.
+		 */
+		if (handle_guest_split_lock(kvm_rip_read(vcpu)))
+			return 1;
+		fallthrough;
 	default:
 		kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
 		kvm_run->ex.exception = ex_no;
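
The CPL == 3 / CR0.AM / EFLAGS.AC leg of guest_inject_ac() keeps the
architectural alignment check working inside guests. A hypothetical
guest user-space probe that satisfies all three injection conditions
(not part of this commit):

#include <stdint.h>

int main(void)
{
	char buf[8] __attribute__((aligned(8)));
	volatile uint32_t *p = (volatile uint32_t *)(buf + 1); /* misaligned */

	/* Set EFLAGS.AC (bit 18); CR0.AM is set by default on Linux. */
	__asm__ volatile("pushfq; orq $0x40000, (%%rsp); popfq" ::: "cc", "memory");

	*p = 1; /* misaligned store raises #AC; KVM injects it as before */
	return 0;
}

A host-side split lock #AC, by contrast, no longer matches this filter
and is routed to handle_guest_split_lock() instead of being blindly
reinjected.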
12 changes: 11 additions & 1 deletion arch/x86/kvm/x86.c
@@ -5839,6 +5839,7 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
 {
 	struct kvm_host_map map;
 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+	u64 page_line_mask;
 	gpa_t gpa;
 	char *kaddr;
 	bool exchanged;
@@ -5853,7 +5854,16 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
 	    (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
 		goto emul_write;
 
-	if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
+	/*
+	 * Emulate the atomic as a straight write to avoid #AC if SLD is
+	 * enabled in the host and the access splits a cache line.
+	 */
+	if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
+		page_line_mask = ~(cache_line_size() - 1);
+	else
+		page_line_mask = PAGE_MASK;
+
+	if (((gpa + bytes - 1) & page_line_mask) != (gpa & page_line_mask))
 		goto emul_write;
 
 	if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))
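
A standalone sketch of the new mask arithmetic, assuming a 64-byte
cache line and 4 KiB pages (crosses() is local to this example; the
kernel uses cache_line_size() and PAGE_MASK):

#include <stdint.h>
#include <stdio.h>

/* Does [gpa, gpa + bytes) cross a boundary of the given mask? */
static int crosses(uint64_t gpa, uint64_t bytes, uint64_t mask)
{
	return ((gpa + bytes - 1) & mask) != (gpa & mask);
}

int main(void)
{
	uint64_t line_mask = ~(64ULL - 1);   /* SLD on: cache-line granularity */
	uint64_t page_mask = ~(4096ULL - 1); /* SLD off: page granularity */

	/* A 4-byte cmpxchg at page offset 62 stays in one page but splits
	 * a cache line, so with SLD enabled it now takes the emul_write
	 * (plain write) path rather than risking a host-side #AC. */
	printf("%d\n", crosses(0x1000 + 62, 4, line_mask)); /* prints 1 */
	printf("%d\n", crosses(0x1000 + 62, 4, page_mask)); /* prints 0 */
	return 0;
}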
