Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 asm updates from Ingo Molnar:
 "This is another big update. Main changes are:

   - lots of x86 system call (and other traps/exceptions) entry code
     enhancements.  In particular the complex parts of the 64-bit entry
     code have been migrated to C code as well, and a number of dusty
     corners have been refreshed.  (Andy Lutomirski)

   - vDSO special mapping robustification and general cleanups (Andy
     Lutomirski)

   - cpufeature refactoring, cleanups and speedups (Borislav Petkov)

   - lots of other changes ..."
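
For orientation: the first bullet above refers to the new C dispatch path added to arch/x86/entry/common.c in the hunks further down. Below is a heavily simplified, standalone sketch of that shape; the struct, table and function here are stand-ins for illustration (the kernel's pt_regs, sys_call_table and do_syscall_64() are not defined this way), and all entry/exit bookkeeping is elided.

    /* Stand-in for the kernel's struct pt_regs: only the fields used below. */
    struct pt_regs {
            unsigned long orig_ax, ax, di, si, dx, r10, r8, r9;
    };

    typedef long (*sys_call_ptr_t)(unsigned long, unsigned long, unsigned long,
                                   unsigned long, unsigned long, unsigned long);

    /* Toy one-entry table; the real table is generated from syscall_64.tbl. */
    static long sys_ni(unsigned long a, unsigned long b, unsigned long c,
                       unsigned long d, unsigned long e, unsigned long f)
    {
            (void)a; (void)b; (void)c; (void)d; (void)e; (void)f;
            return -38; /* -ENOSYS */
    }
    #define NR_syscalls 1
    static const sys_call_ptr_t sys_call_table[NR_syscalls] = { sys_ni };

    /* Shape of do_syscall_64(): bounds-check the number, dispatch through the
     * table, store the return value in regs->ax.  enter_from_user_mode(),
     * IRQ enabling, tracing and syscall_return_slowpath() are elided. */
    static void do_syscall_64_sketch(struct pt_regs *regs)
    {
            unsigned long nr = regs->orig_ax;

            if (nr < NR_syscalls)
                    regs->ax = sys_call_table[nr](regs->di, regs->si, regs->dx,
                                                  regs->r10, regs->r8, regs->r9);
    }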

* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (64 commits)
  x86/cpufeature: Enable new AVX-512 features
  x86/entry/traps: Show unhandled signal for i386 in do_trap()
  x86/entry: Call enter_from_user_mode() with IRQs off
  x86/entry/32: Change INT80 to be an interrupt gate
  x86/entry: Improve system call entry comments
  x86/entry: Remove TIF_SINGLESTEP entry work
  x86/entry/32: Add and check a stack canary for the SYSENTER stack
  x86/entry/32: Simplify and fix up the SYSENTER stack #DB/NMI fixup
  x86/entry: Only allocate space for tss_struct::SYSENTER_stack if needed
  x86/entry: Vastly simplify SYSENTER TF (single-step) handling
  x86/entry/traps: Clear DR6 early in do_debug() and improve the comment
  x86/entry/traps: Clear TIF_BLOCKSTEP on all debug exceptions
  x86/entry/32: Restore FLAGS on SYSEXIT
  x86/entry/32: Filter NT and speed up AC filtering in SYSENTER
  x86/entry/compat: In SYSENTER, sink AC clearing below the existing FLAGS test
  selftests/x86: In syscall_nt, test NT|TF as well
  x86/asm-offsets: Remove PARAVIRT_enabled
  x86/entry/32: Introduce and use X86_BUG_ESPFIX instead of paravirt_enabled
  uprobes: __create_xol_area() must nullify xol_mapping.fault
  x86/cpufeature: Create a new synthetic cpu capability for machine check recovery
  ...
Linus Torvalds committed Mar 15, 2016
2 parents e23604e + d050049 commit ba33ea8
Showing 105 changed files with 1,977 additions and 1,199 deletions.
2 changes: 1 addition & 1 deletion Documentation/kernel-parameters.txt
@@ -666,7 +666,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.

clearcpuid=BITNUM [X86]
Disable CPUID feature X for the kernel. See
arch/x86/include/asm/cpufeature.h for the valid bit
arch/x86/include/asm/cpufeatures.h for the valid bit
numbers. Note the Linux specific bits are not necessarily
stable over kernel options, but the vendor specific
ones should be.
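
A usage sketch, with the specific bit number as an assumption rather than taken from this tree: bit numbers in cpufeatures.h are encoded as 32 * word + bit within that word. Assuming X86_FEATURE_XSAVE is defined as word 4, bit 26, booting with

    clearcpuid=154

(i.e. 32 * 4 + 26) would hide XSAVE from the kernel for that boot.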
2 changes: 2 additions & 0 deletions Documentation/x86/x86_64/boot-options.txt
@@ -60,6 +60,8 @@ Machine check
threshold to 1. Enabling this may make memory predictive failure
analysis less effective if the bios sets thresholds for memory
errors since we will not see details for all errors.
mce=recovery
Force-enable recoverable machine check code paths

nomce (for compatibility with i386): same as mce=off

10 changes: 0 additions & 10 deletions arch/x86/Kconfig.debug
@@ -338,16 +338,6 @@ config DEBUG_IMR_SELFTEST

If unsure say N here.

config X86_DEBUG_STATIC_CPU_HAS
bool "Debug alternatives"
depends on DEBUG_KERNEL
---help---
This option causes additional code to be generated which
fails if static_cpu_has() is used before alternatives have
run.

If unsure, say N.

config X86_DEBUG_FPU
bool "Debug the x86 FPU code"
depends on DEBUG_KERNEL
2 changes: 1 addition & 1 deletion arch/x86/boot/cpuflags.h
@@ -1,7 +1,7 @@
#ifndef BOOT_CPUFLAGS_H
#define BOOT_CPUFLAGS_H

#include <asm/cpufeature.h>
#include <asm/cpufeatures.h>
#include <asm/processor-flags.h>

struct cpu_features {
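
The same one-line include change repeats across the next several files. A hedged summary of the split behind it: the X86_FEATURE_*/X86_BUG_* bit numbers now live in <asm/cpufeatures.h>, while <asm/cpufeature.h> keeps the C helpers (boot_cpu_has(), static_cpu_has(), ...) and pulls the bit definitions in itself, so code that only needs the constants can include the lighter header. Illustrative only, not verbatim kernel code:

    #include <asm/cpufeatures.h>   /* X86_FEATURE_* / X86_BUG_* bit numbers only */
    /* <asm/cpufeature.h> is still needed where boot_cpu_has()/static_cpu_has() is used. */
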
2 changes: 1 addition & 1 deletion arch/x86/boot/mkcpustr.c
@@ -17,7 +17,7 @@

#include "../include/asm/required-features.h"
#include "../include/asm/disabled-features.h"
#include "../include/asm/cpufeature.h"
#include "../include/asm/cpufeatures.h"
#include "../kernel/cpu/capflags.c"

int main(void)
2 changes: 1 addition & 1 deletion arch/x86/crypto/crc32-pclmul_glue.c
@@ -33,7 +33,7 @@
#include <linux/crc32.h>
#include <crypto/internal/hash.h>

#include <asm/cpufeature.h>
#include <asm/cpufeatures.h>
#include <asm/cpu_device_id.h>
#include <asm/fpu/api.h>

2 changes: 1 addition & 1 deletion arch/x86/crypto/crc32c-intel_glue.c
@@ -30,7 +30,7 @@
#include <linux/kernel.h>
#include <crypto/internal/hash.h>

#include <asm/cpufeature.h>
#include <asm/cpufeatures.h>
#include <asm/cpu_device_id.h>
#include <asm/fpu/internal.h>

2 changes: 1 addition & 1 deletion arch/x86/crypto/crct10dif-pclmul_glue.c
@@ -30,7 +30,7 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <asm/fpu/api.h>
#include <asm/cpufeature.h>
#include <asm/cpufeatures.h>
#include <asm/cpu_device_id.h>

asmlinkage __u16 crc_t10dif_pcl(__u16 crc, const unsigned char *buf,
31 changes: 0 additions & 31 deletions arch/x86/entry/calling.h
@@ -201,37 +201,6 @@ For 32-bit we have the following conventions - kernel is built with
.byte 0xf1
.endm

#else /* CONFIG_X86_64 */

/*
* For 32bit only simplified versions of SAVE_ALL/RESTORE_ALL. These
* are different from the entry_32.S versions in not changing the segment
* registers. So only suitable for in kernel use, not when transitioning
* from or to user space. The resulting stack frame is not a standard
* pt_regs frame. The main use case is calling C code from assembler
* when all the registers need to be preserved.
*/

.macro SAVE_ALL
pushl %eax
pushl %ebp
pushl %edi
pushl %esi
pushl %edx
pushl %ecx
pushl %ebx
.endm

.macro RESTORE_ALL
popl %ebx
popl %ecx
popl %edx
popl %esi
popl %edi
popl %ebp
popl %eax
.endm

#endif /* CONFIG_X86_64 */

/*
106 changes: 53 additions & 53 deletions arch/x86/entry/common.c
@@ -26,6 +26,7 @@
#include <asm/traps.h>
#include <asm/vdso.h>
#include <asm/uaccess.h>
#include <asm/cpufeature.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
@@ -44,6 +45,8 @@ __visible void enter_from_user_mode(void)
CT_WARN_ON(ct_state() != CONTEXT_USER);
user_exit();
}
#else
static inline void enter_from_user_mode(void) {}
#endif

static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
@@ -84,17 +87,6 @@ unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch)

work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;

#ifdef CONFIG_CONTEXT_TRACKING
/*
* If TIF_NOHZ is set, we are required to call user_exit() before
* doing anything that could touch RCU.
*/
if (work & _TIF_NOHZ) {
enter_from_user_mode();
work &= ~_TIF_NOHZ;
}
#endif

#ifdef CONFIG_SECCOMP
/*
* Do seccomp first -- it should minimize exposure of other
@@ -171,16 +163,6 @@ long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
BUG_ON(regs != task_pt_regs(current));

/*
* If we stepped into a sysenter/syscall insn, it trapped in
* kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
* If user-mode had set TF itself, then it's still clear from
* do_debug() and we need to set it again to restore the user
* state. If we entered on the slow path, TF was already set.
*/
if (work & _TIF_SINGLESTEP)
regs->flags |= X86_EFLAGS_TF;

#ifdef CONFIG_SECCOMP
/*
* Call seccomp_phase2 before running the other hooks so that
@@ -268,19 +250,30 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
/* Called with IRQs disabled. */
__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
{
struct thread_info *ti = pt_regs_to_thread_info(regs);
u32 cached_flags;

if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
local_irq_disable();

lockdep_sys_exit();

cached_flags =
READ_ONCE(pt_regs_to_thread_info(regs)->flags);
cached_flags = READ_ONCE(ti->flags);

if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
exit_to_usermode_loop(regs, cached_flags);

#ifdef CONFIG_COMPAT
/*
* Compat syscalls set TS_COMPAT. Make sure we clear it before
* returning to user mode. We need to clear it *after* signal
* handling, because syscall restart has a fixup for compat
* syscalls. The fixup is exercised by the ptrace_syscall_32
* selftest.
*/
ti->status &= ~TS_COMPAT;
#endif

user_enter();
}

@@ -332,33 +325,45 @@ __visible inline void syscall_return_slowpath(struct pt_regs *regs)
if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
syscall_slow_exit_work(regs, cached_flags);

#ifdef CONFIG_COMPAT
local_irq_disable();
prepare_exit_to_usermode(regs);
}

#ifdef CONFIG_X86_64
__visible void do_syscall_64(struct pt_regs *regs)
{
struct thread_info *ti = pt_regs_to_thread_info(regs);
unsigned long nr = regs->orig_ax;

enter_from_user_mode();
local_irq_enable();

if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
nr = syscall_trace_enter(regs);

/*
* Compat syscalls set TS_COMPAT. Make sure we clear it before
* returning to user mode.
* NB: Native and x32 syscalls are dispatched from the same
* table. The only functional difference is the x32 bit in
* regs->orig_ax, which changes the behavior of some syscalls.
*/
ti->status &= ~TS_COMPAT;
#endif
if (likely((nr & __SYSCALL_MASK) < NR_syscalls)) {
regs->ax = sys_call_table[nr & __SYSCALL_MASK](
regs->di, regs->si, regs->dx,
regs->r10, regs->r8, regs->r9);
}

local_irq_disable();
prepare_exit_to_usermode(regs);
syscall_return_slowpath(regs);
}
#endif

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
/*
* Does a 32-bit syscall. Called with IRQs on and does all entry and
* exit work and returns with IRQs off. This function is extremely hot
* in workloads that use it, and it's usually called from
* Does a 32-bit syscall. Called with IRQs on in CONTEXT_KERNEL. Does
* all entry and exit work and returns with IRQs off. This function is
* extremely hot in workloads that use it, and it's usually called from
* do_fast_syscall_32, so forcibly inline it to improve performance.
*/
#ifdef CONFIG_X86_32
/* 32-bit kernels use a trap gate for INT80, and the asm code calls here. */
__visible
#else
/* 64-bit kernels use do_syscall_32_irqs_off() instead. */
static
#endif
__always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
{
struct thread_info *ti = pt_regs_to_thread_info(regs);
unsigned int nr = (unsigned int)regs->orig_ax;
@@ -393,14 +398,13 @@ __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
syscall_return_slowpath(regs);
}

#ifdef CONFIG_X86_64
/* Handles INT80 on 64-bit kernels */
__visible void do_syscall_32_irqs_off(struct pt_regs *regs)
/* Handles int $0x80 */
__visible void do_int80_syscall_32(struct pt_regs *regs)
{
enter_from_user_mode();
local_irq_enable();
do_syscall_32_irqs_on(regs);
}
#endif

/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible long do_fast_syscall_32(struct pt_regs *regs)
@@ -420,12 +424,11 @@ __visible long do_fast_syscall_32(struct pt_regs *regs)
*/
regs->ip = landing_pad;

/*
* Fetch EBP from where the vDSO stashed it.
*
* WARNING: We are in CONTEXT_USER and RCU isn't paying attention!
*/
enter_from_user_mode();

local_irq_enable();

/* Fetch EBP from where the vDSO stashed it. */
if (
#ifdef CONFIG_X86_64
/*
Expand All @@ -443,9 +446,6 @@ __visible long do_fast_syscall_32(struct pt_regs *regs)
/* User code screwed up. */
local_irq_disable();
regs->ax = -EFAULT;
#ifdef CONFIG_CONTEXT_TRACKING
enter_from_user_mode();
#endif
prepare_exit_to_usermode(regs);
return 0; /* Keep it simple: use IRET. */
}