diff --git a/[refs] b/[refs]
index 997e4860e736..10ecca16dfe3 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 88200bc28da38bcda1cb1bd218216e83b426d8a8
+refs/heads/master: 821508d4ef7920283b960057903505fed609fd16
diff --git a/trunk/arch/x86/kernel/entry_32.S b/trunk/arch/x86/kernel/entry_32.S
index c929add475c9..899e8938e79f 100644
--- a/trunk/arch/x86/kernel/entry_32.S
+++ b/trunk/arch/x86/kernel/entry_32.S
@@ -442,7 +442,8 @@ sysenter_past_esp:
 
 	GET_THREAD_INFO(%ebp)
 
-	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
+	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
+	testw $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
 	jnz sysenter_audit
 sysenter_do_call:
 	cmpl $(nr_syscalls), %eax
@@ -453,7 +454,7 @@ sysenter_do_call:
 	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
 	movl TI_flags(%ebp), %ecx
-	testl $_TIF_ALLWORK_MASK, %ecx
+	testw $_TIF_ALLWORK_MASK, %cx
 	jne sysexit_audit
 sysenter_exit:
 /* if something modifies registers it must also disable sysexit */
@@ -467,7 +468,7 @@ sysenter_exit:
 
 #ifdef CONFIG_AUDITSYSCALL
 sysenter_audit:
-	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
+	testw $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
 	jnz syscall_trace_entry
 	addl $4,%esp
 	CFI_ADJUST_CFA_OFFSET -4
@@ -484,7 +485,7 @@ sysenter_audit:
 	jmp sysenter_do_call
 
 sysexit_audit:
-	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
+	testw $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %cx
 	jne syscall_exit_work
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_ANY)
@@ -497,7 +498,7 @@ sysexit_audit:
 	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
 	movl TI_flags(%ebp), %ecx
-	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
+	testw $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %cx
 	jne syscall_exit_work
 	movl PT_EAX(%esp),%eax		/* reload syscall return value */
 	jmp sysenter_exit
@@ -522,7 +523,8 @@ ENTRY(system_call)
 	SAVE_ALL
 	GET_THREAD_INFO(%ebp)
 					# system call tracing in operation / emulation
-	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
+	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
+	testw $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
 	jnz syscall_trace_entry
 	cmpl $(nr_syscalls), %eax
 	jae syscall_badsys
@@ -536,7 +538,7 @@ syscall_exit:
 					# between sampling and the iret
 	TRACE_IRQS_OFF
 	movl TI_flags(%ebp), %ecx
-	testl $_TIF_ALLWORK_MASK, %ecx	# current->work
+	testw $_TIF_ALLWORK_MASK, %cx	# current->work
 	jne syscall_exit_work
 
 restore_all:
@@ -671,7 +673,7 @@ END(syscall_trace_entry)
 	# perform syscall exit tracing
 	ALIGN
 syscall_exit_work:
-	testl $_TIF_WORK_SYSCALL_EXIT, %ecx
+	testb $_TIF_WORK_SYSCALL_EXIT, %cl
 	jz work_pending
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_ANY)	# could let syscall_trace_leave() call
diff --git a/trunk/arch/x86/kernel/entry_64.S b/trunk/arch/x86/kernel/entry_64.S
index a331ec38af9e..7ba4621c0dfa 100644
--- a/trunk/arch/x86/kernel/entry_64.S
+++ b/trunk/arch/x86/kernel/entry_64.S
@@ -368,7 +368,6 @@ ENTRY(save_rest)
 END(save_rest)
 
 /* save complete stack frame */
-	.pushsection .kprobes.text, "ax"
 ENTRY(save_paranoid)
 	XCPT_FRAME 1 RDI+8
 	cld
@@ -397,7 +396,6 @@ ENTRY(save_paranoid)
 1:	ret
 	CFI_ENDPROC
 END(save_paranoid)
-	.popsection
 
 /*
  * A newly forked process directly context switches into this address.
@@ -418,6 +416,7 @@ ENTRY(ret_from_fork)
 
 	GET_THREAD_INFO(%rcx)
 
+	CFI_REMEMBER_STATE
 	RESTORE_REST
 
 	testl $3, CS-ARGOFFSET(%rsp)		# from kernel_thread?
@@ -429,6 +428,7 @@ ENTRY(ret_from_fork)
 	RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
 	jmp ret_from_sys_call			# go to the SYSRET fastpath
 
+	CFI_RESTORE_STATE
 	CFI_ENDPROC
 END(ret_from_fork)
 
diff --git a/trunk/arch/x86/lib/memcpy_64.S b/trunk/arch/x86/lib/memcpy_64.S
index ad5441ed1b57..c22981fa2f3a 100644
--- a/trunk/arch/x86/lib/memcpy_64.S
+++ b/trunk/arch/x86/lib/memcpy_64.S
@@ -1,38 +1,30 @@
 /* Copyright 2002 Andi Kleen */
 
 #include <linux/linkage.h>
-
-#include <asm/cpufeature.h>
 #include <asm/dwarf2.h>
+#include <asm/cpufeature.h>
 
 /*
  * memcpy - Copy a memory block.
  *
- * Input:
- * rdi destination
- * rsi source
- * rdx count
- *
+ * Input:
+ * rdi destination
+ * rsi source
+ * rdx count
+ *
  * Output:
  * rax original destination
- */
+ */
 
-/*
- * memcpy_c() - fast string ops (REP MOVSQ) based variant.
- *
- * Calls to this get patched into the kernel image via the
- * alternative instructions framework:
- */
 	ALIGN
 memcpy_c:
 	CFI_STARTPROC
-	movq %rdi, %rax
-
-	movl %edx, %ecx
-	shrl $3, %ecx
-	andl $7, %edx
+	movq %rdi,%rax
+	movl %edx,%ecx
+	shrl $3,%ecx
+	andl $7,%edx
 	rep movsq
-	movl %edx, %ecx
+	movl %edx,%ecx
 	rep movsb
 	ret
 	CFI_ENDPROC
@@ -41,110 +33,99 @@ ENDPROC(memcpy_c)
 ENTRY(__memcpy)
 ENTRY(memcpy)
 	CFI_STARTPROC
+	pushq %rbx
+	CFI_ADJUST_CFA_OFFSET 8
+	CFI_REL_OFFSET rbx, 0
+	movq %rdi,%rax
 
-	/*
-	 * Put the number of full 64-byte blocks into %ecx.
-	 * Tail portion is handled at the end:
-	 */
-	movq %rdi, %rax
-	movl %edx, %ecx
-	shrl $6, %ecx
+	movl %edx,%ecx
+	shrl $6,%ecx
 	jz .Lhandle_tail
 
 	.p2align 4
 .Lloop_64:
-	/*
-	 * We decrement the loop index here - and the zero-flag is
-	 * checked at the end of the loop (instructions inbetween do
-	 * not change the zero flag):
-	 */
 	decl %ecx
 
-	/*
-	 * Move in blocks of 4x16 bytes:
-	 */
-	movq 0*8(%rsi), %r11
-	movq 1*8(%rsi), %r8
-	movq %r11, 0*8(%rdi)
-	movq %r8, 1*8(%rdi)
+	movq (%rsi),%r11
+	movq 8(%rsi),%r8
 
-	movq 2*8(%rsi), %r9
-	movq 3*8(%rsi), %r10
-	movq %r9, 2*8(%rdi)
-	movq %r10, 3*8(%rdi)
+	movq %r11,(%rdi)
+	movq %r8,1*8(%rdi)
 
-	movq 4*8(%rsi), %r11
-	movq 5*8(%rsi), %r8
-	movq %r11, 4*8(%rdi)
-	movq %r8, 5*8(%rdi)
+	movq 2*8(%rsi),%r9
+	movq 3*8(%rsi),%r10
 
-	movq 6*8(%rsi), %r9
-	movq 7*8(%rsi), %r10
-	movq %r9, 6*8(%rdi)
-	movq %r10, 7*8(%rdi)
+	movq %r9,2*8(%rdi)
+	movq %r10,3*8(%rdi)
 
-	leaq 64(%rsi), %rsi
-	leaq 64(%rdi), %rdi
+	movq 4*8(%rsi),%r11
+	movq 5*8(%rsi),%r8
+	movq %r11,4*8(%rdi)
+	movq %r8,5*8(%rdi)
+
+	movq 6*8(%rsi),%r9
+	movq 7*8(%rsi),%r10
+
+	movq %r9,6*8(%rdi)
+	movq %r10,7*8(%rdi)
+
+	leaq 64(%rsi),%rsi
+	leaq 64(%rdi),%rdi
 
 	jnz .Lloop_64
 
 .Lhandle_tail:
-	movl %edx, %ecx
-	andl $63, %ecx
-	shrl $3, %ecx
+	movl %edx,%ecx
+	andl $63,%ecx
+	shrl $3,%ecx
 	jz .Lhandle_7
-
 	.p2align 4
 .Lloop_8:
 	decl %ecx
-	movq (%rsi), %r8
-	movq %r8, (%rdi)
-	leaq 8(%rdi), %rdi
-	leaq 8(%rsi), %rsi
+	movq (%rsi),%r8
+	movq %r8,(%rdi)
+	leaq 8(%rdi),%rdi
+	leaq 8(%rsi),%rsi
 	jnz .Lloop_8
 
 .Lhandle_7:
-	movl %edx, %ecx
-	andl $7, %ecx
-	jz .Lend
-
+	movl %edx,%ecx
+	andl $7,%ecx
+	jz .Lende
 	.p2align 4
 .Lloop_1:
-	movb (%rsi), %r8b
-	movb %r8b, (%rdi)
+	movb (%rsi),%r8b
+	movb %r8b,(%rdi)
 	incq %rdi
 	incq %rsi
 	decl %ecx
 	jnz .Lloop_1
 
-.Lend:
+.Lende:
+	popq %rbx
+	CFI_ADJUST_CFA_OFFSET -8
+	CFI_RESTORE rbx
 	ret
+.Lfinal:
 	CFI_ENDPROC
 ENDPROC(memcpy)
 ENDPROC(__memcpy)
 
-	/*
-	 * Some CPUs run faster using the string copy instructions.
-	 * It is also a lot simpler. Use this when possible:
-	 */
+	/* Some CPUs run faster using the string copy instructions.
+	   It is also a lot simpler. Use this when possible */
 
-	.section .altinstr_replacement, "ax"
+	.section .altinstr_replacement,"ax"
 1:	.byte 0xeb				/* jmp <disp8> */
 	.byte (memcpy_c - memcpy) - (2f - 1b)	/* offset */
 2:
 	.previous
-
-	.section .altinstructions, "a"
+	.section .altinstructions,"a"
 	.align 8
 	.quad memcpy
 	.quad 1b
 	.byte X86_FEATURE_REP_GOOD
-
-	/*
-	 * Replace only beginning, memcpy is used to apply alternatives,
-	 * so it is silly to overwrite itself with nops - reboot is the
-	 * only outcome...
-	 */
+	/* Replace only beginning, memcpy is used to apply alternatives, so it
+	 * is silly to overwrite itself with nops - reboot is only outcome... */
 	.byte 2b - 1b
 	.byte 2b - 1b
 	.previous
diff --git a/trunk/arch/x86/pci/common.c b/trunk/arch/x86/pci/common.c
index 82d22fc601ae..8c362b96b644 100644
--- a/trunk/arch/x86/pci/common.c
+++ b/trunk/arch/x86/pci/common.c
@@ -90,7 +90,7 @@ static int __devinit can_skip_ioresource_align(const struct dmi_system_id *d)
 	return 0;
 }
 
-static struct dmi_system_id can_skip_pciprobe_dmi_table[] __devinitdata = {
+static const struct dmi_system_id can_skip_pciprobe_dmi_table[] __devinitconst = {
 	/*
 	 * Systems where PCI IO resource ISA alignment can be skipped
 	 * when the ISA enable bit in the bridge control is not set
@@ -183,7 +183,7 @@ static int __devinit assign_all_busses(const struct dmi_system_id *d)
 }
 #endif
 
-static struct dmi_system_id __devinitdata pciprobe_dmi_table[] = {
+static const struct dmi_system_id __devinitconst pciprobe_dmi_table[] = {
 #ifdef __i386__
 /*
  * Laptops which need pci=assign-busses to see Cardbus cards
diff --git a/trunk/arch/x86/pci/fixup.c b/trunk/arch/x86/pci/fixup.c
index 7d388d5cf548..9c49919e4d1c 100644
--- a/trunk/arch/x86/pci/fixup.c
+++ b/trunk/arch/x86/pci/fixup.c
@@ -356,7 +356,7 @@ static void __devinit pci_fixup_video(struct pci_dev *pdev)
 DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_video);
 
 
-static struct dmi_system_id __devinitdata msi_k8t_dmi_table[] = {
+static const struct dmi_system_id __devinitconst msi_k8t_dmi_table[] = {
 	{
 		.ident = "MSI-K8T-Neo2Fir",
 		.matches = {
@@ -413,7 +413,7 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
  */
 static u16 toshiba_line_size;
 
-static struct dmi_system_id __devinitdata toshiba_ohci1394_dmi_table[] = {
+static const struct dmi_system_id __devinitconst toshiba_ohci1394_dmi_table[] = {
 	{
 		.ident = "Toshiba PS5 based laptop",
 		.matches = {