x86/uaccess: Move copy_user_handle_tail() into asm
By writing the function in asm we avoid cross-object code flow, and
objtool no longer gets confused about a 'stray' CLAC.

Also, the asm version is actually _simpler_.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Peter Zijlstra authored and Ingo Molnar committed Apr 3, 2019
1 parent 8f4faed commit 3693ca8
Showing 4 changed files with 48 additions and 47 deletions.
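In effect the patch moves the byte-wise tail copier out of C (usercopy_64.c) and into copy_user_64.S, next to the fixup code that jumps to it, so objtool sees the CLAC in the same object as its matching STAC. For orientation, here is a minimal user-space C model of the contract the function implements: copy until a fault, return the count of bytes left uncopied. The fault is simulated; this is illustrative only, not kernel code.

#include <stdio.h>

/*
 * Illustrative model of the copy_user_handle_tail() contract, in plain
 * user-space C: copy byte by byte, stop at the first (here: simulated)
 * fault, and return how many bytes were NOT copied. 0 means success.
 */
static unsigned long tail_copy_model(char *to, const char *from,
				     unsigned long len, unsigned long fault_at)
{
	unsigned long done;

	for (done = 0; done < len; done++) {
		if (done == fault_at)		/* simulated page fault */
			break;
		to[done] = from[done];
	}
	return len - done;			/* uncopied bytes */
}

int main(void)
{
	char src[8] = "abcdefg", dst[8] = { 0 };

	/* fault on the 6th byte: 5 copied, 2 of 7 left uncopied */
	printf("uncopied: %lu\n", tail_copy_model(dst, src, 7, 5));
	return 0;
}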
24 changes: 0 additions & 24 deletions arch/x86/include/asm/asm.h
@@ -148,30 +148,6 @@
 	_ASM_PTR (entry);			\
 	.popsection
 
-.macro ALIGN_DESTINATION
-	/* check for bad alignment of destination */
-	movl %edi,%ecx
-	andl $7,%ecx
-	jz 102f				/* already aligned */
-	subl $8,%ecx
-	negl %ecx
-	subl %ecx,%edx
-100:	movb (%rsi),%al
-101:	movb %al,(%rdi)
-	incq %rsi
-	incq %rdi
-	decl %ecx
-	jnz 100b
-102:
-	.section .fixup,"ax"
-103:	addl %ecx,%edx			/* ecx is zerorest also */
-	jmp copy_user_handle_tail
-	.previous
-
-	_ASM_EXTABLE_UA(100b, 103b)
-	_ASM_EXTABLE_UA(101b, 103b)
-.endm
-
 #else
 # define _EXPAND_EXTABLE_HANDLE(x) #x
 # define _ASM_EXTABLE_HANDLE(from, to, handler)			\
3 changes: 0 additions & 3 deletions arch/x86/include/asm/uaccess_64.h
@@ -207,9 +207,6 @@ __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
 	return __copy_user_flushcache(dst, src, size);
 }
 
-unsigned long
-copy_user_handle_tail(char *to, char *from, unsigned len);
-
 unsigned long
 mcsafe_handle_tail(char *to, char *from, unsigned len);
 
48 changes: 48 additions & 0 deletions arch/x86/lib/copy_user_64.S
@@ -16,6 +16,30 @@
 #include <asm/smap.h>
 #include <asm/export.h>
 
+.macro ALIGN_DESTINATION
+	/* check for bad alignment of destination */
+	movl %edi,%ecx
+	andl $7,%ecx
+	jz 102f				/* already aligned */
+	subl $8,%ecx
+	negl %ecx
+	subl %ecx,%edx
+100:	movb (%rsi),%al
+101:	movb %al,(%rdi)
+	incq %rsi
+	incq %rdi
+	decl %ecx
+	jnz 100b
+102:
+	.section .fixup,"ax"
+103:	addl %ecx,%edx			/* ecx is zerorest also */
+	jmp copy_user_handle_tail
+	.previous
+
+	_ASM_EXTABLE_UA(100b, 103b)
+	_ASM_EXTABLE_UA(101b, 103b)
+.endm
+
 /*
  * copy_user_generic_unrolled - memory copy with exception handling.
  * This version is for CPUs like P4 that don't have efficient micro
@@ -193,6 +217,30 @@ ENTRY(copy_user_enhanced_fast_string)
 ENDPROC(copy_user_enhanced_fast_string)
 EXPORT_SYMBOL(copy_user_enhanced_fast_string)
 
+/*
+ * Try to copy last bytes and clear the rest if needed.
+ * Since protection fault in copy_from/to_user is not a normal situation,
+ * it is not necessary to optimize tail handling.
+ *
+ * Input:
+ * rdi destination
+ * rsi source
+ * rdx count
+ *
+ * Output:
+ * eax uncopied bytes or 0 if successful.
+ */
+ALIGN;
+copy_user_handle_tail:
+	movl %edx,%ecx
+1:	rep movsb
+2:	mov %ecx,%eax
+	ASM_CLAC
+	ret
+
+	_ASM_EXTABLE_UA(1b, 2b)
+ENDPROC(copy_user_handle_tail)
+
 /*
  * copy_user_nocache - Uncached memory copy with exception handling
  * This will force destination out of cache for more performance.
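Two details make the new routine this short: rep movsb decrements the count in %rcx as it copies, so when an access faults %ecx already holds the number of uncopied bytes, and the _ASM_EXTABLE_UA(1b, 2b) entry makes the page-fault handler resume at label 2, where that count is moved to %eax and returned. A rough user-space C sketch of that control flow, with setjmp/longjmp standing in for the exception table (illustrative only, not kernel code):

#include <setjmp.h>
#include <stdio.h>

static jmp_buf fixup;	/* plays the role of the exception-table entry */

static void simulated_fault(void)
{
	longjmp(fixup, 1);	/* fault handler resumes at the fixup */
}

/*
 * Sketch of the new routine's flow: the copy loop stands in for
 * 'rep movsb' at label 1, the setjmp target for the fixup at label 2.
 * Either way the function returns the count of uncopied bytes.
 */
static unsigned long handle_tail_model(char *to, const char *from,
				       unsigned long count,
				       unsigned long fault_at)
{
	volatile unsigned long remaining = count;

	if (setjmp(fixup))		/* "label 2": fixup target */
		return remaining;	/* %ecx -> %eax in the asm */

	while (remaining) {		/* "label 1": the copy itself */
		if (count - remaining == fault_at)
			simulated_fault();
		*to++ = *from++;
		remaining--;
	}
	return 0;			/* everything copied */
}

int main(void)
{
	char src[4] = "abc", dst[4] = { 0 };

	/* fault on the 3rd byte: 1 of 3 bytes left uncopied */
	printf("uncopied: %lu\n", handle_tail_model(dst, src, 3, 2));
	return 0;
}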
20 changes: 0 additions & 20 deletions arch/x86/lib/usercopy_64.c
@@ -54,26 +54,6 @@ unsigned long clear_user(void __user *to, unsigned long n)
 }
 EXPORT_SYMBOL(clear_user);
 
-/*
- * Try to copy last bytes and clear the rest if needed.
- * Since protection fault in copy_from/to_user is not a normal situation,
- * it is not necessary to optimize tail handling.
- */
-__visible unsigned long
-copy_user_handle_tail(char *to, char *from, unsigned len)
-{
-	for (; len; --len, to++) {
-		char c;
-
-		if (__get_user_nocheck(c, from++, sizeof(char)))
-			break;
-		if (__put_user_nocheck(c, to, sizeof(char)))
-			break;
-	}
-	clac();
-	return len;
-}
-
 /*
  * Similar to copy_user_handle_tail, probe for the write fault point,
  * but reuse __memcpy_mcsafe in case a new read error is encountered.
