Skip to content

Commit

Permalink
x86: improve bitop code generation with clang
Browse files Browse the repository at this point in the history
This uses the new ASM_INPUT_RM macro to avoid the bad code generation
issue that clang has with the more generic "rm" asm input constraint.

This ends up avoiding generating code like this:

 	mov    %r10,(%rsp)
 	tzcnt  (%rsp),%rcx

which now becomes just

 	tzcnt  %r10,%rcx

and in the process ends up also removing a few unnecessary stack frames
when the only use was that pointless "asm uses memory location off stack".

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
  • Loading branch information
Linus Torvalds committed May 22, 2024
1 parent 7453b94 commit b9b60b3
Showing 1 changed file with 5 additions and 5 deletions.
10 changes: 5 additions & 5 deletions arch/x86/include/asm/bitops.h
Original file line number Diff line number Diff line change
Expand Up @@ -250,7 +250,7 @@ static __always_inline unsigned long variable__ffs(unsigned long word)
{
asm("rep; bsf %1,%0"
: "=r" (word)
: "rm" (word));
: ASM_INPUT_RM (word));
return word;
}

Expand Down Expand Up @@ -297,7 +297,7 @@ static __always_inline unsigned long __fls(unsigned long word)

asm("bsr %1,%0"
: "=r" (word)
: "rm" (word));
: ASM_INPUT_RM (word));
return word;
}

Expand All @@ -320,7 +320,7 @@ static __always_inline int variable_ffs(int x)
*/
asm("bsfl %1,%0"
: "=r" (r)
: "rm" (x), "0" (-1));
: ASM_INPUT_RM (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
asm("bsfl %1,%0\n\t"
"cmovzl %2,%0"
Expand Down Expand Up @@ -377,7 +377,7 @@ static __always_inline int fls(unsigned int x)
*/
asm("bsrl %1,%0"
: "=r" (r)
: "rm" (x), "0" (-1));
: ASM_INPUT_RM (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
asm("bsrl %1,%0\n\t"
"cmovzl %2,%0"
Expand Down Expand Up @@ -416,7 +416,7 @@ static __always_inline int fls64(__u64 x)
*/
asm("bsrq %1,%q0"
: "+r" (bitpos)
: "rm" (x));
: ASM_INPUT_RM (x));
return bitpos + 1;
}
#else
Expand Down

0 comments on commit b9b60b3

Please sign in to comment.