Skip to content

Commit

Permalink
x86/csum: Remove unnecessary odd handling
Browse files Browse the repository at this point in the history
The special case for odd aligned buffers is unnecessary and mostly
just adds overhead. Aligned buffers are the expectation, and even for
unaligned buffers, the only case that was helped is if the buffer was
1 byte from word aligned, which is ~1/7 of the cases. Overall it seems
highly unlikely to be worth the extra branch.

It was left in the previous perf improvement patch because I was
erroneously comparing the exact output of `csum_partial(...)`, but
really we only need `csum_fold(csum_partial(...))` to match, so it's
safe to remove.

All csum kunit tests pass.

Signed-off-by: Noah Goldstein <goldstein.w.n@gmail.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: David Laight <david.laight@aculab.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
  • Loading branch information
Noah Goldstein authored and Linus Torvalds committed Jan 4, 2024
1 parent 5eff55d commit 5d4acb6
Showing 1 changed file with 4 additions and 32 deletions.
36 changes: 4 additions & 32 deletions arch/x86/lib/csum-partial_64.c
Original file line number Diff line number Diff line change
Expand Up @@ -11,26 +11,9 @@
#include <asm/checksum.h>
#include <asm/word-at-a-time.h>

static inline unsigned short from32to16(unsigned a)
static inline __wsum csum_finalize_sum(u64 temp64)
{
unsigned short b = a >> 16;
asm("addw %w2,%w0\n\t"
"adcw $0,%w0\n"
: "=r" (b)
: "0" (b), "r" (a));
return b;
}

static inline __wsum csum_tail(u64 temp64, int odd)
{
unsigned int result;

result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
if (unlikely(odd)) {
result = from32to16(result);
result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
}
return (__force __wsum)result;
return (__force __wsum)((temp64 + ror64(temp64, 32)) >> 32);
}

/*
Expand All @@ -47,17 +30,6 @@ static inline __wsum csum_tail(u64 temp64, int odd)
__wsum csum_partial(const void *buff, int len, __wsum sum)
{
u64 temp64 = (__force u64)sum;
unsigned odd;

odd = 1 & (unsigned long) buff;
if (unlikely(odd)) {
if (unlikely(len == 0))
return sum;
temp64 = ror32((__force u32)sum, 8);
temp64 += (*(unsigned char *)buff << 8);
len--;
buff++;
}

/*
* len == 40 is the hot case due to IPv6 headers, but annotating it likely()
Expand All @@ -73,7 +45,7 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
"adcq $0,%[res]"
: [res] "+r"(temp64)
: [src] "r"(buff), "m"(*(const char(*)[40])buff));
return csum_tail(temp64, odd);
return csum_finalize_sum(temp64);
}
if (unlikely(len >= 64)) {
/*
Expand Down Expand Up @@ -143,7 +115,7 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
: [res] "+r"(temp64)
: [trail] "r"(trail));
}
return csum_tail(temp64, odd);
return csum_finalize_sum(temp64);
}
EXPORT_SYMBOL(csum_partial);

Expand Down

0 comments on commit 5d4acb6

Please sign in to comment.