From b0430f39de089920e3aab3f4a9c35c35110bdbea Mon Sep 17 00:00:00 2001
From: Eric Biggers
Date: Thu, 23 Jan 2025 13:29:03 -0800
Subject: [PATCH 1/2] lib/crc: simplify the kconfig options for CRC
 implementations

Make the following simplifications to the kconfig options for choosing
CRC implementations for CRC32 and CRC_T10DIF:

1. Make the option to disable the arch-optimized code be visible only
   when CONFIG_EXPERT=y.

2. Make a single option control the inclusion of the arch-optimized
   code for all enabled CRC variants.

3. Make CRC32_SARWATE (a.k.a. slice-by-1 or byte-by-byte) be the only
   generic CRC32 implementation.

The result is that there is now just one option, CRC_OPTIMIZATIONS,
which is default y and can be disabled only when CONFIG_EXPERT=y.

Rationale:

1. Enabling the arch-optimized code is nearly always the right choice.
   However, people trying to build the tiniest kernel possible would
   find some use in disabling it. Anything we add to CRC32 is de facto
   unconditional, given that CRC32 gets selected by something in nearly
   all kernels. And unfortunately enabling the arch CRC code does not
   eliminate the need to build the generic CRC code into the kernel too,
   due to CPU feature dependencies. The size of the arch CRC code will
   also increase slightly over time as more CRC variants get added and
   more implementations targeting different instruction set extensions
   get added. Thus, it seems worthwhile to still provide an option to
   disable it, but it should be considered an expert-level tweak.

2. Considering the use case described in (1), there doesn't seem to be
   sufficient value in making the arch-optimized CRC code be
   independently configurable for different CRC variants. Note also
   that multiple variants were already grouped together, e.g.
   CONFIG_CRC32 actually enables three different variants of CRC32.

3. The bit-by-bit implementation is uselessly slow, whereas slice-by-n
   for n=4 and n=8 use tables that are inconveniently large: 4096 bytes
   and 8192 bytes respectively, compared to 1024 bytes for n=1. Higher
   n gives higher instruction-level parallelism, so higher n easily
   wins on traditional microbenchmarks on most CPUs. However, the
   larger tables, which are accessed randomly, can be harmful in
   real-world situations where the dcache may be cold or useful data
   may need to be evicted from the dcache. Meanwhile, today most
   architectures have much faster CRC32 implementations using dedicated
   CRC32 instructions or carryless multiplication instructions anyway,
   which make the generic code obsolete in most cases, especially on
   long messages.

   Another reason for going with n=1 is that this is already what is
   used by all the other CRC variants in the kernel. CRC32 was unique
   in having support for larger tables. But as per the above, this can
   be considered an outdated optimization.

The standardization on slice-by-1, a.k.a. CRC32_SARWATE, makes much of
the code in lib/crc32.c unused. A later patch will clean that up.

Link: https://lore.kernel.org/r/20250123212904.118683-2-ebiggers@kernel.org
Reviewed-by: Ard Biesheuvel
Reviewed-by: Martin K. Petersen
Signed-off-by: Eric Biggers
---
 lib/Kconfig | 116 +++++++---------------------------------------------
 1 file changed, 14 insertions(+), 102 deletions(-)

diff --git a/lib/Kconfig b/lib/Kconfig
index a78d22c6507f..e08b26e8e03f 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -164,34 +164,9 @@ config CRC_T10DIF
 config ARCH_HAS_CRC_T10DIF
         bool
 
-choice
-        prompt "CRC-T10DIF implementation"
-        depends on CRC_T10DIF
-        default CRC_T10DIF_IMPL_ARCH if ARCH_HAS_CRC_T10DIF
-        default CRC_T10DIF_IMPL_GENERIC if !ARCH_HAS_CRC_T10DIF
-        help
-          This option allows you to override the default choice of CRC-T10DIF
-          implementation.
-
-config CRC_T10DIF_IMPL_ARCH
-        bool "Architecture-optimized" if ARCH_HAS_CRC_T10DIF
-        help
-          Use the optimized implementation of CRC-T10DIF for the selected
-          architecture. It is recommended to keep this enabled, as it can
-          greatly improve CRC-T10DIF performance.
-
-config CRC_T10DIF_IMPL_GENERIC
-        bool "Generic implementation"
-        help
-          Use the generic table-based implementation of CRC-T10DIF. Selecting
-          this will reduce code size slightly but can greatly reduce CRC-T10DIF
-          performance.
-
-endchoice
-
 config CRC_T10DIF_ARCH
         tristate
-        default CRC_T10DIF if CRC_T10DIF_IMPL_ARCH
+        default CRC_T10DIF if ARCH_HAS_CRC_T10DIF && CRC_OPTIMIZATIONS
 
 config CRC64_ROCKSOFT
         tristate "CRC calculation for the Rocksoft model CRC64"
@@ -214,6 +189,7 @@ config CRC32
         tristate "CRC32/CRC32c functions"
         default y
         select BITREVERSE
+        select CRC32_SARWATE
         help
           This option is provided for the case where no in-kernel-tree
           modules require CRC32/CRC32c functions, but a module built outside
@@ -223,87 +199,12 @@ config CRC32
 config ARCH_HAS_CRC32
         bool
 
-choice
-        prompt "CRC32 implementation"
-        depends on CRC32
-        default CRC32_IMPL_ARCH_PLUS_SLICEBY8 if ARCH_HAS_CRC32
-        default CRC32_IMPL_SLICEBY8 if !ARCH_HAS_CRC32
-        help
-          This option allows you to override the default choice of CRC32
-          implementation. Choose the default unless you know that you need one
-          of the others.
-
-config CRC32_IMPL_ARCH_PLUS_SLICEBY8
-        bool "Arch-optimized, with fallback to slice-by-8" if ARCH_HAS_CRC32
-        help
-          Use architecture-optimized implementation of CRC32. Fall back to
-          slice-by-8 in cases where the arch-optimized implementation cannot be
-          used, e.g. if the CPU lacks support for the needed instructions.
-
-          This is the default when an arch-optimized implementation exists.
-
-config CRC32_IMPL_ARCH_PLUS_SLICEBY1
-        bool "Arch-optimized, with fallback to slice-by-1" if ARCH_HAS_CRC32
-        help
-          Use architecture-optimized implementation of CRC32, but fall back to
-          slice-by-1 instead of slice-by-8 in order to reduce the binary size.
-
-config CRC32_IMPL_SLICEBY8
-        bool "Slice by 8 bytes"
-        help
-          Calculate checksum 8 bytes at a time with a clever slicing algorithm.
-          This is much slower than the architecture-optimized implementation of
-          CRC32 (if the selected arch has one), but it is portable and is the
-          fastest implementation when no arch-optimized implementation is
-          available. It uses an 8KiB lookup table. Most modern processors have
-          enough cache to hold this table without thrashing the cache.
-
-config CRC32_IMPL_SLICEBY4
-        bool "Slice by 4 bytes"
-        help
-          Calculate checksum 4 bytes at a time with a clever slicing algorithm.
-          This is a bit slower than slice by 8, but has a smaller 4KiB lookup
-          table.
-
-          Only choose this option if you know what you are doing.
-
-config CRC32_IMPL_SLICEBY1
-        bool "Slice by 1 byte (Sarwate's algorithm)"
-        help
-          Calculate checksum a byte at a time using Sarwate's algorithm. This
-          is not particularly fast, but has a small 1KiB lookup table.
-
-          Only choose this option if you know what you are doing.
-
-config CRC32_IMPL_BIT
-        bool "Classic Algorithm (one bit at a time)"
-        help
-          Calculate checksum one bit at a time. This is VERY slow, but has
-          no lookup table. This is provided as a debugging option.
-
-          Only choose this option if you are debugging crc32.
-
-endchoice
-
 config CRC32_ARCH
         tristate
-        default CRC32 if CRC32_IMPL_ARCH_PLUS_SLICEBY8 || CRC32_IMPL_ARCH_PLUS_SLICEBY1
-
-config CRC32_SLICEBY8
-        bool
-        default y if CRC32_IMPL_SLICEBY8 || CRC32_IMPL_ARCH_PLUS_SLICEBY8
-
-config CRC32_SLICEBY4
-        bool
-        default y if CRC32_IMPL_SLICEBY4
+        default CRC32 if ARCH_HAS_CRC32 && CRC_OPTIMIZATIONS
 
 config CRC32_SARWATE
         bool
-        default y if CRC32_IMPL_SLICEBY1 || CRC32_IMPL_ARCH_PLUS_SLICEBY1
-
-config CRC32_BIT
-        bool
-        default y if CRC32_IMPL_BIT
 
 config CRC64
         tristate "CRC64 functions"
@@ -343,6 +244,17 @@ config CRC8
           when they need to do cyclic redundancy check according CRC8
           algorithm. Module will be called crc8.
 
+config CRC_OPTIMIZATIONS
+        bool "Enable optimized CRC implementations" if EXPERT
+        default y
+        help
+          Disabling this option reduces code size slightly by disabling the
+          architecture-optimized implementations of any CRC variants that are
+          enabled. CRC checksumming performance may get much slower.
+
+          Keep this enabled unless you're really trying to minimize the size of
+          the kernel.
+
 config XXHASH
         tristate

From 5e3c1c48fac3793c173567df735890d4e29cbb64 Mon Sep 17 00:00:00 2001
From: Eric Biggers
Date: Thu, 23 Jan 2025 13:29:04 -0800
Subject: [PATCH 2/2] lib/crc32: remove other generic implementations

Now that we've standardized on the byte-by-byte implementation of CRC32
as the only generic implementation (see previous commit for the
rationale), remove the code for the other implementations.

Tested with crc_kunit.

Link: https://lore.kernel.org/r/20250123212904.118683-3-ebiggers@kernel.org
Reviewed-by: Ard Biesheuvel
Reviewed-by: Martin K. Petersen
Signed-off-by: Eric Biggers
---
 lib/Kconfig          |   4 -
 lib/crc32.c          | 225 ++-----------------------------------------
 lib/crc32defs.h      |  59 ------------
 lib/gen_crc32table.c | 113 ++++++----------------
 4 files changed, 40 insertions(+), 361 deletions(-)
 delete mode 100644 lib/crc32defs.h

diff --git a/lib/Kconfig b/lib/Kconfig
index e08b26e8e03f..dccb61b7d698 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -189,7 +189,6 @@ config CRC32
         tristate "CRC32/CRC32c functions"
         default y
         select BITREVERSE
-        select CRC32_SARWATE
         help
           This option is provided for the case where no in-kernel-tree
           modules require CRC32/CRC32c functions, but a module built outside
@@ -203,9 +202,6 @@ config CRC32_ARCH
         tristate
         default CRC32 if ARCH_HAS_CRC32 && CRC_OPTIMIZATIONS
 
-config CRC32_SARWATE
-        bool
-
 config CRC64
         tristate "CRC64 functions"
         help
diff --git a/lib/crc32.c b/lib/crc32.c
index 47151624332e..ede6131f66fc 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -30,20 +30,6 @@
 #include
 #include
 #include
-#include
-#include "crc32defs.h"
-
-#if CRC_LE_BITS > 8
-# define tole(x) ((__force u32) cpu_to_le32(x))
-#else
-# define tole(x) (x)
-#endif
-
-#if CRC_BE_BITS > 8
-# define tobe(x) ((__force u32) cpu_to_be32(x))
-#else
-# define tobe(x) (x)
-#endif
 
 #include "crc32table.h"
 
@@ -51,157 +37,20 @@ MODULE_AUTHOR("Matt Domsch ");
 MODULE_DESCRIPTION("Various CRC32 calculations");
 MODULE_LICENSE("GPL");
 
-#if CRC_LE_BITS > 8 || CRC_BE_BITS > 8
-
-/* implements slicing-by-4 or slicing-by-8 algorithm */
-static inline u32 __pure
-crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
-{
-# ifdef __LITTLE_ENDIAN
-# define DO_CRC(x) crc = t0[(crc ^ (x)) & 255] ^ (crc >> 8)
-# define DO_CRC4 (t3[(q) & 255] ^ t2[(q >> 8) & 255] ^ \
-                  t1[(q >> 16) & 255] ^ t0[(q >> 24) & 255])
-# define DO_CRC8 (t7[(q) & 255] ^ t6[(q >> 8) & 255] ^ \
-                  t5[(q >> 16) & 255] ^ t4[(q >> 24) & 255])
-# else
-# define DO_CRC(x) crc = t0[((crc >> 24) ^ (x)) & 255] ^ (crc << 8)
-# define DO_CRC4 (t0[(q) & 255] ^ t1[(q >> 8) & 255] ^ \
-                  t2[(q >> 16) & 255] ^ t3[(q >> 24) & 255])
-# define DO_CRC8 (t4[(q) & 255] ^ t5[(q >> 8) & 255] ^ \
-                  t6[(q >> 16) & 255] ^ t7[(q >> 24) & 255])
-# endif
-        const u32 *b;
-        size_t rem_len;
-# ifdef CONFIG_X86
-        size_t i;
-# endif
-        const u32 *t0=tab[0], *t1=tab[1], *t2=tab[2], *t3=tab[3];
-# if CRC_LE_BITS != 32
-        const u32 *t4 = tab[4], *t5 = tab[5], *t6 = tab[6], *t7 = tab[7];
-# endif
-        u32 q;
-
-        /* Align it */
-        if (unlikely((long)buf & 3 && len)) {
-                do {
-                        DO_CRC(*buf++);
-                } while ((--len) && ((long)buf)&3);
-        }
-
-# if CRC_LE_BITS == 32
-        rem_len = len & 3;
-        len = len >> 2;
-# else
-        rem_len = len & 7;
-        len = len >> 3;
-# endif
-
-        b = (const u32 *)buf;
-# ifdef CONFIG_X86
-        --b;
-        for (i = 0; i < len; i++) {
-# else
-        for (--b; len; --len) {
-# endif
-                q = crc ^ *++b; /* use pre increment for speed */
-# if CRC_LE_BITS == 32
-                crc = DO_CRC4;
-# else
-                crc = DO_CRC8;
-                q = *++b;
-                crc ^= DO_CRC4;
-# endif
-        }
-        len = rem_len;
-        /* And the last few bytes */
-        if (len) {
-                u8 *p = (u8 *)(b + 1) - 1;
-# ifdef CONFIG_X86
-                for (i = 0; i < len; i++)
-                        DO_CRC(*++p); /* use pre increment for speed */
-# else
-                do {
-                        DO_CRC(*++p); /* use pre increment for speed */
-                } while (--len);
-# endif
-        }
-        return crc;
-#undef DO_CRC
-#undef DO_CRC4
-#undef DO_CRC8
-}
-#endif
-
-
-/**
- * crc32_le_generic() - Calculate bitwise little-endian Ethernet AUTODIN II
- *                      CRC32/CRC32C
- * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for other
- *       uses, or the previous crc32/crc32c value if computing incrementally.
- * @p: pointer to buffer over which CRC32/CRC32C is run
- * @len: length of buffer @p
- * @tab: little-endian Ethernet table
- * @polynomial: CRC32/CRC32c LE polynomial
- */
-static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p,
-                                          size_t len, const u32 (*tab)[256],
-                                          u32 polynomial)
+u32 __pure crc32_le_base(u32 crc, const u8 *p, size_t len)
 {
-#if CRC_LE_BITS == 1
-        int i;
-        while (len--) {
-                crc ^= *p++;
-                for (i = 0; i < 8; i++)
-                        crc = (crc >> 1) ^ ((crc & 1) ? polynomial : 0);
-        }
-# elif CRC_LE_BITS == 2
-        while (len--) {
-                crc ^= *p++;
-                crc = (crc >> 2) ^ tab[0][crc & 3];
-                crc = (crc >> 2) ^ tab[0][crc & 3];
-                crc = (crc >> 2) ^ tab[0][crc & 3];
-                crc = (crc >> 2) ^ tab[0][crc & 3];
-        }
-# elif CRC_LE_BITS == 4
-        while (len--) {
-                crc ^= *p++;
-                crc = (crc >> 4) ^ tab[0][crc & 15];
-                crc = (crc >> 4) ^ tab[0][crc & 15];
-        }
-# elif CRC_LE_BITS == 8
-        /* aka Sarwate algorithm */
-        while (len--) {
-                crc ^= *p++;
-                crc = (crc >> 8) ^ tab[0][crc & 255];
-        }
-# else
-        crc = (__force u32) __cpu_to_le32(crc);
-        crc = crc32_body(crc, p, len, tab);
-        crc = __le32_to_cpu((__force __le32)crc);
-#endif
+        while (len--)
+                crc = (crc >> 8) ^ crc32table_le[(crc & 255) ^ *p++];
         return crc;
 }
+EXPORT_SYMBOL(crc32_le_base);
 
-#if CRC_LE_BITS == 1
-u32 __pure crc32_le_base(u32 crc, const u8 *p, size_t len)
-{
-        return crc32_le_generic(crc, p, len, NULL, CRC32_POLY_LE);
-}
-u32 __pure crc32c_le_base(u32 crc, const u8 *p, size_t len)
-{
-        return crc32_le_generic(crc, p, len, NULL, CRC32C_POLY_LE);
-}
-#else
-u32 __pure crc32_le_base(u32 crc, const u8 *p, size_t len)
-{
-        return crc32_le_generic(crc, p, len, crc32table_le, CRC32_POLY_LE);
-}
 u32 __pure crc32c_le_base(u32 crc, const u8 *p, size_t len)
 {
-        return crc32_le_generic(crc, p, len, crc32ctable_le, CRC32C_POLY_LE);
+        while (len--)
+                crc = (crc >> 8) ^ crc32ctable_le[(crc & 255) ^ *p++];
+        return crc;
 }
-#endif
-EXPORT_SYMBOL(crc32_le_base);
 EXPORT_SYMBOL(crc32c_le_base);
 
 /*
@@ -277,64 +126,10 @@ u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len)
 EXPORT_SYMBOL(crc32_le_shift);
 EXPORT_SYMBOL(__crc32c_le_shift);
 
-/**
- * crc32_be_generic() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
- * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for
- *      other uses, or the previous crc32 value if computing incrementally.
- * @p: pointer to buffer over which CRC32 is run
- * @len: length of buffer @p
- * @tab: big-endian Ethernet table
- * @polynomial: CRC32 BE polynomial
- */
-static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,
-                                          size_t len, const u32 (*tab)[256],
-                                          u32 polynomial)
-{
-#if CRC_BE_BITS == 1
-        int i;
-        while (len--) {
-                crc ^= *p++ << 24;
-                for (i = 0; i < 8; i++)
-                        crc =
-                            (crc << 1) ^ ((crc & 0x80000000) ? polynomial :
-                                          0);
-        }
-# elif CRC_BE_BITS == 2
-        while (len--) {
-                crc ^= *p++ << 24;
-                crc = (crc << 2) ^ tab[0][crc >> 30];
-                crc = (crc << 2) ^ tab[0][crc >> 30];
-                crc = (crc << 2) ^ tab[0][crc >> 30];
-                crc = (crc << 2) ^ tab[0][crc >> 30];
-        }
-# elif CRC_BE_BITS == 4
-        while (len--) {
-                crc ^= *p++ << 24;
-                crc = (crc << 4) ^ tab[0][crc >> 28];
-                crc = (crc << 4) ^ tab[0][crc >> 28];
-        }
-# elif CRC_BE_BITS == 8
-        while (len--) {
-                crc ^= *p++ << 24;
-                crc = (crc << 8) ^ tab[0][crc >> 24];
-        }
-# else
-        crc = (__force u32) __cpu_to_be32(crc);
-        crc = crc32_body(crc, p, len, tab);
-        crc = __be32_to_cpu((__force __be32)crc);
-# endif
-        return crc;
-}
-
-#if CRC_BE_BITS == 1
-u32 __pure crc32_be_base(u32 crc, const u8 *p, size_t len)
-{
-        return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE);
-}
-#else
 u32 __pure crc32_be_base(u32 crc, const u8 *p, size_t len)
 {
-        return crc32_be_generic(crc, p, len, crc32table_be, CRC32_POLY_BE);
+        while (len--)
+                crc = (crc << 8) ^ crc32table_be[(crc >> 24) ^ *p++];
+        return crc;
 }
-#endif
 EXPORT_SYMBOL(crc32_be_base);
diff --git a/lib/crc32defs.h b/lib/crc32defs.h
deleted file mode 100644
index 0c8fb5923e7e..000000000000
--- a/lib/crc32defs.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-/* Try to choose an implementation variant via Kconfig */
-#ifdef CONFIG_CRC32_SLICEBY8
-# define CRC_LE_BITS 64
-# define CRC_BE_BITS 64
-#endif
-#ifdef CONFIG_CRC32_SLICEBY4
-# define CRC_LE_BITS 32
-# define CRC_BE_BITS 32
-#endif
-#ifdef CONFIG_CRC32_SARWATE
-# define CRC_LE_BITS 8
-# define CRC_BE_BITS 8
-#endif
-#ifdef CONFIG_CRC32_BIT
-# define CRC_LE_BITS 1
-# define CRC_BE_BITS 1
-#endif
-
-/*
- * How many bits at a time to use. Valid values are 1, 2, 4, 8, 32 and 64.
- * For less performance-sensitive, use 4 or 8 to save table size.
- * For larger systems choose same as CPU architecture as default.
- * This works well on X86_64, SPARC64 systems. This may require some
- * elaboration after experiments with other architectures.
- */
-#ifndef CRC_LE_BITS
-# ifdef CONFIG_64BIT
-# define CRC_LE_BITS 64
-# else
-# define CRC_LE_BITS 32
-# endif
-#endif
-#ifndef CRC_BE_BITS
-# ifdef CONFIG_64BIT
-# define CRC_BE_BITS 64
-# else
-# define CRC_BE_BITS 32
-# endif
-#endif
-
-/*
- * Little-endian CRC computation. Used with serial bit streams sent
- * lsbit-first. Be sure to use cpu_to_le32() to append the computed CRC.
- */
-#if CRC_LE_BITS > 64 || CRC_LE_BITS < 1 || CRC_LE_BITS == 16 || \
-        CRC_LE_BITS & CRC_LE_BITS-1
-# error "CRC_LE_BITS must be one of {1, 2, 4, 8, 32, 64}"
-#endif
-
-/*
- * Big-endian CRC computation. Used with serial bit streams sent
- * msbit-first. Be sure to use cpu_to_be32() to append the computed CRC.
- */
-#if CRC_BE_BITS > 64 || CRC_BE_BITS < 1 || CRC_BE_BITS == 16 || \
-        CRC_BE_BITS & CRC_BE_BITS-1
-# error "CRC_BE_BITS must be one of {1, 2, 4, 8, 32, 64}"
-#endif
diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c
index f755b997b967..6d03425b849e 100644
--- a/lib/gen_crc32table.c
+++ b/lib/gen_crc32table.c
@@ -2,30 +2,11 @@
 #include
 #include "../include/linux/crc32poly.h"
 #include "../include/generated/autoconf.h"
-#include "crc32defs.h"
 #include
 
-#define ENTRIES_PER_LINE 4
-
-#if CRC_LE_BITS > 8
-# define LE_TABLE_ROWS (CRC_LE_BITS/8)
-# define LE_TABLE_SIZE 256
-#else
-# define LE_TABLE_ROWS 1
-# define LE_TABLE_SIZE (1 << CRC_LE_BITS)
-#endif
-
-#if CRC_BE_BITS > 8
-# define BE_TABLE_ROWS (CRC_BE_BITS/8)
-# define BE_TABLE_SIZE 256
-#else
-# define BE_TABLE_ROWS 1
-# define BE_TABLE_SIZE (1 << CRC_BE_BITS)
-#endif
-
-static uint32_t crc32table_le[LE_TABLE_ROWS][256];
-static uint32_t crc32table_be[BE_TABLE_ROWS][256];
-static uint32_t crc32ctable_le[LE_TABLE_ROWS][256];
+static uint32_t crc32table_le[256];
+static uint32_t crc32table_be[256];
+static uint32_t crc32ctable_le[256];
 
 /**
  * crc32init_le() - allocate and initialize LE table data
@@ -34,25 +15,17 @@ static uint32_t crc32ctable_le[LE_TABLE_ROWS][256];
  * fact that crctable[i^j] = crctable[i] ^ crctable[j].
  *
  */
-static void crc32init_le_generic(const uint32_t polynomial,
-                                 uint32_t (*tab)[256])
+static void crc32init_le_generic(const uint32_t polynomial, uint32_t tab[256])
 {
         unsigned i, j;
         uint32_t crc = 1;
 
-        tab[0][0] = 0;
+        tab[0] = 0;
 
-        for (i = LE_TABLE_SIZE >> 1; i; i >>= 1) {
+        for (i = 128; i; i >>= 1) {
                 crc = (crc >> 1) ^ ((crc & 1) ? polynomial : 0);
-                for (j = 0; j < LE_TABLE_SIZE; j += 2 * i)
-                        tab[0][i + j] = crc ^ tab[0][j];
-        }
-        for (i = 0; i < LE_TABLE_SIZE; i++) {
-                crc = tab[0][i];
-                for (j = 1; j < LE_TABLE_ROWS; j++) {
-                        crc = tab[0][crc & 0xff] ^ (crc >> 8);
-                        tab[j][i] = crc;
-                }
+                for (j = 0; j < 256; j += 2 * i)
+                        tab[i + j] = crc ^ tab[j];
         }
 }
 
@@ -74,34 +47,22 @@ static void crc32init_be(void)
         unsigned i, j;
         uint32_t crc = 0x80000000;
 
-        crc32table_be[0][0] = 0;
+        crc32table_be[0] = 0;
 
-        for (i = 1; i < BE_TABLE_SIZE; i <<= 1) {
+        for (i = 1; i < 256; i <<= 1) {
                 crc = (crc << 1) ^ ((crc & 0x80000000) ? CRC32_POLY_BE : 0);
                 for (j = 0; j < i; j++)
-                        crc32table_be[0][i + j] = crc ^ crc32table_be[0][j];
-        }
-        for (i = 0; i < BE_TABLE_SIZE; i++) {
-                crc = crc32table_be[0][i];
-                for (j = 1; j < BE_TABLE_ROWS; j++) {
-                        crc = crc32table_be[0][(crc >> 24) & 0xff] ^ (crc << 8);
-                        crc32table_be[j][i] = crc;
-                }
+                        crc32table_be[i + j] = crc ^ crc32table_be[j];
         }
 }
 
-static void output_table(uint32_t (*table)[256], int rows, int len, char *trans)
+static void output_table(const uint32_t table[256])
 {
-        int i, j;
-
-        for (j = 0 ; j < rows; j++) {
-                printf("{");
-                for (i = 0; i < len - 1; i++) {
-                        if (i % ENTRIES_PER_LINE == 0)
-                                printf("\n");
-                        printf("%s(0x%8.8xL), ", trans, table[j][i]);
-                }
-                printf("%s(0x%8.8xL)},\n", trans, table[j][len - 1]);
+        int i;
+
+        for (i = 0; i < 256; i += 4) {
+                printf("\t0x%08x, 0x%08x, 0x%08x, 0x%08x,\n",
+                       table[i], table[i + 1], table[i + 2], table[i + 3]);
         }
 }
 
@@ -109,34 +70,20 @@ int main(int argc, char** argv)
 {
         printf("/* this file is generated - do not edit */\n\n");
 
-        if (CRC_LE_BITS > 1) {
-                crc32init_le();
-                printf("static const u32 ____cacheline_aligned "
-                       "crc32table_le[%d][%d] = {",
-                       LE_TABLE_ROWS, LE_TABLE_SIZE);
-                output_table(crc32table_le, LE_TABLE_ROWS,
-                             LE_TABLE_SIZE, "tole");
-                printf("};\n");
-        }
+        crc32init_le();
+        printf("static const u32 ____cacheline_aligned crc32table_le[256] = {\n");
+        output_table(crc32table_le);
+        printf("};\n");
 
-        if (CRC_BE_BITS > 1) {
-                crc32init_be();
-                printf("static const u32 ____cacheline_aligned "
-                       "crc32table_be[%d][%d] = {",
-                       BE_TABLE_ROWS, BE_TABLE_SIZE);
-                output_table(crc32table_be, LE_TABLE_ROWS,
-                             BE_TABLE_SIZE, "tobe");
-                printf("};\n");
-        }
-        if (CRC_LE_BITS > 1) {
-                crc32cinit_le();
-                printf("static const u32 ____cacheline_aligned "
-                       "crc32ctable_le[%d][%d] = {",
-                       LE_TABLE_ROWS, LE_TABLE_SIZE);
-                output_table(crc32ctable_le, LE_TABLE_ROWS,
-                             LE_TABLE_SIZE, "tole");
-                printf("};\n");
-        }
+        crc32init_be();
+        printf("static const u32 ____cacheline_aligned crc32table_be[256] = {\n");
+        output_table(crc32table_be);
+        printf("};\n");
+
+        crc32cinit_le();
+        printf("static const u32 ____cacheline_aligned crc32ctable_le[256] = {\n");
+        output_table(crc32ctable_le);
+        printf("};\n");
 
         return 0;
 }
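
Addendum (not part of the series): the slice-by-1 scheme these patches settle on
is small enough to try outside the kernel. The sketch below is a minimal
userspace illustration that mirrors the table construction of
crc32init_le_generic() and the per-byte update of crc32_le_base() shown above.
The standalone wrapper is an assumption of this example rather than anything
from the patches: the 0xedb88320 literal is the value of CRC32_POLY_LE (the
reflected Ethernet polynomial), and main() applies the usual ~0 init and final
xor to the common "123456789" check vector, whose CRC-32 is 0xcbf43926.

/* Minimal userspace sketch of slice-by-1 (Sarwate) CRC32; illustrative only. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CRC32_POLY_LE 0xedb88320u	/* reflected Ethernet CRC32 polynomial */

static uint32_t crc32table_le[256];

/* Mirrors crc32init_le_generic(): uses table[i ^ j] = table[i] ^ table[j]. */
static void crc32init_le(void)
{
        uint32_t crc = 1;
        unsigned int i, j;

        crc32table_le[0] = 0;
        for (i = 128; i; i >>= 1) {
                crc = (crc >> 1) ^ ((crc & 1) ? CRC32_POLY_LE : 0);
                for (j = 0; j < 256; j += 2 * i)
                        crc32table_le[i + j] = crc ^ crc32table_le[j];
        }
}

/* Mirrors crc32_le_base(): one table lookup per input byte. */
static uint32_t crc32_le(uint32_t crc, const uint8_t *p, size_t len)
{
        while (len--)
                crc = (crc >> 8) ^ crc32table_le[(crc & 255) ^ *p++];
        return crc;
}

int main(void)
{
        const char *msg = "123456789";
        uint32_t crc;

        crc32init_le();
        /* Init with ~0 and xor with ~0 at the end, as Ethernet-style users do. */
        crc = crc32_le(~0u, (const uint8_t *)msg, strlen(msg)) ^ ~0u;
        printf("crc32(\"%s\") = 0x%08" PRIx32 "\n", msg, crc); /* expect 0xcbf43926 */
        return 0;
}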