linux / lib / lz4 / lz4_decompress.c
716 lines (640 loc) · 20.3 KB
/*
 * LZ4 - Fast LZ compression algorithm
 * Copyright (C) 2011 - 2016, Yann Collet.
 * BSD 2 - Clause License (http://www.opensource.org/licenses/bsd - license.php)
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *	* Redistributions of source code must retain the above copyright
 *	  notice, this list of conditions and the following disclaimer.
 *	* Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following disclaimer
 *	  in the documentation and/or other materials provided with the
 *	  distribution.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * You can contact the author at :
 *	- LZ4 homepage : http://www.lz4.org
 *	- LZ4 source repository : https://github.com/lz4/lz4
 *
 *	Changed for kernel usage by:
 *	Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
 */

/*-************************************
 *	Dependencies
 **************************************/
#include <linux/lz4.h>
#include "lz4defs.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/unaligned.h>

/*-*****************************
 *	Decompression functions
 *******************************/

#define DEBUGLOG(l, ...) {}	/* disabled */

#ifndef assert
#define assert(condition) ((void)0)
#endif

/*
 * LZ4_decompress_generic() :
 * This generic decompression function covers all use cases.
 * It shall be instantiated several times, using different sets of directives.
 * Note that it is important for performance that this function really get inlined,
 * in order to remove useless branches during compilation optimization.
 */
static FORCE_INLINE int LZ4_decompress_generic(
	 const char * const src,
	 char * const dst,
	 int srcSize,
		/*
		 * If endOnInput == endOnInputSize,
		 * this value is `dstCapacity`
		 */
	 int outputSize,
	 /* endOnOutputSize, endOnInputSize */
	 endCondition_directive endOnInput,
	 /* full, partial */
	 earlyEnd_directive partialDecoding,
	 /* noDict, withPrefix64k, usingExtDict */
	 dict_directive dict,
	 /* always <= dst, == dst when no prefix */
	 const BYTE * const lowPrefix,
	 /* only if dict == usingExtDict */
	 const BYTE * const dictStart,
	 /* note : = 0 if noDict */
	 const size_t dictSize
	 )
{
	const BYTE *ip = (const BYTE *) src;
	const BYTE * const iend = ip + srcSize;

	BYTE *op = (BYTE *) dst;
	BYTE * const oend = op + outputSize;
	BYTE *cpy;

	const BYTE * const dictEnd = (const BYTE *)dictStart + dictSize;
	static const unsigned int inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4};
	static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};

	const int safeDecode = (endOnInput == endOnInputSize);
	const int checkOffset = ((safeDecode) && (dictSize < (int)(64 * KB)));

	/* Set up the "end" pointers for the shortcut. */
	const BYTE *const shortiend = iend -
		(endOnInput ? 14 : 8) /*maxLL*/ - 2 /*offset*/;
	const BYTE *const shortoend = oend -
		(endOnInput ? 14 : 8) /*maxLL*/ - 18 /*maxML*/;

	DEBUGLOG(5, "%s (srcSize:%i, dstSize:%i)", __func__,
		 srcSize, outputSize);

	/* Special cases */
	assert(lowPrefix <= op);
	assert(src != NULL);

	/* Empty output buffer */
	if ((endOnInput) && (unlikely(outputSize == 0)))
		return ((srcSize == 1) && (*ip == 0)) ? 0 : -1;

	if ((!endOnInput) && (unlikely(outputSize == 0)))
		return (*ip == 0 ? 1 : -1);

	if ((endOnInput) && unlikely(srcSize == 0))
		return -1;

	/* Main Loop : decode sequences */
	while (1) {
		size_t length;
		const BYTE *match;
		size_t offset;

		/* get literal length */
		unsigned int const token = *ip++;
		length = token>>ML_BITS;

		/* ip < iend before the increment */
		assert(!endOnInput || ip <= iend);

		/*
		 * A two-stage shortcut for the most common case:
		 * 1) If the literal length is 0..14, and there is enough
		 * space, enter the shortcut and copy 16 bytes on behalf
		 * of the literals (in the fast mode, only 8 bytes can be
		 * safely copied this way).
		 * 2) Further if the match length is 4..18, copy 18 bytes
		 * in a similar manner; but we ensure that there's enough
		 * space in the output for those 18 bytes earlier, upon
		 * entering the shortcut (in other words, there is a
		 * combined check for both stages).
		 *
		 * The & in the likely() below is intentionally not && so that
		 * some compilers can produce better parallelized runtime code
		 */
		if ((endOnInput ? length != RUN_MASK : length <= 8)
		   /*
		    * strictly "less than" on input, to re-enter
		    * the loop with at least one byte
		    */
		   && likely((endOnInput ? ip < shortiend : 1) &
			     (op <= shortoend))) {
			/* Copy the literals */
			LZ4_memcpy(op, ip, endOnInput ? 16 : 8);
			op += length; ip += length;

			/*
			 * The second stage:
			 * prepare for match copying, decode full info.
			 * If it doesn't work out, the info won't be wasted.
			 */
			length = token & ML_MASK; /* match length */
			offset = LZ4_readLE16(ip);
			ip += 2;
			match = op - offset;
			assert(match <= op); /* check overflow */

			/* Do not deal with overlapping matches. */
			if ((length != ML_MASK) &&
			    (offset >= 8) &&
			    (dict == withPrefix64k || match >= lowPrefix)) {
				/* Copy the match. */
				LZ4_memcpy(op + 0, match + 0, 8);
				LZ4_memcpy(op + 8, match + 8, 8);
				LZ4_memcpy(op + 16, match + 16, 2);
				op += length + MINMATCH;
				/* Both stages worked, load the next token. */
				continue;
			}

			/*
			 * The second stage didn't work out, but the info
			 * is ready. Propel it right to the point of match
			 * copying.
			 */
			goto _copy_match;
		}

		/* decode literal length */
		if (length == RUN_MASK) {
			unsigned int s;

			if (unlikely(endOnInput ? ip >= iend - RUN_MASK : 0)) {
				/* overflow detection */
				goto _output_error;
			}
			do {
				s = *ip++;
				length += s;
			} while (likely(endOnInput
				? ip < iend - RUN_MASK
				: 1) & (s == 255));

			if ((safeDecode)
			    && unlikely((uptrval)(op) +
					length < (uptrval)(op))) {
				/* overflow detection */
				goto _output_error;
			}
			if ((safeDecode)
			    && unlikely((uptrval)(ip) +
					length < (uptrval)(ip))) {
				/* overflow detection */
				goto _output_error;
			}
		}

		/* copy literals */
		cpy = op + length;
		LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);

		if (((endOnInput) && ((cpy > oend - MFLIMIT)
			|| (ip + length > iend - (2 + 1 + LASTLITERALS))))
			|| ((!endOnInput) && (cpy > oend - WILDCOPYLENGTH))) {
			if (partialDecoding) {
				if (cpy > oend) {
					/*
					 * Partial decoding :
					 * stop in the middle of literal segment
					 */
					cpy = oend;
					length = oend - op;
				}
				if ((endOnInput)
					&& (ip + length > iend)) {
					/*
					 * Error :
					 * read attempt beyond
					 * end of input buffer
					 */
					goto _output_error;
				}
			} else {
				if ((!endOnInput)
					&& (cpy != oend)) {
					/*
					 * Error :
					 * block decoding must
					 * stop exactly there
					 */
					goto _output_error;
				}
				if ((endOnInput)
					&& ((ip + length != iend)
					|| (cpy > oend))) {
					/*
					 * Error :
					 * input must be consumed
					 */
					goto _output_error;
				}
			}

			/*
			 * supports overlapping memory regions; only matters
			 * for in-place decompression scenarios
			 */
			LZ4_memmove(op, ip, length);
			ip += length;
			op += length;

			/* Necessarily EOF, due to parsing restrictions */
			if (!partialDecoding || (cpy == oend))
				break;
		} else {
			/* may overwrite up to WILDCOPYLENGTH beyond cpy */
			LZ4_wildCopy(op, ip, cpy);
			ip += length;
			op = cpy;
		}

		/* get offset */
		offset = LZ4_readLE16(ip);
		ip += 2;
		match = op - offset;

		/* get matchlength */
		length = token & ML_MASK;

_copy_match:
		if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) {
			/* Error : offset outside buffers */
			goto _output_error;
		}

		/* costs ~1%; silence an msan warning when offset == 0 */
		/*
		 * note : when partialDecoding, there is no guarantee that
		 * at least 4 bytes remain available in output buffer
		 */
		if (!partialDecoding) {
			assert(oend > op);
			assert(oend - op >= 4);

			LZ4_write32(op, (U32)offset);
		}

		if (length == ML_MASK) {
			unsigned int s;

			do {
				s = *ip++;

				if ((endOnInput) && (ip > iend - LASTLITERALS))
					goto _output_error;

				length += s;
			} while (s == 255);

			if ((safeDecode)
				&& unlikely(
					(uptrval)(op) + length < (uptrval)op)) {
				/* overflow detection */
				goto _output_error;
			}
		}

		length += MINMATCH;

		/* match starting within external dictionary */
		if ((dict == usingExtDict) && (match < lowPrefix)) {
			if (unlikely(op + length > oend - LASTLITERALS)) {
				/* doesn't respect parsing restriction */
				if (!partialDecoding)
					goto _output_error;
				length = min(length, (size_t)(oend - op));
			}

			if (length <= (size_t)(lowPrefix - match)) {
				/*
				 * match fits entirely within external
				 * dictionary : just copy
				 */
				memmove(op, dictEnd - (lowPrefix - match),
					length);
				op += length;
			} else {
				/*
				 * match stretches into both external
				 * dictionary and current block
				 */
				size_t const copySize = (size_t)(lowPrefix - match);
				size_t const restSize = length - copySize;

				LZ4_memcpy(op, dictEnd - copySize, copySize);
				op += copySize;
				if (restSize > (size_t)(op - lowPrefix)) {
					/* overlap copy */
					BYTE * const endOfMatch = op + restSize;
					const BYTE *copyFrom = lowPrefix;

					while (op < endOfMatch)
						*op++ = *copyFrom++;
				} else {
					LZ4_memcpy(op, lowPrefix, restSize);
					op += restSize;
				}
			}
			continue;
		}

		/* copy match within block */
		cpy = op + length;

		/*
		 * partialDecoding :
		 * may not respect endBlock parsing restrictions
		 */
		assert(op <= oend);
		if (partialDecoding &&
		    (cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
			size_t const mlen = min(length, (size_t)(oend - op));
			const BYTE * const matchEnd = match + mlen;
			BYTE * const copyEnd = op + mlen;

			if (matchEnd > op) {
				/* overlap copy */
				while (op < copyEnd)
					*op++ = *match++;
			} else {
				LZ4_memcpy(op, match, mlen);
			}
			op = copyEnd;
			if (op == oend)
				break;
			continue;
		}

		if (unlikely(offset < 8)) {
			op[0] = match[0];
			op[1] = match[1];
			op[2] = match[2];
			op[3] = match[3];
			match += inc32table[offset];
			LZ4_memcpy(op + 4, match, 4);
			match -= dec64table[offset];
		} else {
			LZ4_copy8(op, match);
			match += 8;
		}

		op += 8;

		if (unlikely(cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
			BYTE * const oCopyLimit = oend - (WILDCOPYLENGTH - 1);

			if (cpy > oend - LASTLITERALS) {
				/*
				 * Error : last LASTLITERALS bytes
				 * must be literals (uncompressed)
				 */
				goto _output_error;
			}

			if (op < oCopyLimit) {
				LZ4_wildCopy(op, match, oCopyLimit);
				match += oCopyLimit - op;
				op = oCopyLimit;
			}
			while (op < cpy)
				*op++ = *match++;
		} else {
			LZ4_copy8(op, match);
			if (length > 16)
				LZ4_wildCopy(op + 8, match + 8, cpy);
		}
		op = cpy; /* wildcopy correction */
	}

	/* end of decoding */
	if (endOnInput) {
		/* Nb of output bytes decoded */
		return (int) (((char *)op) - dst);
	} else {
		/* Nb of input bytes read */
		return (int) (((const char *)ip) - src);
	}

	/* Overflow error detected */
_output_error:
	return (int) (-(((const char *)ip) - src)) - 1;
}

int LZ4_decompress_safe(const char *source, char *dest,
	int compressedSize, int maxDecompressedSize)
{
	return LZ4_decompress_generic(source, dest,
				      compressedSize, maxDecompressedSize,
				      endOnInputSize, decode_full_block,
				      noDict, (BYTE *)dest, NULL, 0);
}

int LZ4_decompress_safe_partial(const char *src, char *dst,
	int compressedSize, int targetOutputSize, int dstCapacity)
{
	dstCapacity = min(targetOutputSize, dstCapacity);
	return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,
				      endOnInputSize, partial_decode,
				      noDict, (BYTE *)dst, NULL, 0);
}

int LZ4_decompress_fast(const char *source, char *dest, int originalSize)
{
	return LZ4_decompress_generic(source, dest, 0, originalSize,
				      endOnOutputSize, decode_full_block,
				      withPrefix64k,
				      (BYTE *)dest - 64 * KB, NULL, 0);
}

/* ===== Instantiate a few more decoding cases, used more than once. ===== */

int LZ4_decompress_safe_withPrefix64k(const char *source, char *dest,
				      int compressedSize, int maxOutputSize)
{
	return LZ4_decompress_generic(source, dest,
				      compressedSize, maxOutputSize,
				      endOnInputSize, decode_full_block,
				      withPrefix64k,
				      (BYTE *)dest - 64 * KB, NULL, 0);
}

static int LZ4_decompress_safe_withSmallPrefix(const char *source, char *dest,
					       int compressedSize,
					       int maxOutputSize,
					       size_t prefixSize)
{
	return LZ4_decompress_generic(source, dest,
				      compressedSize, maxOutputSize,
				      endOnInputSize, decode_full_block,
				      noDict,
				      (BYTE *)dest - prefixSize, NULL, 0);
}

int LZ4_decompress_safe_forceExtDict(const char *source, char *dest,
				     int compressedSize, int maxOutputSize,
				     const void *dictStart, size_t dictSize)
{
	return LZ4_decompress_generic(source, dest,
				      compressedSize, maxOutputSize,
				      endOnInputSize, decode_full_block,
				      usingExtDict, (BYTE *)dest,
				      (const BYTE *)dictStart, dictSize);
}

static int LZ4_decompress_fast_extDict(const char *source, char *dest,
					int originalSize,
					const void *dictStart, size_t dictSize)
{
	return LZ4_decompress_generic(source, dest,
				      0, originalSize,
				      endOnOutputSize, decode_full_block,
				      usingExtDict, (BYTE *)dest,
				      (const BYTE *)dictStart, dictSize);
}

/*
 * The "double dictionary" mode, for use with e.g. ring buffers: the first part
 * of the dictionary is passed as prefix, and the second via dictStart + dictSize.
 * These routines are used only once, in LZ4_decompress_*_continue().
 */
static FORCE_INLINE
int LZ4_decompress_safe_doubleDict(const char *source, char *dest,
				   int compressedSize, int maxOutputSize,
				   size_t prefixSize,
				   const void *dictStart, size_t dictSize)
{
	return LZ4_decompress_generic(source, dest,
				      compressedSize, maxOutputSize,
				      endOnInputSize, decode_full_block,
				      usingExtDict, (BYTE *)dest - prefixSize,
				      (const BYTE *)dictStart, dictSize);
}

static FORCE_INLINE
int LZ4_decompress_fast_doubleDict(const char *source, char *dest,
				   int originalSize, size_t prefixSize,
				   const void *dictStart, size_t dictSize)
{
	return LZ4_decompress_generic(source, dest,
				      0, originalSize,
				      endOnOutputSize, decode_full_block,
				      usingExtDict, (BYTE *)dest - prefixSize,
				      (const BYTE *)dictStart, dictSize);
}

/* ===== streaming decompression functions ===== */

int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
	const char *dictionary, int dictSize)
{
	LZ4_streamDecode_t_internal *lz4sd =
		&LZ4_streamDecode->internal_donotuse;

	lz4sd->prefixSize = (size_t) dictSize;
	lz4sd->prefixEnd = (const BYTE *) dictionary + dictSize;
	lz4sd->externalDict = NULL;
	lz4sd->extDictSize = 0;
	return 1;
}

/*
 * *_continue() :
 * These decoding functions allow decompression of multiple blocks
 * in "streaming" mode.
 * Previously decoded blocks must still be available at the memory
 * position where they were decoded.
 * If it's not possible, save the relevant part of
 * decoded data into a safe buffer,
 * and indicate where it stands using LZ4_setStreamDecode()
 */
int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
	const char *source, char *dest, int compressedSize, int maxOutputSize)
{
	LZ4_streamDecode_t_internal *lz4sd =
		&LZ4_streamDecode->internal_donotuse;
	int result;

	if (lz4sd->prefixSize == 0) {
		/* The first call, no dictionary yet. */
		assert(lz4sd->extDictSize == 0);
		result = LZ4_decompress_safe(source, dest,
			compressedSize, maxOutputSize);
		if (result <= 0)
			return result;
		lz4sd->prefixSize = result;
		lz4sd->prefixEnd = (BYTE *)dest + result;
	} else if (lz4sd->prefixEnd == (BYTE *)dest) {
		/* They're rolling the current segment. */
		if (lz4sd->prefixSize >= 64 * KB - 1)
			result = LZ4_decompress_safe_withPrefix64k(source,
				dest, compressedSize, maxOutputSize);
		else if (lz4sd->extDictSize == 0)
			result = LZ4_decompress_safe_withSmallPrefix(source,
				dest, compressedSize, maxOutputSize,
				lz4sd->prefixSize);
		else
			result = LZ4_decompress_safe_doubleDict(source, dest,
				compressedSize, maxOutputSize,
				lz4sd->prefixSize,
				lz4sd->externalDict, lz4sd->extDictSize);
		if (result <= 0)
			return result;
		lz4sd->prefixSize += result;
		lz4sd->prefixEnd += result;
	} else {
		/*
		 * The buffer wraps around, or they're
		 * switching to another buffer.
		 */
		lz4sd->extDictSize = lz4sd->prefixSize;
		lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
		result = LZ4_decompress_safe_forceExtDict(source, dest,
			compressedSize, maxOutputSize,
			lz4sd->externalDict, lz4sd->extDictSize);
		if (result <= 0)
			return result;
		lz4sd->prefixSize = result;
		lz4sd->prefixEnd = (BYTE *)dest + result;
	}

	return result;
}

int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
	const char *source, char *dest, int originalSize)
{
	LZ4_streamDecode_t_internal *lz4sd =
		&LZ4_streamDecode->internal_donotuse;
	int result;

	if (lz4sd->prefixSize == 0) {
		assert(lz4sd->extDictSize == 0);
		result = LZ4_decompress_fast(source, dest, originalSize);
		if (result <= 0)
			return result;
		lz4sd->prefixSize = originalSize;
		lz4sd->prefixEnd = (BYTE *)dest + originalSize;
	} else if (lz4sd->prefixEnd == (BYTE *)dest) {
		if (lz4sd->prefixSize >= 64 * KB - 1 ||
		    lz4sd->extDictSize == 0)
			result = LZ4_decompress_fast(source, dest,
						     originalSize);
		else
			result = LZ4_decompress_fast_doubleDict(source, dest,
				originalSize, lz4sd->prefixSize,
				lz4sd->externalDict, lz4sd->extDictSize);
		if (result <= 0)
			return result;
		lz4sd->prefixSize += originalSize;
		lz4sd->prefixEnd += originalSize;
	} else {
		lz4sd->extDictSize = lz4sd->prefixSize;
		lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
		result = LZ4_decompress_fast_extDict(source, dest,
			originalSize, lz4sd->externalDict, lz4sd->extDictSize);
		if (result <= 0)
			return result;
		lz4sd->prefixSize = originalSize;
		lz4sd->prefixEnd = (BYTE *)dest + originalSize;
	}

	return result;
}

int LZ4_decompress_safe_usingDict(const char *source, char *dest,
				  int compressedSize, int maxOutputSize,
				  const char *dictStart, int dictSize)
{
	if (dictSize == 0)
		return LZ4_decompress_safe(source, dest,
					   compressedSize, maxOutputSize);
	if (dictStart+dictSize == dest) {
		if (dictSize >= 64 * KB - 1)
			return LZ4_decompress_safe_withPrefix64k(source, dest,
				compressedSize, maxOutputSize);
		return LZ4_decompress_safe_withSmallPrefix(source, dest,
			compressedSize, maxOutputSize, dictSize);
	}
	return LZ4_decompress_safe_forceExtDict(source, dest,
		compressedSize, maxOutputSize, dictStart, dictSize);
}

int LZ4_decompress_fast_usingDict(const char *source, char *dest,
				  int originalSize,
				  const char *dictStart, int dictSize)
{
	if (dictSize == 0 || dictStart + dictSize == dest)
		return LZ4_decompress_fast(source, dest, originalSize);

	return LZ4_decompress_fast_extDict(source, dest, originalSize,
		dictStart, dictSize);
}

#ifndef STATIC
EXPORT_SYMBOL(LZ4_decompress_safe);
EXPORT_SYMBOL(LZ4_decompress_safe_partial);
EXPORT_SYMBOL(LZ4_decompress_fast);
EXPORT_SYMBOL(LZ4_setStreamDecode);
EXPORT_SYMBOL(LZ4_decompress_safe_continue);
EXPORT_SYMBOL(LZ4_decompress_fast_continue);
EXPORT_SYMBOL(LZ4_decompress_safe_usingDict);
EXPORT_SYMBOL(LZ4_decompress_fast_usingDict);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("LZ4 decompressor");
#endif
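
For context, a minimal sketch of how a caller elsewhere in the kernel might use the exported safe decompressor for a single block. The function and buffer names below are hypothetical, not part of this file; the only assumptions taken from the code above are the signature of LZ4_decompress_safe() and the convention that a negative return value signals malformed input or an output overrun.

/*
 * Hypothetical usage sketch (not part of lz4_decompress.c):
 * decompress one LZ4 block whose uncompressed size is known.
 */
#include <linux/lz4.h>
#include <linux/errno.h>

static int example_lz4_decompress_block(const char *comp, int comp_len,
					 char *out, int out_capacity)
{
	/*
	 * LZ4_decompress_safe() returns the number of bytes written to
	 * 'out', or a negative value if the block is corrupt or would
	 * not fit within 'out_capacity'.
	 */
	int ret = LZ4_decompress_safe(comp, out, comp_len, out_capacity);

	if (ret < 0)
		return -EINVAL;	/* corrupt or truncated input */

	return ret;		/* decompressed size in bytes */
}

For multi-block streams, the *_continue() entry points above keep prefix/dictionary state in an LZ4_streamDecode_t, as described in their comment block; LZ4_setStreamDecode() resets that state when previously decoded data has been moved to a separate buffer.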