1
0
mirror of https://github.com/soarqin/DSP_Mods_TO.git synced 2026-03-28 09:07:14 +08:00

update compression libraries, while formatting code

This commit is contained in:
2025-07-21 16:31:56 +08:00
parent 3cc1dfa750
commit 45a7552471
18 changed files with 1666 additions and 788 deletions

View File

@@ -1,6 +1,6 @@
/*
LZ4 - Fast LZ compression algorithm
Copyright (C) 2011-2020, Yann Collet.
Copyright (c) Yann Collet. All rights reserved.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
@@ -79,7 +79,7 @@
( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \
|| defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
# define LZ4_FORCE_MEMORY_ACCESS 2
# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__)
# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__) || defined(_MSC_VER)
# define LZ4_FORCE_MEMORY_ACCESS 1
# endif
#endif
@@ -106,15 +106,13 @@
# define LZ4_SRC_INCLUDED 1
#endif
#ifndef LZ4_STATIC_LINKING_ONLY
#define LZ4_STATIC_LINKING_ONLY
#endif
#ifndef LZ4_DISABLE_DEPRECATE_WARNINGS
#define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */
# define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */
#endif
#define LZ4_STATIC_LINKING_ONLY /* LZ4_DISTANCE_MAX */
#ifndef LZ4_STATIC_LINKING_ONLY
# define LZ4_STATIC_LINKING_ONLY
#endif
#include "lz4.h"
/* see also "memory routines" below */
@@ -126,14 +124,17 @@
# include <intrin.h> /* only present in VS2005+ */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
# pragma warning(disable : 6237) /* disable: C6237: conditional expression is always 0 */
# pragma warning(disable : 6239) /* disable: C6239: (<non-zero constant> && <expression>) always evaluates to the result of <expression> */
# pragma warning(disable : 6240) /* disable: C6240: (<expression> && <non-zero constant>) always evaluates to the result of <expression> */
# pragma warning(disable : 6326) /* disable: C6326: Potential comparison of a constant with another constant */
#endif /* _MSC_VER */
#ifndef LZ4_FORCE_INLINE
# ifdef _MSC_VER /* Visual Studio */
# if defined (_MSC_VER) && !defined (__clang__) /* MSVC */
# define LZ4_FORCE_INLINE static __forceinline
# else
# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
# ifdef __GNUC__
# if defined (__GNUC__) || defined (__clang__)
# define LZ4_FORCE_INLINE static inline __attribute__((always_inline))
# else
# define LZ4_FORCE_INLINE static inline
@@ -300,12 +301,12 @@ static int LZ4_isAligned(const void* ptr, size_t alignment)
#include <limits.h>
#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
# include <stdint.h>
typedef uint8_t BYTE;
typedef uint16_t U16;
typedef uint32_t U32;
typedef int32_t S32;
typedef uint64_t U64;
typedef uintptr_t uptrval;
typedef unsigned char BYTE; /*uint8_t not necessarily blessed to alias arbitrary type*/
typedef uint16_t U16;
typedef uint32_t U32;
typedef int32_t S32;
typedef uint64_t U64;
typedef uintptr_t uptrval;
#else
# if UINT_MAX != 4294967295UL
# error "LZ4 code (when not C++ or C99) assumes that sizeof(int) == 4"
@@ -365,6 +366,11 @@ static unsigned LZ4_isLittleEndian(void)
return one.c[0];
}
#if defined(__GNUC__) || defined(__INTEL_COMPILER)
#define LZ4_PACK( __Declaration__ ) __Declaration__ __attribute__((__packed__))
#elif defined(_MSC_VER)
#define LZ4_PACK( __Declaration__ ) __pragma( pack(push, 1) ) __Declaration__ __pragma( pack(pop))
#endif
#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)
/* lie to the compiler about data alignment; use with caution */
@@ -380,9 +386,9 @@ static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
typedef struct { U16 u16; } __attribute__((packed)) LZ4_unalign16;
typedef struct { U32 u32; } __attribute__((packed)) LZ4_unalign32;
typedef struct { reg_t uArch; } __attribute__((packed)) LZ4_unalignST;
LZ4_PACK(typedef struct { U16 u16; }) LZ4_unalign16;
LZ4_PACK(typedef struct { U32 u32; }) LZ4_unalign32;
LZ4_PACK(typedef struct { reg_t uArch; }) LZ4_unalignST;
static U16 LZ4_read16(const void* ptr) { return ((const LZ4_unalign16*)ptr)->u16; }
static U32 LZ4_read32(const void* ptr) { return ((const LZ4_unalign32*)ptr)->u32; }
@@ -427,10 +433,22 @@ static U16 LZ4_readLE16(const void* memPtr)
return LZ4_read16(memPtr);
} else {
const BYTE* p = (const BYTE*)memPtr;
return (U16)((U16)p[0] + (p[1]<<8));
return (U16)((U16)p[0] | (p[1]<<8));
}
}
#ifdef LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT
/* Read a 32-bit value stored little-endian at memPtr, independent of the
 * host's byte order. On little-endian hosts this is a plain (possibly
 * unaligned) read via LZ4_read32(); on big-endian hosts the bytes are
 * assembled manually. */
static U32 LZ4_readLE32(const void* memPtr)
{
    if (LZ4_isLittleEndian()) {
        return LZ4_read32(memPtr);
    } else {
        const BYTE* p = (const BYTE*)memPtr;
        /* cast each byte to U32 before shifting: `p[3]<<24` promotes to a
         * signed int, and shifting a set bit into the sign position when
         * p[3] >= 0x80 is undefined behavior in C */
        return (U32)p[0] | ((U32)p[1]<<8) | ((U32)p[2]<<16) | ((U32)p[3]<<24);
    }
}
#endif
static void LZ4_writeLE16(void* memPtr, U16 value)
{
if (LZ4_isLittleEndian()) {
@@ -460,13 +478,15 @@ static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};
#ifndef LZ4_FAST_DEC_LOOP
# if defined __i386__ || defined _M_IX86 || defined __x86_64__ || defined _M_X64
# define LZ4_FAST_DEC_LOOP 1
# elif defined(__aarch64__) && defined(__APPLE__)
# define LZ4_FAST_DEC_LOOP 1
# elif defined(__aarch64__) && !defined(__clang__)
/* On non-Apple aarch64, we disable this optimization for clang because
# elif defined(__aarch64__)
# if defined(__clang__) && defined(__ANDROID__)
/* On Android aarch64, we disable this optimization for clang because
* on certain mobile chipsets, performance is reduced with clang. For
* more information refer to https://github.com/lz4/lz4/pull/707 */
# define LZ4_FAST_DEC_LOOP 1
# define LZ4_FAST_DEC_LOOP 0
# else
# define LZ4_FAST_DEC_LOOP 1
# endif
# else
# define LZ4_FAST_DEC_LOOP 0
# endif
@@ -512,7 +532,7 @@ LZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd)
/* LZ4_memcpy_using_offset() presumes :
* - dstEnd >= dstPtr + MINMATCH
* - there is at least 8 bytes available to write after dstEnd */
* - there is at least 12 bytes available to write after dstEnd */
LZ4_FORCE_INLINE void
LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
{
@@ -527,12 +547,12 @@ LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const si
case 2:
LZ4_memcpy(v, srcPtr, 2);
LZ4_memcpy(&v[2], srcPtr, 2);
#if defined(_MSC_VER) && (_MSC_VER <= 1936) /* MSVC 2022 ver 17.6 or earlier */
#if defined(_MSC_VER) && (_MSC_VER <= 1937) /* MSVC 2022 ver 17.7 or earlier */
# pragma warning(push)
# pragma warning(disable : 6385) /* warning C6385: Reading invalid data from 'v'. */
#endif
LZ4_memcpy(&v[4], v, 4);
#if defined(_MSC_VER) && (_MSC_VER <= 1936) /* MSVC 2022 ver 17.6 or earlier */
#if defined(_MSC_VER) && (_MSC_VER <= 1937) /* MSVC 2022 ver 17.7 or earlier */
# pragma warning(pop)
#endif
break;
@@ -779,7 +799,12 @@ LZ4_FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
{
if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);
#ifdef LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT
return LZ4_hash4(LZ4_readLE32(p), tableType);
#else
return LZ4_hash4(LZ4_read32(p), tableType);
#endif
}
LZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType)
@@ -873,7 +898,7 @@ LZ4_prepareTable(LZ4_stream_t_internal* const cctx,
|| tableType == byPtr
|| inputSize >= 4 KB)
{
DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", cctx);
DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", (void*)cctx);
MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE);
cctx->currentOffset = 0;
cctx->tableType = (U32)clearedTable;
@@ -898,7 +923,7 @@ LZ4_prepareTable(LZ4_stream_t_internal* const cctx,
cctx->dictSize = 0;
}
/** LZ4_compress_generic() :
/** LZ4_compress_generic_validated() :
* inlined, to ensure branches are decided at compilation time.
* The following conditions are presumed already validated:
* - source != NULL
@@ -1080,7 +1105,10 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
/* Catch up */
filledIp = ip;
while (((ip>anchor) & (match > lowLimit)) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }
assert(ip > anchor); /* this is always true as ip has been advanced before entering the main loop */
if ((match > lowLimit) && unlikely(ip[-1] == match[-1])) {
do { ip--; match--; } while (((ip > anchor) & (match > lowLimit)) && (unlikely(ip[-1] == match[-1])));
}
/* Encode Literals */
{ unsigned const litLength = (unsigned)(ip - anchor);
@@ -1095,7 +1123,7 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
goto _last_literals;
}
if (litLength >= RUN_MASK) {
int len = (int)(litLength - RUN_MASK);
unsigned len = litLength - RUN_MASK;
*token = (RUN_MASK<<ML_BITS);
for(; len >= 255 ; len-=255) *op++ = 255;
*op++ = (BYTE)len;
@@ -1452,22 +1480,30 @@ int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacit
/* Note!: This function leaves the stream in an unclean/broken state!
* It is not safe to subsequently use the same state with a _fastReset() or
* _continue() call without resetting it. */
static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize)
static int LZ4_compress_destSize_extState_internal(LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize, int acceleration)
{
void* const s = LZ4_initStream(state, sizeof (*state));
assert(s != NULL); (void)s;
if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) { /* compression success is guaranteed */
return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1);
return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, acceleration);
} else {
if (*srcSizePtr < LZ4_64Klimit) {
return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, 1);
return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, acceleration);
} else {
tableType_t const addrMode = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, 1);
return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, acceleration);
} }
}
int LZ4_compress_destSize_extState(void* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize, int acceleration)
{
int const r = LZ4_compress_destSize_extState_internal((LZ4_stream_t*)state, src, dst, srcSizePtr, targetDstSize, acceleration);
/* clean the state on exit */
LZ4_initStream(state, sizeof (LZ4_stream_t));
return r;
}
int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
{
@@ -1479,7 +1515,7 @@ int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targe
LZ4_stream_t* const ctx = &ctxBody;
#endif
int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize);
int result = LZ4_compress_destSize_extState_internal(ctx, src, dst, srcSizePtr, targetDstSize, 1);
#if (LZ4_HEAPMODE)
FREEMEM(ctx);
@@ -1498,7 +1534,7 @@ LZ4_stream_t* LZ4_createStream(void)
{
LZ4_stream_t* const lz4s = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));
LZ4_STATIC_ASSERT(sizeof(LZ4_stream_t) >= sizeof(LZ4_stream_t_internal));
DEBUGLOG(4, "LZ4_createStream %p", lz4s);
DEBUGLOG(4, "LZ4_createStream %p", (void*)lz4s);
if (lz4s == NULL) return NULL;
LZ4_initStream(lz4s, sizeof(*lz4s));
return lz4s;
@@ -1529,7 +1565,7 @@ LZ4_stream_t* LZ4_initStream (void* buffer, size_t size)
* prefer initStream() which is more general */
void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
{
DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream);
DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", (void*)LZ4_stream);
MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t_internal));
}
@@ -1541,15 +1577,18 @@ void LZ4_resetStream_fast(LZ4_stream_t* ctx) {
int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
{
if (!LZ4_stream) return 0; /* support free on NULL */
DEBUGLOG(5, "LZ4_freeStream %p", LZ4_stream);
DEBUGLOG(5, "LZ4_freeStream %p", (void*)LZ4_stream);
FREEMEM(LZ4_stream);
return (0);
}
#endif
typedef enum { _ld_fast, _ld_slow } LoadDict_mode_e;
#define HASH_UNIT sizeof(reg_t)
int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
int LZ4_loadDict_internal(LZ4_stream_t* LZ4_dict,
const char* dictionary, int dictSize,
LoadDict_mode_e _ld)
{
LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
const tableType_t tableType = byU32;
@@ -1557,7 +1596,7 @@ int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
const BYTE* const dictEnd = p + dictSize;
U32 idx32;
DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary, LZ4_dict);
DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, (void*)dictionary, (void*)LZ4_dict);
/* It's necessary to reset the context,
* and not just continue it with prepareTable()
@@ -1585,20 +1624,46 @@ int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
while (p <= dictEnd-HASH_UNIT) {
U32 const h = LZ4_hashPosition(p, tableType);
/* Note: overwriting => favors positions end of dictionary */
LZ4_putIndexOnHash(idx32, h, dict->hashTable, tableType);
p+=3; idx32+=3;
}
if (_ld == _ld_slow) {
/* Fill hash table with additional references, to improve compression capability */
p = dict->dictionary;
idx32 = dict->currentOffset - dict->dictSize;
while (p <= dictEnd-HASH_UNIT) {
U32 const h = LZ4_hashPosition(p, tableType);
U32 const limit = dict->currentOffset - 64 KB;
if (LZ4_getIndexOnHash(h, dict->hashTable, tableType) <= limit) {
/* Note: not overwriting => favors positions beginning of dictionary */
LZ4_putIndexOnHash(idx32, h, dict->hashTable, tableType);
}
p++; idx32++;
}
}
return (int)dict->dictSize;
}
/* Load a dictionary into the stream using the fast (single-pass) hash-table
 * fill; see LZ4_loadDict_internal() for details. */
int LZ4_loadDict(LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
{
    return LZ4_loadDict_internal(LZ4_dict, dictionary, dictSize, _ld_fast);
}
/* Load a dictionary into the stream using the slow variant, which performs an
 * additional hash-table fill pass to improve compression ratio; see
 * LZ4_loadDict_internal() for details. */
int LZ4_loadDictSlow(LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
{
    return LZ4_loadDict_internal(LZ4_dict, dictionary, dictSize, _ld_slow);
}
void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream)
{
const LZ4_stream_t_internal* dictCtx = (dictionaryStream == NULL) ? NULL :
&(dictionaryStream->internal_donotuse);
DEBUGLOG(4, "LZ4_attach_dictionary (%p, %p, size %u)",
workingStream, dictionaryStream,
(void*)workingStream, (void*)dictionaryStream,
dictCtx != NULL ? dictCtx->dictSize : 0);
if (dictCtx != NULL) {
@@ -1662,7 +1727,7 @@ int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,
&& (inputSize > 0) /* tolerance : don't lose history, in case next invocation would use prefix mode */
&& (streamPtr->dictCtx == NULL) /* usingDictCtx */
) {
DEBUGLOG(5, "LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small", streamPtr->dictSize, streamPtr->dictionary);
DEBUGLOG(5, "LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small", streamPtr->dictSize, (void*)streamPtr->dictionary);
/* remove dictionary existence from history, to employ faster prefix mode */
streamPtr->dictSize = 0;
streamPtr->dictionary = (const BYTE*)source;
@@ -1752,7 +1817,7 @@ int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
{
LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
DEBUGLOG(5, "LZ4_saveDict : dictSize=%i, safeBuffer=%p", dictSize, safeBuffer);
DEBUGLOG(5, "LZ4_saveDict : dictSize=%i, safeBuffer=%p", dictSize, (void*)safeBuffer);
if ((U32)dictSize > 64 KB) { dictSize = 64 KB; } /* useless to define a dictionary > 64 KB */
if ((U32)dictSize > dict->dictSize) { dictSize = (int)dict->dictSize; }
@@ -1923,6 +1988,17 @@ read_variable_length(const BYTE** ip, const BYTE* ilimit,
if (initial_check && unlikely((*ip) >= ilimit)) { /* read limit reached */
return rvl_error;
}
s = **ip;
(*ip)++;
length += s;
if (unlikely((*ip) > ilimit)) { /* read limit reached */
return rvl_error;
}
/* accumulator overflow detection (32-bit mode only) */
if ((sizeof(length) < 8) && unlikely(length > ((Rvl_t)(-1)/2)) ) {
return rvl_error;
}
if (likely(s != 255)) return length;
do {
s = **ip;
(*ip)++;
@@ -1931,10 +2007,10 @@ read_variable_length(const BYTE** ip, const BYTE* ilimit,
return rvl_error;
}
/* accumulator overflow detection (32-bit mode only) */
if ((sizeof(length)<8) && unlikely(length > ((Rvl_t)(-1)/2)) ) {
if ((sizeof(length) < 8) && unlikely(length > ((Rvl_t)(-1)/2)) ) {
return rvl_error;
}
} while (s==255);
} while (s == 255);
return length;
}
@@ -2000,7 +2076,7 @@ LZ4_decompress_generic(
* note : fast loop may show a regression for some client arm chips. */
#if LZ4_FAST_DEC_LOOP
if ((oend - op) < FASTLOOP_SAFE_DISTANCE) {
DEBUGLOG(6, "skip fast decode loop");
DEBUGLOG(6, "move to safe decode loop");
goto safe_decode;
}
@@ -2012,6 +2088,7 @@ LZ4_decompress_generic(
assert(ip < iend);
token = *ip++;
length = token >> ML_BITS; /* literal length */
DEBUGLOG(7, "blockPos%6u: litLength token = %u", (unsigned)(op-(BYTE*)dst), (unsigned)length);
/* decode literal length */
if (length == RUN_MASK) {
@@ -2025,49 +2102,47 @@ LZ4_decompress_generic(
if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
/* copy literals */
cpy = op+length;
LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
if ((cpy>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; }
LZ4_wildCopy32(op, ip, cpy);
ip += length; op = cpy;
} else {
cpy = op+length;
DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe", (unsigned)length);
if ((op+length>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; }
LZ4_wildCopy32(op, ip, op+length);
ip += length; op += length;
} else if (ip <= iend-(16 + 1/*max lit + offset + nextToken*/)) {
/* We don't need to check oend, since we check it once for each loop below */
if (ip > iend-(16 + 1/*max lit + offset + nextToken*/)) { goto safe_literal_copy; }
DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe", (unsigned)length);
/* Literals can only be <= 14, but hope compilers optimize better when copy by a register size */
LZ4_memcpy(op, ip, 16);
ip += length; op = cpy;
ip += length; op += length;
} else {
goto safe_literal_copy;
}
/* get offset */
offset = LZ4_readLE16(ip); ip+=2;
DEBUGLOG(6, " offset = %zu", offset);
DEBUGLOG(6, "blockPos%6u: offset = %u", (unsigned)(op-(BYTE*)dst), (unsigned)offset);
match = op - offset;
assert(match <= op); /* overflow check */
/* get matchlength */
length = token & ML_MASK;
DEBUGLOG(7, " match length token = %u (len==%u)", (unsigned)length, (unsigned)length+MINMATCH);
if (length == ML_MASK) {
size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0);
if (addl == rvl_error) {
DEBUGLOG(6, "error reading long match length");
DEBUGLOG(5, "error reading long match length");
goto _output_error;
}
length += addl;
length += MINMATCH;
DEBUGLOG(7, " long match length == %u", (unsigned)length);
if (unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */
if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) {
DEBUGLOG(6, "Error : offset outside buffers");
goto _output_error;
}
if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
goto safe_match_copy;
}
} else {
length += MINMATCH;
if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
DEBUGLOG(7, "moving to safe_match_copy (ml==%u)", (unsigned)length);
goto safe_match_copy;
}
@@ -2086,7 +2161,7 @@ LZ4_decompress_generic(
} } }
if ( checkOffset && (unlikely(match + dictSize < lowPrefix)) ) {
DEBUGLOG(6, "Error : pos=%zi, offset=%zi => outside buffers", op-lowPrefix, op-match);
DEBUGLOG(5, "Error : pos=%zi, offset=%zi => outside buffers", op-lowPrefix, op-match);
goto _output_error;
}
/* match starting within external dictionary */
@@ -2143,6 +2218,7 @@ LZ4_decompress_generic(
assert(ip < iend);
token = *ip++;
length = token >> ML_BITS; /* literal length */
DEBUGLOG(7, "blockPos%6u: litLength token = %u", (unsigned)(op-(BYTE*)dst), (unsigned)length);
/* A two-stage shortcut for the most common case:
* 1) If the literal length is 0..14, and there is enough space,
@@ -2163,6 +2239,7 @@ LZ4_decompress_generic(
/* The second stage: prepare for match copying, decode full info.
* If it doesn't work out, the info won't be wasted. */
length = token & ML_MASK; /* match length */
DEBUGLOG(7, "blockPos%6u: matchLength token = %u (len=%u)", (unsigned)(op-(BYTE*)dst), (unsigned)length, (unsigned)length + 4);
offset = LZ4_readLE16(ip); ip += 2;
match = op - offset;
assert(match <= op); /* check overflow */
@@ -2194,11 +2271,12 @@ LZ4_decompress_generic(
if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
}
/* copy literals */
cpy = op+length;
#if LZ4_FAST_DEC_LOOP
safe_literal_copy:
#endif
/* copy literals */
cpy = op+length;
LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
if ((cpy>oend-MFLIMIT) || (ip+length>iend-(2+1+LASTLITERALS))) {
/* We've either hit the input parsing restriction or the output parsing restriction.
@@ -2234,9 +2312,10 @@ LZ4_decompress_generic(
* so check that we exactly consume the input and don't overrun the output buffer.
*/
if ((ip+length != iend) || (cpy > oend)) {
DEBUGLOG(6, "should have been last run of literals")
DEBUGLOG(6, "ip(%p) + length(%i) = %p != iend (%p)", ip, (int)length, ip+length, iend);
DEBUGLOG(6, "or cpy(%p) > oend(%p)", cpy, oend);
DEBUGLOG(5, "should have been last run of literals")
DEBUGLOG(5, "ip(%p) + length(%i) = %p != iend (%p)", (void*)ip, (int)length, (void*)(ip+length), (void*)iend);
DEBUGLOG(5, "or cpy(%p) > (oend-MFLIMIT)(%p)", (void*)cpy, (void*)(oend-MFLIMIT));
DEBUGLOG(5, "after writing %u bytes / %i bytes available", (unsigned)(op-(BYTE*)dst), outputSize);
goto _output_error;
}
}
@@ -2262,6 +2341,7 @@ LZ4_decompress_generic(
/* get matchlength */
length = token & ML_MASK;
DEBUGLOG(7, "blockPos%6u: matchLength token = %u", (unsigned)(op-(BYTE*)dst), (unsigned)length);
_copy_match:
if (length == ML_MASK) {
@@ -2351,7 +2431,7 @@ LZ4_decompress_generic(
while (op < cpy) { *op++ = *match++; }
} else {
LZ4_memcpy(op, match, 8);
if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); }
if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); }
}
op = cpy; /* wildcopy correction */
}