mirror of https://github.com/soarqin/DSP_Mods.git synced 2025-12-08 22:53:33 +08:00

CompressSave: release 1.3.0

2023-07-12 00:02:45 +08:00
parent cb88f268f6
commit f7e1882fa3
14 changed files with 532 additions and 231 deletions


@@ -37,7 +37,8 @@
**************************************/
/*
* LZ4_HEAPMODE :
- * Select how default compression functions will allocate memory for their hash table,
+ * Select how stateless compression functions like `LZ4_compress_default()`
+ * allocate memory for their hash table,
* in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).
*/
#ifndef LZ4_HEAPMODE
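The reworded comment targets the stateless entry points: each one-shot call either builds its hash table on the stack (LZ4_HEAPMODE=0, the default) or malloc()s it (compile lz4.c with -DLZ4_HEAPMODE=1). A minimal one-shot call that exercises this path, assuming the bundled lz4.h is on the include path; buffer sizes are illustrative:

#include <stdio.h>
#include "lz4.h"

int main(void)
{
    const char src[] = "CompressSave sample payload, sample payload, sample payload";
    char dst[256];   /* comfortably above LZ4_compressBound() for this small input */
    int const written = LZ4_compress_default(src, dst, (int)sizeof(src), (int)sizeof(dst));
    printf("compressed %zu -> %d bytes\n", sizeof(src), written);
    return written > 0 ? 0 : 1;
}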
@@ -279,7 +280,7 @@ static const int LZ4_minLength = (MFLIMIT+1);
static int g_debuglog_enable = 1;
# define DEBUGLOG(l, ...) { \
if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \
fprintf(stderr, __FILE__ ": "); \
fprintf(stderr, __FILE__ " %i: ", __LINE__); \
fprintf(stderr, __VA_ARGS__); \
fprintf(stderr, " \n"); \
} }
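The debug macro now prints __LINE__ after __FILE__. A standalone sketch of the same idiom (LOG_LEVEL and LOG() are illustrative names, not part of lz4):

#include <stdio.h>

#define LOG_LEVEL 5
#define LOG(l, ...) do { \
        if ((l) <= LOG_LEVEL) { \
            fprintf(stderr, __FILE__ " %i: ", __LINE__); \
            fprintf(stderr, __VA_ARGS__); \
            fprintf(stderr, "\n"); \
        } \
    } while (0)

int main(void)
{
    LOG(3, "offset = %d", 42);   /* prints e.g. "example.c 16: offset = 42" */
    return 0;
}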
@@ -379,14 +380,16 @@ static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
- typedef union { U16 u16; U32 u32; reg_t uArch; } __attribute__((packed)) LZ4_unalign;
+ typedef struct { U16 u16; } __attribute__((packed)) LZ4_unalign16;
+ typedef struct { U32 u32; } __attribute__((packed)) LZ4_unalign32;
+ typedef struct { reg_t uArch; } __attribute__((packed)) LZ4_unalignST;
- static U16 LZ4_read16(const void* ptr) { return ((const LZ4_unalign*)ptr)->u16; }
- static U32 LZ4_read32(const void* ptr) { return ((const LZ4_unalign*)ptr)->u32; }
- static reg_t LZ4_read_ARCH(const void* ptr) { return ((const LZ4_unalign*)ptr)->uArch; }
+ static U16 LZ4_read16(const void* ptr) { return ((const LZ4_unalign16*)ptr)->u16; }
+ static U32 LZ4_read32(const void* ptr) { return ((const LZ4_unalign32*)ptr)->u32; }
+ static reg_t LZ4_read_ARCH(const void* ptr) { return ((const LZ4_unalignST*)ptr)->uArch; }
- static void LZ4_write16(void* memPtr, U16 value) { ((LZ4_unalign*)memPtr)->u16 = value; }
- static void LZ4_write32(void* memPtr, U32 value) { ((LZ4_unalign*)memPtr)->u32 = value; }
+ static void LZ4_write16(void* memPtr, U16 value) { ((LZ4_unalign16*)memPtr)->u16 = value; }
+ static void LZ4_write32(void* memPtr, U32 value) { ((LZ4_unalign32*)memPtr)->u32 = value; }
#else /* safe and portable access using memcpy() */
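Splitting the packed union into one packed struct per width keeps each unaligned access to exactly the member it needs. The same technique in isolation, next to the memcpy() fallback used by the #else branch (the __attribute__((packed)) form assumes gcc/clang, as the surrounding comment notes):

#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <stdio.h>

/* Packed one-member struct: the compiler emits an unaligned-safe 32-bit load. */
typedef struct { uint32_t u32; } __attribute__((packed)) unalign32;

static uint32_t read32_packed(const void* p) { return ((const unalign32*)p)->u32; }

/* Portable fallback, equivalent to lz4's memcpy() path. */
static uint32_t read32_memcpy(const void* p) { uint32_t v; memcpy(&v, p, sizeof v); return v; }

int main(void)
{
    unsigned char buf[8] = {0, 0x78, 0x56, 0x34, 0x12, 0, 0, 0};
    /* &buf[1] is deliberately misaligned for a 4-byte load. */
    printf("%08" PRIx32 " %08" PRIx32 "\n", read32_packed(&buf[1]), read32_memcpy(&buf[1]));
    return 0;
}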
@@ -524,12 +527,12 @@ LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const si
case 2:
LZ4_memcpy(v, srcPtr, 2);
LZ4_memcpy(&v[2], srcPtr, 2);
- #if defined(_MSC_VER) && (_MSC_VER <= 1933) /* MSVC 2022 ver 17.3 or earlier */
+ #if defined(_MSC_VER) && (_MSC_VER <= 1936) /* MSVC 2022 ver 17.6 or earlier */
# pragma warning(push)
# pragma warning(disable : 6385) /* warning C6385: Reading invalid data from 'v'. */
#endif
LZ4_memcpy(&v[4], v, 4);
- #if defined(_MSC_VER) && (_MSC_VER <= 1933) /* MSVC 2022 ver 17.3 or earlier */
+ #if defined(_MSC_VER) && (_MSC_VER <= 1936) /* MSVC 2022 ver 17.6 or earlier */
# pragma warning(pop)
#endif
break;
@@ -803,23 +806,19 @@ LZ4_FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableT
}
}
+ /* LZ4_putPosition*() : only used in byPtr mode */
LZ4_FORCE_INLINE void LZ4_putPositionOnHash(const BYTE* p, U32 h,
- void* tableBase, tableType_t const tableType,
- const BYTE* srcBase)
+ void* tableBase, tableType_t const tableType)
{
- switch (tableType)
- {
- case clearedTable: { /* illegal! */ assert(0); return; }
- case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; }
- case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; }
- case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; }
- }
+ const BYTE** const hashTable = (const BYTE**)tableBase;
+ assert(tableType == byPtr); (void)tableType;
+ hashTable[h] = p;
}
- LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
+ LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType)
{
U32 const h = LZ4_hashPosition(p, tableType);
- LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
+ LZ4_putPositionOnHash(p, h, tableBase, tableType);
}
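After this refactor, LZ4_putPositionOnHash()/LZ4_putPosition() serve only the byPtr layout, where the table stores positions as raw pointers, so the srcBase argument is gone; the byU32/byU16 layouts store offsets and go through LZ4_putIndexOnHash() instead. A reduced sketch of the two layouts with hypothetical names, to show why only the offset form needs a base:

#include <stdint.h>

/* byPtr-style slot: holds the position itself, no base needed. */
static void put_by_ptr(const uint8_t** table, uint32_t h, const uint8_t* p)
{
    table[h] = p;
}

/* byU32-style slot: holds an offset from a base pointer, so inserting a
 * position needs the base (or, as in the patch, a precomputed index). */
static void put_by_u32(uint32_t* table, uint32_t h, const uint8_t* p, const uint8_t* base)
{
    table[h] = (uint32_t)(p - base);
}

int main(void)
{
    static const uint8_t* ptr_table[16];
    static uint32_t idx_table[16];
    uint8_t data[32] = {0};
    put_by_ptr(ptr_table, 3, data + 5);
    put_by_u32(idx_table, 3, data + 5, data);
    return (ptr_table[3] == data + 5 && idx_table[3] == 5) ? 0 : 1;
}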
/* LZ4_getIndexOnHash() :
@@ -901,7 +900,7 @@ LZ4_prepareTable(LZ4_stream_t_internal* const cctx,
/** LZ4_compress_generic() :
* inlined, to ensure branches are decided at compilation time.
- * Presumed already validated at this stage:
+ * The following conditions are presumed already validated:
* - source != NULL
* - inputSize > 0
*/
@@ -919,10 +918,10 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
const int acceleration)
{
int result;
- const BYTE* ip = (const BYTE*) source;
+ const BYTE* ip = (const BYTE*)source;
U32 const startIndex = cctx->currentOffset;
- const BYTE* base = (const BYTE*) source - startIndex;
+ const BYTE* base = (const BYTE*)source - startIndex;
const BYTE* lowLimit;
const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx;
@@ -930,7 +929,8 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary;
const U32 dictSize =
dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize;
- const U32 dictDelta = (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0; /* make indexes in dictCtx comparable with index in current context */
+ const U32 dictDelta =
+ (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0; /* make indexes in dictCtx comparable with indexes in current context */
int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx);
U32 const prefixIdxLimit = startIndex - dictSize; /* used when dictDirective == dictSmall */
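The reflowed dictDelta line is worth unpacking: indexes stored in the attached dictionary context live in that context's own index space, and adding dictDelta = startIndex - dictCtx->currentOffset rebases them so they can be compared with indexes of the current context. A tiny numeric sketch (all values hypothetical):

#include <stdint.h>
#include <assert.h>

int main(void)
{
    /* Hypothetical numbers mirroring the dictDelta computation above:
     * the attached dictionary context ends at index 4096,
     * the current context starts at index 65536.                      */
    uint32_t const dictCtx_currentOffset = 4096;
    uint32_t const startIndex            = 65536;
    uint32_t const dictDelta             = startIndex - dictCtx_currentOffset;

    /* An index found in the dictionary table...                       */
    uint32_t const matchIndexInDict = 1000;
    /* ...is shifted into the current context's index space:           */
    uint32_t const matchIndex = matchIndexInDict + dictDelta;

    assert(matchIndex < startIndex);   /* it still refers to dictionary data */
    return 0;
}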
@@ -955,11 +955,11 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
DEBUGLOG(5, "LZ4_compress_generic_validated: srcSize=%i, tableType=%u", inputSize, tableType);
assert(ip != NULL);
+ if (tableType == byU16) assert(inputSize<LZ4_64Klimit); /* Size too large (not within 64K limit) */
+ if (tableType == byPtr) assert(dictDirective==noDict); /* only supported use case with byPtr */
/* If init conditions are not met, we don't have to mark stream
* as having dirty context, since no action was taken yet */
if (outputDirective == fillOutput && maxOutputSize < 1) { return 0; } /* Impossible to store anything */
- if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) { return 0; } /* Size too large (not within 64K limit) */
- if (tableType==byPtr) assert(dictDirective==noDict); /* only supported use case with byPtr */
assert(acceleration >= 1);
lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? dictSize : 0);
@@ -979,7 +979,12 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
if (inputSize<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
/* First Byte */
- LZ4_putPosition(ip, cctx->hashTable, tableType, base);
+ { U32 const h = LZ4_hashPosition(ip, tableType);
+ if (tableType == byPtr) {
+ LZ4_putPositionOnHash(ip, h, cctx->hashTable, byPtr);
+ } else {
+ LZ4_putIndexOnHash(startIndex, h, cctx->hashTable, tableType);
+ } }
ip++; forwardH = LZ4_hashPosition(ip, tableType);
/* Main Loop */
@@ -1004,7 +1009,7 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType);
forwardH = LZ4_hashPosition(forwardIp, tableType);
- LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);
+ LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType);
} while ( (match+LZ4_DISTANCE_MAX < ip)
|| (LZ4_read32(match) != LZ4_read32(ip)) );
@@ -1202,13 +1207,19 @@ _next_match:
if (ip >= mflimitPlusOne) break;
/* Fill table */
- LZ4_putPosition(ip-2, cctx->hashTable, tableType, base);
+ { U32 const h = LZ4_hashPosition(ip-2, tableType);
+ if (tableType == byPtr) {
+ LZ4_putPositionOnHash(ip-2, h, cctx->hashTable, byPtr);
+ } else {
+ U32 const idx = (U32)((ip-2) - base);
+ LZ4_putIndexOnHash(idx, h, cctx->hashTable, tableType);
+ } }
/* Test next position */
if (tableType == byPtr) {
match = LZ4_getPosition(ip, cctx->hashTable, tableType);
- LZ4_putPosition(ip, cctx->hashTable, tableType, base);
+ LZ4_putPosition(ip, cctx->hashTable, tableType);
if ( (match+LZ4_DISTANCE_MAX >= ip)
&& (LZ4_read32(match) == LZ4_read32(ip)) )
{ token=op++; *token=0; goto _next_match; }
@@ -1222,6 +1233,7 @@ _next_match:
if (dictDirective == usingDictCtx) {
if (matchIndex < startIndex) {
/* there was no match, try the dictionary */
+ assert(tableType == byU32);
matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
match = dictBase + matchIndex;
lowLimit = dictionary; /* required for match length counter */
@@ -1375,9 +1387,10 @@ int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int
*/
int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)
{
- LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse;
+ LZ4_stream_t_internal* const ctx = &((LZ4_stream_t*)state)->internal_donotuse;
if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
+ assert(ctx != NULL);
if (dstCapacity >= LZ4_compressBound(srcSize)) {
if (srcSize < LZ4_64Klimit) {
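The function clamps acceleration into [LZ4_ACCELERATION_DEFAULT, LZ4_ACCELERATION_MAX] before dispatching on table type. A sketch of the caller-managed-state pattern using the stable LZ4_compress_fast_extState() entry point, which applies the same clamping, so 0 is a safe "use the default" value (buffer contents and sizes are illustrative):

#include <stdlib.h>
#include <stdio.h>
#include "lz4.h"

int main(void)
{
    const char src[] = "explicit-state compression keeps allocation under the caller's control";
    char dst[256];
    void* const state = malloc((size_t)LZ4_sizeofState());  /* caller-provided working memory */
    if (state == NULL) return 1;

    /* acceleration 0 is clamped up to the default (1); huge values are capped. */
    int const n = LZ4_compress_fast_extState(state, src, dst,
                                             (int)sizeof(src), (int)sizeof(dst), 0);
    printf("compressed to %d bytes\n", n);
    free(state);
    return n > 0 ? 0 : 1;
}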
@@ -1411,17 +1424,17 @@ int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst
}
- int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
+ int LZ4_compress_fast(const char* src, char* dest, int srcSize, int dstCapacity, int acceleration)
{
int result;
#if (LZ4_HEAPMODE)
- LZ4_stream_t* ctxPtr = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
+ LZ4_stream_t* const ctxPtr = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
if (ctxPtr == NULL) return 0;
#else
LZ4_stream_t ctx;
LZ4_stream_t* const ctxPtr = &ctx;
#endif
- result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration);
+ result = LZ4_compress_fast_extState(ctxPtr, src, dest, srcSize, dstCapacity, acceleration);
#if (LZ4_HEAPMODE)
FREEMEM(ctxPtr);
@@ -1430,9 +1443,9 @@ int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutp
}
- int LZ4_compress_default(const char* src, char* dst, int srcSize, int maxOutputSize)
+ int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity)
{
- return LZ4_compress_fast(src, dst, srcSize, maxOutputSize, 1);
+ return LZ4_compress_fast(src, dst, srcSize, dstCapacity, 1);
}
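The parameter renames make the relationship explicit: LZ4_compress_default() is LZ4_compress_fast() with an acceleration of 1, and dstCapacity is simply how much room the caller offers. A round-trip sketch with illustrative buffer sizes:

#include <stdio.h>
#include <string.h>
#include "lz4.h"

int main(void)
{
    const char src[] = "round trip: compress with acceleration 1, then decompress";
    char compressed[256];  /* comfortably above LZ4_compressBound() for this input */
    char restored[sizeof(src)];

    int const csize = LZ4_compress_fast(src, compressed, (int)sizeof(src),
                                        (int)sizeof(compressed), 1 /* same as LZ4_compress_default */);
    if (csize <= 0) return 1;

    int const dsize = LZ4_decompress_safe(compressed, restored, csize, (int)sizeof(restored));
    return (dsize == (int)sizeof(src) && memcmp(src, restored, sizeof(src)) == 0) ? 0 : 1;
}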
@@ -1459,11 +1472,11 @@ static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src,
int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
{
#if (LZ4_HEAPMODE)
- LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
+ LZ4_stream_t* const ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
if (ctx == NULL) return 0;
#else
LZ4_stream_t ctxBody;
- LZ4_stream_t* ctx = &ctxBody;
+ LZ4_stream_t* const ctx = &ctxBody;
#endif
int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize);
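LZ4_compress_destSize() works the other way around from the bounded-output functions: targetDstSize is the hard output budget, and *srcSizePtr is updated to how many input bytes actually fit. A usage sketch with illustrative sizes:

#include <stdio.h>
#include "lz4.h"

int main(void)
{
    char src[4096];
    char dst[128];                      /* hard budget for the compressed block */
    int  srcSize = (int)sizeof(src);    /* in: available input, out: input consumed */

    for (int i = 0; i < (int)sizeof(src); i++) src[i] = (char)(i & 0x3f);

    int const written = LZ4_compress_destSize(src, dst, &srcSize, (int)sizeof(dst));
    printf("consumed %d input bytes into %d compressed bytes\n", srcSize, written);
    return written > 0 ? 0 : 1;
}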
@@ -1538,11 +1551,11 @@ int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
#define HASH_UNIT sizeof(reg_t)
int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
{
- LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse;
+ LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
const tableType_t tableType = byU32;
const BYTE* p = (const BYTE*)dictionary;
const BYTE* const dictEnd = p + dictSize;
- const BYTE* base;
+ U32 idx32;
DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary, LZ4_dict);
@@ -1565,14 +1578,15 @@ int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
}
if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
- base = dictEnd - dict->currentOffset;
dict->dictionary = p;
dict->dictSize = (U32)(dictEnd - p);
dict->tableType = (U32)tableType;
+ idx32 = dict->currentOffset - dict->dictSize;
while (p <= dictEnd-HASH_UNIT) {
- LZ4_putPosition(p, dict->hashTable, tableType, base);
- p+=3;
+ U32 const h = LZ4_hashPosition(p, tableType);
+ LZ4_putIndexOnHash(idx32, h, dict->hashTable, tableType);
+ p+=3; idx32+=3;
}
return (int)dict->dictSize;
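With this change, LZ4_loadDict() fills the table with precomputed indexes (idx32) via LZ4_putIndexOnHash() instead of pointer arithmetic against base. A usage sketch of the stream-plus-dictionary API; the dictionary contents here are made up:

#include <stdio.h>
#include "lz4.h"

int main(void)
{
    static const char dict[] = "dsp-save-header dsp-save-header dsp-save-header";
    const char src[] = "dsp-save-header plus the payload that follows it";
    char dst[256];

    LZ4_stream_t* const stream = LZ4_createStream();
    if (stream == NULL) return 1;

    LZ4_loadDict(stream, dict, (int)sizeof(dict));   /* prime the hash table from the dictionary */
    int const csize = LZ4_compress_fast_continue(stream, src, dst,
                                                 (int)sizeof(src), (int)sizeof(dst), 1);
    printf("with dictionary: %d bytes\n", csize);
    LZ4_freeStream(stream);
    return csize > 0 ? 0 : 1;
}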
@@ -1709,7 +1723,7 @@ int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,
/* Hidden debug function, to force-test external dictionary mode */
int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize)
{
- LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse;
+ LZ4_stream_t_internal* const streamPtr = &LZ4_dict->internal_donotuse;
int result;
LZ4_renormDictT(streamPtr, srcSize);
@@ -1772,7 +1786,7 @@ typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;
* does not know end of input
* presumes input is well formed
* note : will consume at least one byte */
- size_t read_long_length_no_check(const BYTE** pp)
+ static size_t read_long_length_no_check(const BYTE** pp)
{
size_t b, l = 0;
do { b = **pp; (*pp)++; l += b; } while (b==255);
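read_long_length_no_check() (now static) decodes LZ4's open-ended length format: bytes are summed until a byte below 255 terminates the run. A self-contained sketch of both directions, with hypothetical helper names:

#include <stdio.h>
#include <stddef.h>

/* Encode the "sum of bytes, 255 means continue" length format used by LZ4
 * for literal/match lengths past the token. Hypothetical helper that mirrors
 * the decode loop shown in the hunk above. */
static size_t encode_long_length(unsigned char* out, size_t len)
{
    size_t n = 0;
    while (len >= 255) { out[n++] = 255; len -= 255; }
    out[n++] = (unsigned char)len;
    return n;              /* number of bytes emitted */
}

static size_t decode_long_length(const unsigned char** pp)
{
    size_t b, l = 0;
    do { b = **pp; (*pp)++; l += b; } while (b == 255);
    return l;
}

int main(void)
{
    unsigned char buf[8];
    size_t const nbytes = encode_long_length(buf, 600);   /* 255 + 255 + 90 */
    const unsigned char* p = buf;
    printf("%zu bytes encode length %zu\n", nbytes, decode_long_length(&p));
    return 0;
}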
@@ -1991,6 +2005,7 @@ LZ4_decompress_generic(
}
/* Fast loop : decode sequences as long as output < oend-FASTLOOP_SAFE_DISTANCE */
DEBUGLOG(6, "using fast decode loop");
while (1) {
/* Main fastloop assertion: We can always wildcopy FASTLOOP_SAFE_DISTANCE */
assert(oend - op >= FASTLOOP_SAFE_DISTANCE);
@@ -2001,7 +2016,10 @@ LZ4_decompress_generic(
/* decode literal length */
if (length == RUN_MASK) {
size_t const addl = read_variable_length(&ip, iend-RUN_MASK, 1);
- if (addl == rvl_error) { goto _output_error; }
+ if (addl == rvl_error) {
+ DEBUGLOG(6, "error reading long literal length");
+ goto _output_error;
+ }
length += addl;
if (unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
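The two overflow checks rely on unsigned wraparound: if adding length to the pointer value wraps, the sum compares below the original pointer. A minimal standalone illustration of the same idiom, using uintptr_t in place of lz4's uptrval:

#include <stdint.h>
#include <stdio.h>

/* Same wraparound test as the hunk above: if p + len wraps past the end of
 * the address space, the unsigned sum is smaller than p. */
static int would_overflow(const char* p, size_t len)
{
    return (uintptr_t)p + len < (uintptr_t)p;
}

int main(void)
{
    char buf[16];
    printf("%d %d\n", would_overflow(buf, 8), would_overflow(buf, SIZE_MAX));
    return 0;
}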
@@ -2024,6 +2042,7 @@ LZ4_decompress_generic(
/* get offset */
offset = LZ4_readLE16(ip); ip+=2;
DEBUGLOG(6, " offset = %zu", offset);
match = op - offset;
assert(match <= op); /* overflow check */
@@ -2032,11 +2051,17 @@ LZ4_decompress_generic(
if (length == ML_MASK) {
size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0);
- if (addl == rvl_error) { goto _output_error; }
+ if (addl == rvl_error) {
+ DEBUGLOG(6, "error reading long match length");
+ goto _output_error;
+ }
length += addl;
length += MINMATCH;
if (unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */
- if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
+ if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) {
+ DEBUGLOG(6, "Error : offset outside buffers");
+ goto _output_error;
+ }
if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
goto safe_match_copy;
}
@@ -2060,7 +2085,10 @@ LZ4_decompress_generic(
continue;
} } }
- if (checkOffset && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
+ if ( checkOffset && (unlikely(match + dictSize < lowPrefix)) ) {
+ DEBUGLOG(6, "Error : pos=%zi, offset=%zi => outside buffers", op-lowPrefix, op-match);
+ goto _output_error;
+ }
/* match starting within external dictionary */
if ((dict==usingExtDict) && (match < lowPrefix)) {
assert(dictEnd != NULL);
@@ -2069,7 +2097,8 @@ LZ4_decompress_generic(
DEBUGLOG(7, "partialDecoding: dictionary match, close to dstEnd");
length = MIN(length, (size_t)(oend-op));
} else {
- goto _output_error; /* end-of-block condition violated */
+ DEBUGLOG(6, "end-of-block condition violated")
+ goto _output_error;
} }
if (length <= (size_t)(lowPrefix-match)) {
@@ -2109,6 +2138,7 @@ LZ4_decompress_generic(
#endif
/* Main Loop : decode remaining sequences where output < FASTLOOP_SAFE_DISTANCE */
DEBUGLOG(6, "using safe decode loop");
while (1) {
assert(ip < iend);
token = *ip++;
@@ -2416,6 +2446,7 @@ int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
int compressedSize, int maxOutputSize,
const void* dictStart, size_t dictSize)
{
DEBUGLOG(5, "LZ4_decompress_safe_forceExtDict");
return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
decode_full_block, usingExtDict,
(BYTE*)dest, (const BYTE*)dictStart, dictSize);
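The DEBUGLOG added here traces LZ4_decompress_safe_forceExtDict; in application code, a block compressed against an external dictionary (as in the LZ4_loadDict sketch earlier) is normally decoded with LZ4_decompress_safe_usingDict(). A round-trip sketch, with made-up dictionary contents that must match byte-for-byte what the compressor used:

#include <stdio.h>
#include "lz4.h"

static int decode_with_dict(const char* compressed, int csize,
                            char* out, int outCapacity,
                            const char* dict, int dictSize)
{
    return LZ4_decompress_safe_usingDict(compressed, out, csize, outCapacity,
                                         dict, dictSize);
}

int main(void)
{
    static const char dict[] = "dsp-save-header dsp-save-header dsp-save-header";
    const char src[] = "dsp-save-header plus the payload that follows it";
    char compressed[256];
    char restored[sizeof(src)];

    /* Produce a dictionary-compressed block first. */
    LZ4_stream_t* const stream = LZ4_createStream();
    if (stream == NULL) return 1;
    LZ4_loadDict(stream, dict, (int)sizeof(dict));
    int const csize = LZ4_compress_fast_continue(stream, src, compressed,
                                                 (int)sizeof(src), (int)sizeof(compressed), 1);
    LZ4_freeStream(stream);
    if (csize <= 0) return 1;

    /* Decode it again, supplying the same dictionary bytes. */
    int const dsize = decode_with_dict(compressed, csize, restored, (int)sizeof(restored),
                                       dict, (int)sizeof(dict));
    printf("restored %d bytes\n", dsize);
    return dsize == (int)sizeof(src) ? 0 : 1;
}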