Mirror of https://github.com/soarqin/DSP_Mods_TO.git
Synced 2026-02-04 22:22:22 +08:00

Commit: update compression libraries, while formatting code
@@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 3.2)
cmake_minimum_required(VERSION 3.12)

project(lz4wrap)

@@ -5,16 +5,19 @@
#include <limits.h>
#include <assert.h>
#include <stdio.h>
#define Check(assert,errorcode) if(!(assert)) {printf(LZ4F_getErrorName(errorcode)); return errorcode;}
#define Check(assert, errorcode) \
    if (!(assert)) \
    { \
        printf("%s\n", LZ4F_getErrorName(errorcode)); \
        return errorcode; \
    }

|
||||
|
||||
|
||||
static CContext* CreateCompressContext()
|
||||
static CContext *CreateCompressContext()
|
||||
{
|
||||
return (CContext*)malloc(sizeof(CContext));
|
||||
return (CContext *)malloc(sizeof(CContext));
|
||||
}
|
||||
|
||||
void __stdcall CompressContextFree(CContext* ctx)
|
||||
void __stdcall CompressContextFree(CContext *ctx)
|
||||
{
|
||||
if (ctx != NULL)
|
||||
{
|
||||
@@ -25,12 +28,12 @@ void __stdcall CompressContextFree(CContext* ctx)
|
||||
}
|
||||
|
||||
static LZ4F_preferences_t kPrefs = {
|
||||
{ LZ4F_max4MB, LZ4F_blockLinked, LZ4F_contentChecksumEnabled, LZ4F_frame,
|
||||
0 /* unknown content size */, 0/* no dictID */ , LZ4F_blockChecksumEnabled },
|
||||
0, /* compression level; 0 == default */
|
||||
0, /* autoflush */
|
||||
0, /* favor decompression speed */
|
||||
{ 0, 0, 0 }, /* reserved, must be set to 0 */
|
||||
{LZ4F_max4MB, LZ4F_blockLinked, LZ4F_contentChecksumEnabled, LZ4F_frame,
|
||||
0 /* unknown content size */, 0 /* no dictID */, LZ4F_blockChecksumEnabled},
|
||||
0, /* compression level; 0 == default */
|
||||
0, /* autoflush */
|
||||
0, /* favor decompression speed */
|
||||
{0, 0, 0}, /* reserved, must be set to 0 */
|
||||
};
|
||||
|
||||
size_t __stdcall CompressBufferBound(size_t inBufferSize)
|
||||
@@ -38,8 +41,9 @@ size_t __stdcall CompressBufferBound(size_t inBufferSize)
|
||||
return LZ4F_compressBound(inBufferSize, &kPrefs) + LZ4F_HEADER_SIZE_MAX;
|
||||
}
|
||||
|
||||
CContext* CreateCompressContextFromBuffer(void* dict, size_t dictSize) {
|
||||
CContext* ctx = CreateCompressContext();
|
||||
CContext *CreateCompressContextFromBuffer(void *dict, size_t dictSize)
|
||||
{
|
||||
CContext *ctx = CreateCompressContext();
|
||||
if (dict)
|
||||
ctx->dict = LZ4F_createCDict(dict, dictSize);
|
||||
else
|
||||
@@ -59,18 +63,20 @@ CContext* CreateCompressContextFromBuffer(void* dict, size_t dictSize) {
|
||||
return ctx;
|
||||
}
|
||||
|
||||
size_t __stdcall CompressBegin(CContext** pctx, int compressionLevel, void* outBuff , size_t outCapacity, void* dict, size_t dictSize)
|
||||
size_t __stdcall CompressBegin(CContext **pctx, int compressionLevel, void *outBuff, size_t outCapacity, void *dict, size_t dictSize)
|
||||
{
|
||||
CContext* ctx = CreateCompressContextFromBuffer(dict, dictSize);
|
||||
if (ctx == NULL) return -1;
|
||||
CContext *ctx = CreateCompressContextFromBuffer(dict, dictSize);
|
||||
if (ctx == NULL)
|
||||
return -1;
|
||||
|
||||
if (outCapacity < LZ4F_HEADER_SIZE_MAX || outCapacity < LZ4F_compressBound(0, &kPrefs)) return LZ4F_ERROR_dstMaxSize_tooSmall;
|
||||
if (outCapacity < LZ4F_HEADER_SIZE_MAX || outCapacity < LZ4F_compressBound(0, &kPrefs))
|
||||
return LZ4F_ERROR_dstMaxSize_tooSmall;
|
||||
|
||||
kPrefs.compressionLevel = compressionLevel;
|
||||
/* write frame header */
|
||||
size_t const headerSize = ctx->dict == NULL
|
||||
? LZ4F_compressBegin(ctx->cctx, outBuff, outCapacity, &kPrefs)
|
||||
: LZ4F_compressBegin_usingCDict(ctx->cctx, outBuff, outCapacity, ctx->dict, &kPrefs);
|
||||
size_t const headerSize = ctx->dict == NULL
|
||||
? LZ4F_compressBegin(ctx->cctx, outBuff, outCapacity, &kPrefs)
|
||||
: LZ4F_compressBegin_usingCDict(ctx->cctx, outBuff, outCapacity, ctx->dict, &kPrefs);
|
||||
|
||||
if (LZ4F_isError(headerSize))
|
||||
{
|
||||
@@ -80,12 +86,11 @@ size_t __stdcall CompressBegin(CContext** pctx, int compressionLevel, void* outB
|
||||
return headerSize;
|
||||
}
|
||||
|
||||
|
||||
size_t __stdcall CompressUpdate(CContext* ctx,void* dstBuffer, size_t dstCapacity,const void* srcBuffer, size_t srcSize)
|
||||
size_t __stdcall CompressUpdate(CContext *ctx, void *dstBuffer, size_t dstCapacity, const void *srcBuffer, size_t srcSize)
|
||||
{
|
||||
size_t result = ctx->dict == NULL
|
||||
? LZ4F_compressUpdate(ctx->cctx, dstBuffer, dstCapacity, srcBuffer, srcSize, NULL)
|
||||
: LZ4F_compressFrame_usingCDict(ctx->cctx, dstBuffer, dstCapacity, srcBuffer, srcSize, ctx->dict, NULL);
|
||||
? LZ4F_compressUpdate(ctx->cctx, dstBuffer, dstCapacity, srcBuffer, srcSize, NULL)
|
||||
: LZ4F_compressFrame_usingCDict(ctx->cctx, dstBuffer, dstCapacity, srcBuffer, srcSize, ctx->dict, NULL);
|
||||
if (LZ4F_isError(result))
|
||||
{
|
||||
const char *str = LZ4F_getErrorName(result);
|
||||
@@ -94,32 +99,38 @@ size_t __stdcall CompressUpdate(CContext* ctx,void* dstBuffer, size_t dstCapacit
|
||||
return result;
|
||||
}
|
||||
|
||||
size_t __stdcall CompressEnd(CContext* ctx, void* dstBuffer, size_t dstCapacity)
|
||||
size_t __stdcall CompressEnd(CContext *ctx, void *dstBuffer, size_t dstCapacity)
|
||||
{
|
||||
size_t writeSize = LZ4F_compressEnd(ctx->cctx, dstBuffer, dstCapacity, NULL);
|
||||
return writeSize;
|
||||
}
|
||||
|
||||
static size_t get_block_size(const LZ4F_frameInfo_t* info) {
|
||||
switch (info->blockSizeID) {
|
||||
case LZ4F_default:
|
||||
case LZ4F_max64KB: return 1 << 16;
|
||||
case LZ4F_max256KB: return 1 << 18;
|
||||
case LZ4F_max1MB: return 1 << 20;
|
||||
case LZ4F_max4MB: return 1 << 22;
|
||||
default:
|
||||
return -1;
|
||||
static size_t get_block_size(const LZ4F_frameInfo_t *info)
|
||||
{
|
||||
switch (info->blockSizeID)
|
||||
{
|
||||
case LZ4F_default:
|
||||
case LZ4F_max64KB:
|
||||
return 1 << 16;
|
||||
case LZ4F_max256KB:
|
||||
return 1 << 18;
|
||||
case LZ4F_max1MB:
|
||||
return 1 << 20;
|
||||
case LZ4F_max4MB:
|
||||
return 1 << 22;
|
||||
default:
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
//return: input bytes expects for next call
|
||||
size_t __stdcall DecompressBegin(DContext **pdctx,void *inBuffer,size_t *inBufferSize, size_t *blockSize, void* dict, size_t dictSize)
|
||||
// return: input bytes expects for next call
|
||||
size_t __stdcall DecompressBegin(DContext **pdctx, void *inBuffer, size_t *inBufferSize, size_t *blockSize, void *dict, size_t dictSize)
|
||||
{
|
||||
DContext* dctx = (DContext*)malloc(sizeof(DContext));
|
||||
DContext *dctx = (DContext *)malloc(sizeof(DContext));
|
||||
size_t const dctxStatus = LZ4F_createDecompressionContext(&dctx->dctx, LZ4F_VERSION);
|
||||
Check(!LZ4F_isError(dctxStatus), dctxStatus);
|
||||
|
||||
Check(*inBufferSize >= LZ4F_HEADER_SIZE_MAX, LZ4F_ERROR_dstMaxSize_tooSmall);
|
||||
|
||||
|
||||
LZ4F_frameInfo_t info;
|
||||
size_t const fires = LZ4F_getFrameInfo(dctx->dctx, &info, inBuffer, inBufferSize);
|
||||
Check(!LZ4F_isError(fires), fires);
|
||||
@@ -131,21 +142,21 @@ size_t __stdcall DecompressBegin(DContext **pdctx,void *inBuffer,size_t *inBuffe
|
||||
return fires;
|
||||
}
|
||||
|
||||
void __stdcall DecompressContextReset(DContext* dctx)
|
||||
void __stdcall DecompressContextReset(DContext *dctx)
|
||||
{
|
||||
LZ4F_resetDecompressionContext(dctx->dctx);
|
||||
}
|
||||
|
||||
size_t __stdcall DecompressUpdate(DContext* dctx, void* outBuffer, size_t * outBufferSize, void* inBuffer, size_t * inBufferSize)
|
||||
size_t __stdcall DecompressUpdate(DContext *dctx, void *outBuffer, size_t *outBufferSize, void *inBuffer, size_t *inBufferSize)
|
||||
{
|
||||
size_t ret = dctx->dict == NULL
|
||||
? LZ4F_decompress(dctx->dctx, outBuffer, outBufferSize, inBuffer, inBufferSize, NULL)
|
||||
: LZ4F_decompress_usingDict(dctx->dctx, outBuffer, outBufferSize, inBuffer, inBufferSize, dctx->dict, dctx->dictSize, NULL);
|
||||
? LZ4F_decompress(dctx->dctx, outBuffer, outBufferSize, inBuffer, inBufferSize, NULL)
|
||||
: LZ4F_decompress_usingDict(dctx->dctx, outBuffer, outBufferSize, inBuffer, inBufferSize, dctx->dict, dctx->dictSize, NULL);
|
||||
Check(!LZ4F_isError(ret), ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
size_t __stdcall DecompressEnd(DContext* dctx)
|
||||
size_t __stdcall DecompressEnd(DContext *dctx)
|
||||
{
|
||||
if (!dctx) return 0;
|
||||
size_t r = LZ4F_freeDecompressionContext(dctx->dctx);
|
||||
|
||||
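Before the upstream library changes below, a hedged usage sketch of the lz4wrap compression exports touched by this commit. The sequencing, and the assumption that CompressBegin() allocates the context through pctx and returns the number of frame-header bytes written, are inferred from the diff rather than confirmed by it.

/* Hypothetical caller of the lz4wrap exports (declarations assumed to come
 * from the wrapper's own header). Error handling is glossed over: each step
 * returns an LZ4F error code on failure, which should be checked with
 * LZ4F_isError() in real use. */
size_t compress_one_shot(const void* src, size_t srcSize, void* out, size_t outCapacity)
{
    /* outCapacity is assumed to come from CompressBufferBound(srcSize) */
    CContext* ctx = NULL;
    size_t pos = CompressBegin(&ctx, 0 /* default level */, out, outCapacity, NULL, 0);
    if (ctx == NULL) return 0;

    pos += CompressUpdate(ctx, (char*)out + pos, outCapacity - pos, src, srcSize);
    pos += CompressEnd(ctx, (char*)out + pos, outCapacity - pos);

    CompressContextFree(ctx);
    return pos;   /* total frame size, assuming no step reported an error */
}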
@@ -17,7 +17,7 @@ They generate and decode data using the [LZ4 block format].

#### Level 2 : High Compression variant

For more compression ratio at the cost of compression speed,
For better compression ratio at the cost of compression speed,
the High Compression variant called **lz4hc** is available.
Add files **`lz4hc.c`** and **`lz4hc.h`**.
This variant also compresses data using the [LZ4 block format],
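Illustrative sketch (not part of the upstream README excerpt above): one-shot use of the lz4hc entry point, assuming lz4hc.c and lz4hc.h have been added to the build as described.

#include <stdio.h>
#include "lz4hc.h"   /* declares LZ4_compress_HC(); pulls in lz4.h */

int main(void)
{
    const char src[] = "example payload that compresses reasonably well well well well";
    char dst[256];   /* should be >= LZ4_compressBound(srcSize) for guaranteed success */

    /* level 9 == LZ4HC_CLEVEL_DEFAULT; higher levels trade speed for ratio */
    int const written = LZ4_compress_HC(src, dst, (int)sizeof(src), (int)sizeof(dst), 9);
    if (written <= 0) { printf("compression failed\n"); return 1; }
    printf("compressed %zu -> %d bytes\n", sizeof(src), written);
    return 0;
}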
@@ -108,6 +108,12 @@ The following build macro can be selected to adjust source code behavior at comp
  Remove support of dynamic memory allocation.
  For more details, see description of this macro in `lib/lz4.c`.

- `LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT` : experimental feature aimed at producing the same
  compressed output on platforms of different endianness (i.e. little-endian and big-endian).
  Output on little-endian platforms shall remain unchanged, while big-endian platforms will start producing
  the same output as little-endian ones. This isn't expected to impact backward- and forward-compatibility
  in any way.

- `LZ4_FREESTANDING` : by setting this build macro to 1,
  LZ4/HC removes dependencies on the C standard library,
  including allocation functions and `memmove()`, `memcpy()`, and `memset()`.
@@ -131,8 +137,8 @@ The following build macro can be selected to adjust source code behavior at comp
#### Makefile variables

The following `Makefile` variables can be selected to alter the profile of produced binaries :
- `BUILD_SHARED` : generate `libzstd` dynamic library (enabled by default)
- `BUILD_STATIC` : generate `libzstd` static library (enabled by default)
- `BUILD_SHARED` : generate `liblz4` dynamic library (enabled by default)
- `BUILD_STATIC` : generate `liblz4` static library (enabled by default)

#### Amalgamation
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
LZ4 - Fast LZ compression algorithm
|
||||
Copyright (C) 2011-2020, Yann Collet.
|
||||
Copyright (c) Yann Collet. All rights reserved.
|
||||
|
||||
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
|
||||
|
||||
@@ -79,7 +79,7 @@
|
||||
( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \
|
||||
|| defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
|
||||
# define LZ4_FORCE_MEMORY_ACCESS 2
|
||||
# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__)
|
||||
# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__) || defined(_MSC_VER)
|
||||
# define LZ4_FORCE_MEMORY_ACCESS 1
|
||||
# endif
|
||||
#endif
|
||||
@@ -106,15 +106,13 @@
|
||||
# define LZ4_SRC_INCLUDED 1
|
||||
#endif
|
||||
|
||||
#ifndef LZ4_STATIC_LINKING_ONLY
|
||||
#define LZ4_STATIC_LINKING_ONLY
|
||||
#endif
|
||||
|
||||
#ifndef LZ4_DISABLE_DEPRECATE_WARNINGS
|
||||
#define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */
|
||||
# define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */
|
||||
#endif
|
||||
|
||||
#define LZ4_STATIC_LINKING_ONLY /* LZ4_DISTANCE_MAX */
|
||||
#ifndef LZ4_STATIC_LINKING_ONLY
|
||||
# define LZ4_STATIC_LINKING_ONLY
|
||||
#endif
|
||||
#include "lz4.h"
|
||||
/* see also "memory routines" below */
|
||||
|
||||
@@ -126,14 +124,17 @@
|
||||
# include <intrin.h> /* only present in VS2005+ */
|
||||
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
|
||||
# pragma warning(disable : 6237) /* disable: C6237: conditional expression is always 0 */
|
||||
# pragma warning(disable : 6239) /* disable: C6239: (<non-zero constant> && <expression>) always evaluates to the result of <expression> */
|
||||
# pragma warning(disable : 6240) /* disable: C6240: (<expression> && <non-zero constant>) always evaluates to the result of <expression> */
|
||||
# pragma warning(disable : 6326) /* disable: C6326: Potential comparison of a constant with another constant */
|
||||
#endif /* _MSC_VER */
|
||||
|
||||
#ifndef LZ4_FORCE_INLINE
|
||||
# ifdef _MSC_VER /* Visual Studio */
|
||||
# if defined (_MSC_VER) && !defined (__clang__) /* MSVC */
|
||||
# define LZ4_FORCE_INLINE static __forceinline
|
||||
# else
|
||||
# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
|
||||
# ifdef __GNUC__
|
||||
# if defined (__GNUC__) || defined (__clang__)
|
||||
# define LZ4_FORCE_INLINE static inline __attribute__((always_inline))
|
||||
# else
|
||||
# define LZ4_FORCE_INLINE static inline
|
||||
@@ -300,12 +301,12 @@ static int LZ4_isAligned(const void* ptr, size_t alignment)
|
||||
#include <limits.h>
|
||||
#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
|
||||
# include <stdint.h>
|
||||
typedef uint8_t BYTE;
|
||||
typedef uint16_t U16;
|
||||
typedef uint32_t U32;
|
||||
typedef int32_t S32;
|
||||
typedef uint64_t U64;
|
||||
typedef uintptr_t uptrval;
|
||||
typedef unsigned char BYTE; /*uint8_t not necessarily blessed to alias arbitrary type*/
|
||||
typedef uint16_t U16;
|
||||
typedef uint32_t U32;
|
||||
typedef int32_t S32;
|
||||
typedef uint64_t U64;
|
||||
typedef uintptr_t uptrval;
|
||||
#else
|
||||
# if UINT_MAX != 4294967295UL
|
||||
# error "LZ4 code (when not C++ or C99) assumes that sizeof(int) == 4"
|
||||
@@ -365,6 +366,11 @@ static unsigned LZ4_isLittleEndian(void)
|
||||
return one.c[0];
|
||||
}
|
||||
|
||||
#if defined(__GNUC__) || defined(__INTEL_COMPILER)
|
||||
#define LZ4_PACK( __Declaration__ ) __Declaration__ __attribute__((__packed__))
|
||||
#elif defined(_MSC_VER)
|
||||
#define LZ4_PACK( __Declaration__ ) __pragma( pack(push, 1) ) __Declaration__ __pragma( pack(pop))
|
||||
#endif
|
||||
|
||||
#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)
|
||||
/* lie to the compiler about data alignment; use with caution */
|
||||
@@ -380,9 +386,9 @@ static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
|
||||
|
||||
/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
|
||||
/* currently only defined for gcc and icc */
|
||||
typedef struct { U16 u16; } __attribute__((packed)) LZ4_unalign16;
|
||||
typedef struct { U32 u32; } __attribute__((packed)) LZ4_unalign32;
|
||||
typedef struct { reg_t uArch; } __attribute__((packed)) LZ4_unalignST;
|
||||
LZ4_PACK(typedef struct { U16 u16; }) LZ4_unalign16;
|
||||
LZ4_PACK(typedef struct { U32 u32; }) LZ4_unalign32;
|
||||
LZ4_PACK(typedef struct { reg_t uArch; }) LZ4_unalignST;
|
||||
|
||||
static U16 LZ4_read16(const void* ptr) { return ((const LZ4_unalign16*)ptr)->u16; }
|
||||
static U32 LZ4_read32(const void* ptr) { return ((const LZ4_unalign32*)ptr)->u32; }
|
||||
@@ -427,10 +433,22 @@ static U16 LZ4_readLE16(const void* memPtr)
|
||||
return LZ4_read16(memPtr);
|
||||
} else {
|
||||
const BYTE* p = (const BYTE*)memPtr;
|
||||
return (U16)((U16)p[0] + (p[1]<<8));
|
||||
return (U16)((U16)p[0] | (p[1]<<8));
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT
|
||||
static U32 LZ4_readLE32(const void* memPtr)
|
||||
{
|
||||
if (LZ4_isLittleEndian()) {
|
||||
return LZ4_read32(memPtr);
|
||||
} else {
|
||||
const BYTE* p = (const BYTE*)memPtr;
|
||||
return (U32)p[0] | (p[1]<<8) | (p[2]<<16) | (p[3]<<24);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
static void LZ4_writeLE16(void* memPtr, U16 value)
|
||||
{
|
||||
if (LZ4_isLittleEndian()) {
|
||||
@@ -460,13 +478,15 @@ static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};
|
||||
#ifndef LZ4_FAST_DEC_LOOP
|
||||
# if defined __i386__ || defined _M_IX86 || defined __x86_64__ || defined _M_X64
|
||||
# define LZ4_FAST_DEC_LOOP 1
|
||||
# elif defined(__aarch64__) && defined(__APPLE__)
|
||||
# define LZ4_FAST_DEC_LOOP 1
|
||||
# elif defined(__aarch64__) && !defined(__clang__)
|
||||
/* On non-Apple aarch64, we disable this optimization for clang because
|
||||
# elif defined(__aarch64__)
|
||||
# if defined(__clang__) && defined(__ANDROID__)
|
||||
/* On Android aarch64, we disable this optimization for clang because
|
||||
* on certain mobile chipsets, performance is reduced with clang. For
|
||||
* more information refer to https://github.com/lz4/lz4/pull/707 */
|
||||
# define LZ4_FAST_DEC_LOOP 1
|
||||
# define LZ4_FAST_DEC_LOOP 0
|
||||
# else
|
||||
# define LZ4_FAST_DEC_LOOP 1
|
||||
# endif
|
||||
# else
|
||||
# define LZ4_FAST_DEC_LOOP 0
|
||||
# endif
|
||||
@@ -512,7 +532,7 @@ LZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd)
|
||||
|
||||
/* LZ4_memcpy_using_offset() presumes :
|
||||
* - dstEnd >= dstPtr + MINMATCH
|
||||
* - there is at least 8 bytes available to write after dstEnd */
|
||||
* - there is at least 12 bytes available to write after dstEnd */
|
||||
LZ4_FORCE_INLINE void
|
||||
LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
|
||||
{
|
||||
@@ -527,12 +547,12 @@ LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const si
|
||||
case 2:
|
||||
LZ4_memcpy(v, srcPtr, 2);
|
||||
LZ4_memcpy(&v[2], srcPtr, 2);
|
||||
#if defined(_MSC_VER) && (_MSC_VER <= 1936) /* MSVC 2022 ver 17.6 or earlier */
|
||||
#if defined(_MSC_VER) && (_MSC_VER <= 1937) /* MSVC 2022 ver 17.7 or earlier */
|
||||
# pragma warning(push)
|
||||
# pragma warning(disable : 6385) /* warning C6385: Reading invalid data from 'v'. */
|
||||
#endif
|
||||
LZ4_memcpy(&v[4], v, 4);
|
||||
#if defined(_MSC_VER) && (_MSC_VER <= 1936) /* MSVC 2022 ver 17.6 or earlier */
|
||||
#if defined(_MSC_VER) && (_MSC_VER <= 1937) /* MSVC 2022 ver 17.7 or earlier */
|
||||
# pragma warning(pop)
|
||||
#endif
|
||||
break;
|
||||
@@ -779,7 +799,12 @@ LZ4_FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
|
||||
LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
|
||||
{
|
||||
if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);
|
||||
|
||||
#ifdef LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT
|
||||
return LZ4_hash4(LZ4_readLE32(p), tableType);
|
||||
#else
|
||||
return LZ4_hash4(LZ4_read32(p), tableType);
|
||||
#endif
|
||||
}
|
||||
|
||||
LZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType)
|
||||
@@ -873,7 +898,7 @@ LZ4_prepareTable(LZ4_stream_t_internal* const cctx,
|
||||
|| tableType == byPtr
|
||||
|| inputSize >= 4 KB)
|
||||
{
|
||||
DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", cctx);
|
||||
DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", (void*)cctx);
|
||||
MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE);
|
||||
cctx->currentOffset = 0;
|
||||
cctx->tableType = (U32)clearedTable;
|
||||
@@ -898,7 +923,7 @@ LZ4_prepareTable(LZ4_stream_t_internal* const cctx,
|
||||
cctx->dictSize = 0;
|
||||
}
|
||||
|
||||
/** LZ4_compress_generic() :
|
||||
/** LZ4_compress_generic_validated() :
|
||||
* inlined, to ensure branches are decided at compilation time.
|
||||
* The following conditions are presumed already validated:
|
||||
* - source != NULL
|
||||
@@ -1080,7 +1105,10 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
|
||||
|
||||
/* Catch up */
|
||||
filledIp = ip;
|
||||
while (((ip>anchor) & (match > lowLimit)) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }
|
||||
assert(ip > anchor); /* this is always true as ip has been advanced before entering the main loop */
|
||||
if ((match > lowLimit) && unlikely(ip[-1] == match[-1])) {
|
||||
do { ip--; match--; } while (((ip > anchor) & (match > lowLimit)) && (unlikely(ip[-1] == match[-1])));
|
||||
}
|
||||
|
||||
/* Encode Literals */
|
||||
{ unsigned const litLength = (unsigned)(ip - anchor);
|
||||
@@ -1095,7 +1123,7 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
|
||||
goto _last_literals;
|
||||
}
|
||||
if (litLength >= RUN_MASK) {
|
||||
int len = (int)(litLength - RUN_MASK);
|
||||
unsigned len = litLength - RUN_MASK;
|
||||
*token = (RUN_MASK<<ML_BITS);
|
||||
for(; len >= 255 ; len-=255) *op++ = 255;
|
||||
*op++ = (BYTE)len;
|
||||
@@ -1452,22 +1480,30 @@ int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacit
|
||||
/* Note!: This function leaves the stream in an unclean/broken state!
|
||||
* It is not safe to subsequently use the same state with a _fastReset() or
|
||||
* _continue() call without resetting it. */
|
||||
static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize)
|
||||
static int LZ4_compress_destSize_extState_internal(LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize, int acceleration)
|
||||
{
|
||||
void* const s = LZ4_initStream(state, sizeof (*state));
|
||||
assert(s != NULL); (void)s;
|
||||
|
||||
if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) { /* compression success is guaranteed */
|
||||
return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1);
|
||||
return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, acceleration);
|
||||
} else {
|
||||
if (*srcSizePtr < LZ4_64Klimit) {
|
||||
return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, 1);
|
||||
return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, acceleration);
|
||||
} else {
|
||||
tableType_t const addrMode = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
|
||||
return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, 1);
|
||||
return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, acceleration);
|
||||
} }
|
||||
}
|
||||
|
||||
int LZ4_compress_destSize_extState(void* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize, int acceleration)
|
||||
{
|
||||
int const r = LZ4_compress_destSize_extState_internal((LZ4_stream_t*)state, src, dst, srcSizePtr, targetDstSize, acceleration);
|
||||
/* clean the state on exit */
|
||||
LZ4_initStream(state, sizeof (LZ4_stream_t));
|
||||
return r;
|
||||
}
|
||||
|
||||
|
||||
int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
|
||||
{
|
||||
@@ -1479,7 +1515,7 @@ int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targe
|
||||
LZ4_stream_t* const ctx = &ctxBody;
|
||||
#endif
|
||||
|
||||
int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize);
|
||||
int result = LZ4_compress_destSize_extState_internal(ctx, src, dst, srcSizePtr, targetDstSize, 1);
|
||||
|
||||
#if (LZ4_HEAPMODE)
|
||||
FREEMEM(ctx);
|
||||
@@ -1498,7 +1534,7 @@ LZ4_stream_t* LZ4_createStream(void)
|
||||
{
|
||||
LZ4_stream_t* const lz4s = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));
|
||||
LZ4_STATIC_ASSERT(sizeof(LZ4_stream_t) >= sizeof(LZ4_stream_t_internal));
|
||||
DEBUGLOG(4, "LZ4_createStream %p", lz4s);
|
||||
DEBUGLOG(4, "LZ4_createStream %p", (void*)lz4s);
|
||||
if (lz4s == NULL) return NULL;
|
||||
LZ4_initStream(lz4s, sizeof(*lz4s));
|
||||
return lz4s;
|
||||
@@ -1529,7 +1565,7 @@ LZ4_stream_t* LZ4_initStream (void* buffer, size_t size)
|
||||
* prefer initStream() which is more general */
|
||||
void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
|
||||
{
|
||||
DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream);
|
||||
DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", (void*)LZ4_stream);
|
||||
MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t_internal));
|
||||
}
|
||||
|
||||
@@ -1541,15 +1577,18 @@ void LZ4_resetStream_fast(LZ4_stream_t* ctx) {
|
||||
int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
|
||||
{
|
||||
if (!LZ4_stream) return 0; /* support free on NULL */
|
||||
DEBUGLOG(5, "LZ4_freeStream %p", LZ4_stream);
|
||||
DEBUGLOG(5, "LZ4_freeStream %p", (void*)LZ4_stream);
|
||||
FREEMEM(LZ4_stream);
|
||||
return (0);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
typedef enum { _ld_fast, _ld_slow } LoadDict_mode_e;
|
||||
#define HASH_UNIT sizeof(reg_t)
|
||||
int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
|
||||
int LZ4_loadDict_internal(LZ4_stream_t* LZ4_dict,
|
||||
const char* dictionary, int dictSize,
|
||||
LoadDict_mode_e _ld)
|
||||
{
|
||||
LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
|
||||
const tableType_t tableType = byU32;
|
||||
@@ -1557,7 +1596,7 @@ int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
|
||||
const BYTE* const dictEnd = p + dictSize;
|
||||
U32 idx32;
|
||||
|
||||
DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary, LZ4_dict);
|
||||
DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, (void*)dictionary, (void*)LZ4_dict);
|
||||
|
||||
/* It's necessary to reset the context,
|
||||
* and not just continue it with prepareTable()
|
||||
@@ -1585,20 +1624,46 @@ int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
|
||||
|
||||
while (p <= dictEnd-HASH_UNIT) {
|
||||
U32 const h = LZ4_hashPosition(p, tableType);
|
||||
/* Note: overwriting => favors positions end of dictionary */
|
||||
LZ4_putIndexOnHash(idx32, h, dict->hashTable, tableType);
|
||||
p+=3; idx32+=3;
|
||||
}
|
||||
|
||||
if (_ld == _ld_slow) {
|
||||
/* Fill hash table with additional references, to improve compression capability */
|
||||
p = dict->dictionary;
|
||||
idx32 = dict->currentOffset - dict->dictSize;
|
||||
while (p <= dictEnd-HASH_UNIT) {
|
||||
U32 const h = LZ4_hashPosition(p, tableType);
|
||||
U32 const limit = dict->currentOffset - 64 KB;
|
||||
if (LZ4_getIndexOnHash(h, dict->hashTable, tableType) <= limit) {
|
||||
/* Note: not overwriting => favors positions beginning of dictionary */
|
||||
LZ4_putIndexOnHash(idx32, h, dict->hashTable, tableType);
|
||||
}
|
||||
p++; idx32++;
|
||||
}
|
||||
}
|
||||
|
||||
return (int)dict->dictSize;
|
||||
}
|
||||
|
||||
int LZ4_loadDict(LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
|
||||
{
|
||||
return LZ4_loadDict_internal(LZ4_dict, dictionary, dictSize, _ld_fast);
|
||||
}
|
||||
|
||||
int LZ4_loadDictSlow(LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
|
||||
{
|
||||
return LZ4_loadDict_internal(LZ4_dict, dictionary, dictSize, _ld_slow);
|
||||
}
|
||||
|
||||
void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream)
|
||||
{
|
||||
const LZ4_stream_t_internal* dictCtx = (dictionaryStream == NULL) ? NULL :
|
||||
&(dictionaryStream->internal_donotuse);
|
||||
|
||||
DEBUGLOG(4, "LZ4_attach_dictionary (%p, %p, size %u)",
|
||||
workingStream, dictionaryStream,
|
||||
(void*)workingStream, (void*)dictionaryStream,
|
||||
dictCtx != NULL ? dictCtx->dictSize : 0);
|
||||
|
||||
if (dictCtx != NULL) {
|
||||
@@ -1662,7 +1727,7 @@ int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,
|
||||
&& (inputSize > 0) /* tolerance : don't lose history, in case next invocation would use prefix mode */
|
||||
&& (streamPtr->dictCtx == NULL) /* usingDictCtx */
|
||||
) {
|
||||
DEBUGLOG(5, "LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small", streamPtr->dictSize, streamPtr->dictionary);
|
||||
DEBUGLOG(5, "LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small", streamPtr->dictSize, (void*)streamPtr->dictionary);
|
||||
/* remove dictionary existence from history, to employ faster prefix mode */
|
||||
streamPtr->dictSize = 0;
|
||||
streamPtr->dictionary = (const BYTE*)source;
|
||||
@@ -1752,7 +1817,7 @@ int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
|
||||
{
|
||||
LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
|
||||
|
||||
DEBUGLOG(5, "LZ4_saveDict : dictSize=%i, safeBuffer=%p", dictSize, safeBuffer);
|
||||
DEBUGLOG(5, "LZ4_saveDict : dictSize=%i, safeBuffer=%p", dictSize, (void*)safeBuffer);
|
||||
|
||||
if ((U32)dictSize > 64 KB) { dictSize = 64 KB; } /* useless to define a dictionary > 64 KB */
|
||||
if ((U32)dictSize > dict->dictSize) { dictSize = (int)dict->dictSize; }
|
||||
@@ -1923,6 +1988,17 @@ read_variable_length(const BYTE** ip, const BYTE* ilimit,
|
||||
if (initial_check && unlikely((*ip) >= ilimit)) { /* read limit reached */
|
||||
return rvl_error;
|
||||
}
|
||||
s = **ip;
|
||||
(*ip)++;
|
||||
length += s;
|
||||
if (unlikely((*ip) > ilimit)) { /* read limit reached */
|
||||
return rvl_error;
|
||||
}
|
||||
/* accumulator overflow detection (32-bit mode only) */
|
||||
if ((sizeof(length) < 8) && unlikely(length > ((Rvl_t)(-1)/2)) ) {
|
||||
return rvl_error;
|
||||
}
|
||||
if (likely(s != 255)) return length;
|
||||
do {
|
||||
s = **ip;
|
||||
(*ip)++;
|
||||
@@ -1931,10 +2007,10 @@ read_variable_length(const BYTE** ip, const BYTE* ilimit,
|
||||
return rvl_error;
|
||||
}
|
||||
/* accumulator overflow detection (32-bit mode only) */
|
||||
if ((sizeof(length)<8) && unlikely(length > ((Rvl_t)(-1)/2)) ) {
|
||||
if ((sizeof(length) < 8) && unlikely(length > ((Rvl_t)(-1)/2)) ) {
|
||||
return rvl_error;
|
||||
}
|
||||
} while (s==255);
|
||||
} while (s == 255);
|
||||
|
||||
return length;
|
||||
}
|
||||
@@ -2000,7 +2076,7 @@ LZ4_decompress_generic(
|
||||
* note : fast loop may show a regression for some client arm chips. */
|
||||
#if LZ4_FAST_DEC_LOOP
|
||||
if ((oend - op) < FASTLOOP_SAFE_DISTANCE) {
|
||||
DEBUGLOG(6, "skip fast decode loop");
|
||||
DEBUGLOG(6, "move to safe decode loop");
|
||||
goto safe_decode;
|
||||
}
|
||||
|
||||
@@ -2012,6 +2088,7 @@ LZ4_decompress_generic(
|
||||
assert(ip < iend);
|
||||
token = *ip++;
|
||||
length = token >> ML_BITS; /* literal length */
|
||||
DEBUGLOG(7, "blockPos%6u: litLength token = %u", (unsigned)(op-(BYTE*)dst), (unsigned)length);
|
||||
|
||||
/* decode literal length */
|
||||
if (length == RUN_MASK) {
|
||||
@@ -2025,49 +2102,47 @@ LZ4_decompress_generic(
|
||||
if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
|
||||
|
||||
/* copy literals */
|
||||
cpy = op+length;
|
||||
LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
|
||||
if ((cpy>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; }
|
||||
LZ4_wildCopy32(op, ip, cpy);
|
||||
ip += length; op = cpy;
|
||||
} else {
|
||||
cpy = op+length;
|
||||
DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe", (unsigned)length);
|
||||
if ((op+length>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; }
|
||||
LZ4_wildCopy32(op, ip, op+length);
|
||||
ip += length; op += length;
|
||||
} else if (ip <= iend-(16 + 1/*max lit + offset + nextToken*/)) {
|
||||
/* We don't need to check oend, since we check it once for each loop below */
|
||||
if (ip > iend-(16 + 1/*max lit + offset + nextToken*/)) { goto safe_literal_copy; }
|
||||
DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe", (unsigned)length);
|
||||
/* Literals can only be <= 14, but hope compilers optimize better when copy by a register size */
|
||||
LZ4_memcpy(op, ip, 16);
|
||||
ip += length; op = cpy;
|
||||
ip += length; op += length;
|
||||
} else {
|
||||
goto safe_literal_copy;
|
||||
}
|
||||
|
||||
/* get offset */
|
||||
offset = LZ4_readLE16(ip); ip+=2;
|
||||
DEBUGLOG(6, " offset = %zu", offset);
|
||||
DEBUGLOG(6, "blockPos%6u: offset = %u", (unsigned)(op-(BYTE*)dst), (unsigned)offset);
|
||||
match = op - offset;
|
||||
assert(match <= op); /* overflow check */
|
||||
|
||||
/* get matchlength */
|
||||
length = token & ML_MASK;
|
||||
DEBUGLOG(7, " match length token = %u (len==%u)", (unsigned)length, (unsigned)length+MINMATCH);
|
||||
|
||||
if (length == ML_MASK) {
|
||||
size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0);
|
||||
if (addl == rvl_error) {
|
||||
DEBUGLOG(6, "error reading long match length");
|
||||
DEBUGLOG(5, "error reading long match length");
|
||||
goto _output_error;
|
||||
}
|
||||
length += addl;
|
||||
length += MINMATCH;
|
||||
DEBUGLOG(7, " long match length == %u", (unsigned)length);
|
||||
if (unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */
|
||||
if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) {
|
||||
DEBUGLOG(6, "Error : offset outside buffers");
|
||||
goto _output_error;
|
||||
}
|
||||
if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
|
||||
goto safe_match_copy;
|
||||
}
|
||||
} else {
|
||||
length += MINMATCH;
|
||||
if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
|
||||
DEBUGLOG(7, "moving to safe_match_copy (ml==%u)", (unsigned)length);
|
||||
goto safe_match_copy;
|
||||
}
|
||||
|
||||
@@ -2086,7 +2161,7 @@ LZ4_decompress_generic(
|
||||
} } }
|
||||
|
||||
if ( checkOffset && (unlikely(match + dictSize < lowPrefix)) ) {
|
||||
DEBUGLOG(6, "Error : pos=%zi, offset=%zi => outside buffers", op-lowPrefix, op-match);
|
||||
DEBUGLOG(5, "Error : pos=%zi, offset=%zi => outside buffers", op-lowPrefix, op-match);
|
||||
goto _output_error;
|
||||
}
|
||||
/* match starting within external dictionary */
|
||||
@@ -2143,6 +2218,7 @@ LZ4_decompress_generic(
|
||||
assert(ip < iend);
|
||||
token = *ip++;
|
||||
length = token >> ML_BITS; /* literal length */
|
||||
DEBUGLOG(7, "blockPos%6u: litLength token = %u", (unsigned)(op-(BYTE*)dst), (unsigned)length);
|
||||
|
||||
/* A two-stage shortcut for the most common case:
|
||||
* 1) If the literal length is 0..14, and there is enough space,
|
||||
@@ -2163,6 +2239,7 @@ LZ4_decompress_generic(
|
||||
/* The second stage: prepare for match copying, decode full info.
|
||||
* If it doesn't work out, the info won't be wasted. */
|
||||
length = token & ML_MASK; /* match length */
|
||||
DEBUGLOG(7, "blockPos%6u: matchLength token = %u (len=%u)", (unsigned)(op-(BYTE*)dst), (unsigned)length, (unsigned)length + 4);
|
||||
offset = LZ4_readLE16(ip); ip += 2;
|
||||
match = op - offset;
|
||||
assert(match <= op); /* check overflow */
|
||||
@@ -2194,11 +2271,12 @@ LZ4_decompress_generic(
|
||||
if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
|
||||
}
|
||||
|
||||
/* copy literals */
|
||||
cpy = op+length;
|
||||
#if LZ4_FAST_DEC_LOOP
|
||||
safe_literal_copy:
|
||||
#endif
|
||||
/* copy literals */
|
||||
cpy = op+length;
|
||||
|
||||
LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
|
||||
if ((cpy>oend-MFLIMIT) || (ip+length>iend-(2+1+LASTLITERALS))) {
|
||||
/* We've either hit the input parsing restriction or the output parsing restriction.
|
||||
@@ -2234,9 +2312,10 @@ LZ4_decompress_generic(
|
||||
* so check that we exactly consume the input and don't overrun the output buffer.
|
||||
*/
|
||||
if ((ip+length != iend) || (cpy > oend)) {
|
||||
DEBUGLOG(6, "should have been last run of literals")
|
||||
DEBUGLOG(6, "ip(%p) + length(%i) = %p != iend (%p)", ip, (int)length, ip+length, iend);
|
||||
DEBUGLOG(6, "or cpy(%p) > oend(%p)", cpy, oend);
|
||||
DEBUGLOG(5, "should have been last run of literals")
|
||||
DEBUGLOG(5, "ip(%p) + length(%i) = %p != iend (%p)", (void*)ip, (int)length, (void*)(ip+length), (void*)iend);
|
||||
DEBUGLOG(5, "or cpy(%p) > (oend-MFLIMIT)(%p)", (void*)cpy, (void*)(oend-MFLIMIT));
|
||||
DEBUGLOG(5, "after writing %u bytes / %i bytes available", (unsigned)(op-(BYTE*)dst), outputSize);
|
||||
goto _output_error;
|
||||
}
|
||||
}
|
||||
@@ -2262,6 +2341,7 @@ LZ4_decompress_generic(
|
||||
|
||||
/* get matchlength */
|
||||
length = token & ML_MASK;
|
||||
DEBUGLOG(7, "blockPos%6u: matchLength token = %u", (unsigned)(op-(BYTE*)dst), (unsigned)length);
|
||||
|
||||
_copy_match:
|
||||
if (length == ML_MASK) {
|
||||
@@ -2351,7 +2431,7 @@ LZ4_decompress_generic(
|
||||
while (op < cpy) { *op++ = *match++; }
|
||||
} else {
|
||||
LZ4_memcpy(op, match, 8);
|
||||
if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); }
|
||||
if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); }
|
||||
}
|
||||
op = cpy; /* wildcopy correction */
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*
|
||||
* LZ4 - Fast LZ compression algorithm
|
||||
* Header File
|
||||
* Copyright (C) 2011-2020, Yann Collet.
|
||||
* Copyright (c) Yann Collet. All rights reserved.
|
||||
|
||||
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
|
||||
|
||||
@@ -129,8 +129,8 @@ extern "C" {
|
||||
|
||||
/*------ Version ------*/
|
||||
#define LZ4_VERSION_MAJOR 1 /* for breaking interface changes */
|
||||
#define LZ4_VERSION_MINOR 9 /* for new (non-breaking) interface capabilities */
|
||||
#define LZ4_VERSION_RELEASE 4 /* for tweaks, bug-fixes, or development */
|
||||
#define LZ4_VERSION_MINOR 10 /* for new (non-breaking) interface capabilities */
|
||||
#define LZ4_VERSION_RELEASE 0 /* for tweaks, bug-fixes, or development */
|
||||
|
||||
#define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE)
|
||||
|
||||
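For the record, the version macro shown above works out as follows for the bump in this hunk.

/* LZ4_VERSION_NUMBER = MAJOR*100*100 + MINOR*100 + RELEASE
 * old: 1*10000 +  9*100 + 4 = 10904   (v1.9.4)
 * new: 1*10000 + 10*100 + 0 = 11000   (v1.10.0) */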
@@ -144,23 +144,25 @@ LZ4LIB_API const char* LZ4_versionString (void); /**< library version string;


/*-************************************
 * Tuning parameter
 * Tuning memory usage
 **************************************/
#define LZ4_MEMORY_USAGE_MIN 10
#define LZ4_MEMORY_USAGE_DEFAULT 14
#define LZ4_MEMORY_USAGE_MAX 20

/*!
 * LZ4_MEMORY_USAGE :
 * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; )
 * Increasing memory usage improves compression ratio, at the cost of speed.
 * Can be selected at compile time, by setting LZ4_MEMORY_USAGE.
 * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB)
 * Increasing memory usage improves compression ratio, generally at the cost of speed.
 * Reduced memory usage may improve speed at the cost of ratio, thanks to better cache locality.
 * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
 * Default value is 14, for 16KB, which nicely fits into most L1 caches.
 */
#ifndef LZ4_MEMORY_USAGE
# define LZ4_MEMORY_USAGE LZ4_MEMORY_USAGE_DEFAULT
#endif

/* These are absolute limits, they should not be changed by users */
#define LZ4_MEMORY_USAGE_MIN 10
#define LZ4_MEMORY_USAGE_DEFAULT 14
#define LZ4_MEMORY_USAGE_MAX 20

#if (LZ4_MEMORY_USAGE < LZ4_MEMORY_USAGE_MIN)
# error "LZ4_MEMORY_USAGE is too small !"
#endif

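A small illustrative sketch (not part of the diff) of how this tuning macro is meant to be used: it only takes effect if it is visible when lz4 itself is compiled, e.g. via the compiler command line or ahead of an amalgamated include.

/* Sketch: shrink the internal hash table from the default 16 KB (2^14)
 * to 4 KB (2^12), trading some ratio for better cache locality.
 * Equivalent command-line form (assumed typical usage): cc -DLZ4_MEMORY_USAGE=12 -c lz4.c */
#define LZ4_MEMORY_USAGE 12
#include "lz4.h"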
@@ -191,7 +193,7 @@ LZ4LIB_API int LZ4_compress_default(const char* src, char* dst, int srcSize, int
/*! LZ4_decompress_safe() :
 * @compressedSize : is the exact complete size of the compressed block.
 * @dstCapacity : is the size of destination buffer (which must be already allocated),
 *   is an upper bound of decompressed size.
 *   presumed an upper bound of decompressed size.
 * @return : the number of bytes decompressed into destination buffer (necessarily <= dstCapacity)
 *   If destination buffer is not large enough, decoding will stop and output an error code (negative value).
 *   If the source stream is detected malformed, the function will stop decoding and return a negative result.
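Illustrative round-trip sketch (not part of the diff) of the contract described above: compress a block with LZ4_compress_default(), then decode it with LZ4_decompress_safe() using the exact compressed size.

#include <assert.h>
#include <string.h>
#include "lz4.h"

static void roundtrip(void)
{
    const char src[] = "hello hello hello hello hello";
    char compressed[128];           /* >= LZ4_compressBound(srcSize) */
    char decoded[sizeof(src)];      /* dstCapacity = upper bound of decompressed size */

    int const cSize = LZ4_compress_default(src, compressed, (int)sizeof(src), (int)sizeof(compressed));
    assert(cSize > 0);

    /* compressedSize must be the exact size produced above */
    int const dSize = LZ4_decompress_safe(compressed, decoded, cSize, (int)sizeof(decoded));
    assert(dSize == (int)sizeof(src));
    assert(memcmp(src, decoded, sizeof(src)) == 0);
}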
@@ -243,23 +245,25 @@ LZ4LIB_API int LZ4_compress_fast (const char* src, char* dst, int srcSize, int d
|
||||
LZ4LIB_API int LZ4_sizeofState(void);
|
||||
LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
|
||||
|
||||
|
||||
/*! LZ4_compress_destSize() :
|
||||
* Reverse the logic : compresses as much data as possible from 'src' buffer
|
||||
* into already allocated buffer 'dst', of size >= 'targetDestSize'.
|
||||
* into already allocated buffer 'dst', of size >= 'dstCapacity'.
|
||||
* This function either compresses the entire 'src' content into 'dst' if it's large enough,
|
||||
* or fill 'dst' buffer completely with as much data as possible from 'src'.
|
||||
* note: acceleration parameter is fixed to "default".
|
||||
*
|
||||
* *srcSizePtr : will be modified to indicate how many bytes where read from 'src' to fill 'dst'.
|
||||
* *srcSizePtr : in+out parameter. Initially contains size of input.
|
||||
* Will be modified to indicate how many bytes where read from 'src' to fill 'dst'.
|
||||
* New value is necessarily <= input value.
|
||||
* @return : Nb bytes written into 'dst' (necessarily <= targetDestSize)
|
||||
* @return : Nb bytes written into 'dst' (necessarily <= dstCapacity)
|
||||
* or 0 if compression fails.
|
||||
*
|
||||
* Note : from v1.8.2 to v1.9.1, this function had a bug (fixed in v1.9.2+):
|
||||
* the produced compressed content could, in specific circumstances,
|
||||
* require to be decompressed into a destination buffer larger
|
||||
* by at least 1 byte than the content to decompress.
|
||||
* Note : 'targetDstSize' must be >= 1, because it's the smallest valid lz4 payload.
|
||||
*
|
||||
* Note 2:from v1.8.2 to v1.9.1, this function had a bug (fixed in v1.9.2+):
|
||||
* the produced compressed content could, in rare circumstances,
|
||||
* require to be decompressed into a destination buffer
|
||||
* larger by at least 1 byte than decompressesSize.
|
||||
* If an application uses `LZ4_compress_destSize()`,
|
||||
* it's highly recommended to update liblz4 to v1.9.2 or better.
|
||||
* If this can't be done or ensured,
|
||||
@@ -267,8 +271,7 @@ LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* src, char* d
|
||||
* a dstCapacity which is > decompressedSize, by at least 1 byte.
|
||||
* See https://github.com/lz4/lz4/issues/859 for details
|
||||
*/
|
||||
LZ4LIB_API int LZ4_compress_destSize (const char* src, char* dst, int* srcSizePtr, int targetDstSize);
|
||||
|
||||
LZ4LIB_API int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize);
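Illustrative sketch (not part of the diff) of the "reverse logic" documented above: fill a fixed-size destination with as much of the source as fits, and learn how much input was consumed.

#include <stdio.h>
#include "lz4.h"

static void fill_fixed_destination(const char* src, int srcSize)
{
    char dst[64];
    int consumed = srcSize;                      /* in: available input size */
    int const written = LZ4_compress_destSize(src, dst, &consumed, (int)sizeof(dst));
    /* out: 'consumed' now holds how many source bytes were actually read (<= srcSize) */
    if (written == 0) { printf("compression failed\n"); return; }
    printf("packed %d of %d input bytes into %d output bytes\n", consumed, srcSize, written);
}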
|
||||
|
||||
/*! LZ4_decompress_safe_partial() :
|
||||
* Decompress an LZ4 compressed block, of size 'srcSize' at position 'src',
|
||||
@@ -312,7 +315,7 @@ LZ4LIB_API int LZ4_decompress_safe_partial (const char* src, char* dst, int srcS
|
||||
***********************************************/
|
||||
typedef union LZ4_stream_u LZ4_stream_t; /* incomplete type (defined later) */
|
||||
|
||||
/**
|
||||
/*!
|
||||
Note about RC_INVOKED
|
||||
|
||||
- RC_INVOKED is predefined symbol of rc.exe (the resource compiler which is part of MSVC/Visual Studio).
|
||||
@@ -362,13 +365,58 @@ LZ4LIB_API void LZ4_resetStream_fast (LZ4_stream_t* streamPtr);
 * LZ4_loadDict() triggers a reset, so any previous data will be forgotten.
 * The same dictionary will have to be loaded on decompression side for successful decoding.
 * Dictionary are useful for better compression of small data (KB range).
 * While LZ4 accept any input as dictionary,
 * results are generally better when using Zstandard's Dictionary Builder.
 * While LZ4 itself accepts any input as dictionary, dictionary efficiency is also a topic.
 * When in doubt, employ the Zstandard's Dictionary Builder.
 * Loading a size of 0 is allowed, and is the same as reset.
 * @return : loaded dictionary size, in bytes (necessarily <= 64 KB)
 * @return : loaded dictionary size, in bytes (note: only the last 64 KB are loaded)
 */
LZ4LIB_API int LZ4_loadDict (LZ4_stream_t* streamPtr, const char* dictionary, int dictSize);

/*! LZ4_loadDictSlow() : v1.10.0+
 * Same as LZ4_loadDict(),
 * but uses a bit more cpu to reference the dictionary content more thoroughly.
 * This is expected to slightly improve compression ratio.
 * The extra-cpu cost is likely worth it if the dictionary is re-used across multiple sessions.
 * @return : loaded dictionary size, in bytes (note: only the last 64 KB are loaded)
 */
LZ4LIB_API int LZ4_loadDictSlow(LZ4_stream_t* streamPtr, const char* dictionary, int dictSize);

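Illustrative sketch (not part of the diff) of dictionary-based compression with the APIs documented above; 'dictBuf'/'dictLen' stand in for a caller-provided dictionary buffer.

#include "lz4.h"

static int compress_with_dict(const char* dictBuf, int dictLen,
                              const char* src, int srcSize,
                              char* dst, int dstCapacity)
{
    LZ4_stream_t stream;
    LZ4_initStream(&stream, sizeof(stream));

    /* Reference the dictionary; LZ4_loadDictSlow() could be used instead
     * when the same dictionary is re-used across many sessions. */
    LZ4_loadDict(&stream, dictBuf, dictLen);

    /* The dictionary logically precedes 'src'; the decoder must use the
     * same dictionary (e.g. LZ4_decompress_safe_usingDict()). */
    return LZ4_compress_fast_continue(&stream, src, dst, srcSize, dstCapacity, 1);
}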
/*! LZ4_attach_dictionary() : stable since v1.10.0
|
||||
*
|
||||
* This allows efficient re-use of a static dictionary multiple times.
|
||||
*
|
||||
* Rather than re-loading the dictionary buffer into a working context before
|
||||
* each compression, or copying a pre-loaded dictionary's LZ4_stream_t into a
|
||||
* working LZ4_stream_t, this function introduces a no-copy setup mechanism,
|
||||
* in which the working stream references @dictionaryStream in-place.
|
||||
*
|
||||
* Several assumptions are made about the state of @dictionaryStream.
|
||||
* Currently, only states which have been prepared by LZ4_loadDict() or
|
||||
* LZ4_loadDictSlow() should be expected to work.
|
||||
*
|
||||
* Alternatively, the provided @dictionaryStream may be NULL,
|
||||
* in which case any existing dictionary stream is unset.
|
||||
*
|
||||
* If a dictionary is provided, it replaces any pre-existing stream history.
|
||||
* The dictionary contents are the only history that can be referenced and
|
||||
* logically immediately precede the data compressed in the first subsequent
|
||||
* compression call.
|
||||
*
|
||||
* The dictionary will only remain attached to the working stream through the
|
||||
* first compression call, at the end of which it is cleared.
|
||||
* @dictionaryStream stream (and source buffer) must remain in-place / accessible / unchanged
|
||||
* through the completion of the compression session.
|
||||
*
|
||||
* Note: there is no equivalent LZ4_attach_*() method on the decompression side
|
||||
* because there is no initialization cost, hence no need to share the cost across multiple sessions.
|
||||
* To decompress LZ4 blocks using dictionary, attached or not,
|
||||
* just employ the regular LZ4_setStreamDecode() for streaming,
|
||||
* or the stateless LZ4_decompress_safe_usingDict() for one-shot decompression.
|
||||
*/
|
||||
LZ4LIB_API void
|
||||
LZ4_attach_dictionary(LZ4_stream_t* workingStream,
|
||||
const LZ4_stream_t* dictionaryStream);
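Illustrative sketch (not part of the diff) of the no-copy setup described above, assuming 'dictStream' was prepared once via LZ4_loadDict() or LZ4_loadDictSlow().

#include "lz4.h"

static int compress_one_session(const LZ4_stream_t* dictStream,
                                LZ4_stream_t* work,
                                const char* src, int srcSize,
                                char* dst, int dstCapacity)
{
    LZ4_resetStream_fast(work);               /* working stream must be in a clean state */
    LZ4_attach_dictionary(work, dictStream);  /* references dictStream in-place, no copy */
    /* the attachment only lasts through this first compression call */
    return LZ4_compress_fast_continue(work, src, dst, srcSize, dstCapacity, 1);
}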
|
||||
|
||||
/*! LZ4_compress_fast_continue() :
|
||||
* Compress 'src' content using data from previously compressed blocks, for better compression ratio.
|
||||
* 'dst' buffer must be already allocated.
|
||||
@@ -546,9 +594,9 @@ LZ4_decompress_safe_partial_usingDict(const char* src, char* dst,
|
||||
#define LZ4_STATIC_3504398509
|
||||
|
||||
#ifdef LZ4_PUBLISH_STATIC_FUNCTIONS
|
||||
#define LZ4LIB_STATIC_API LZ4LIB_API
|
||||
# define LZ4LIB_STATIC_API LZ4LIB_API
|
||||
#else
|
||||
#define LZ4LIB_STATIC_API
|
||||
# define LZ4LIB_STATIC_API
|
||||
#endif
|
||||
|
||||
|
||||
@@ -564,36 +612,11 @@ LZ4_decompress_safe_partial_usingDict(const char* src, char* dst,
|
||||
*/
|
||||
LZ4LIB_STATIC_API int LZ4_compress_fast_extState_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
|
||||
|
||||
/*! LZ4_attach_dictionary() :
|
||||
* This is an experimental API that allows
|
||||
* efficient use of a static dictionary many times.
|
||||
*
|
||||
* Rather than re-loading the dictionary buffer into a working context before
|
||||
* each compression, or copying a pre-loaded dictionary's LZ4_stream_t into a
|
||||
* working LZ4_stream_t, this function introduces a no-copy setup mechanism,
|
||||
* in which the working stream references the dictionary stream in-place.
|
||||
*
|
||||
* Several assumptions are made about the state of the dictionary stream.
|
||||
* Currently, only streams which have been prepared by LZ4_loadDict() should
|
||||
* be expected to work.
|
||||
*
|
||||
* Alternatively, the provided dictionaryStream may be NULL,
|
||||
* in which case any existing dictionary stream is unset.
|
||||
*
|
||||
* If a dictionary is provided, it replaces any pre-existing stream history.
|
||||
* The dictionary contents are the only history that can be referenced and
|
||||
* logically immediately precede the data compressed in the first subsequent
|
||||
* compression call.
|
||||
*
|
||||
* The dictionary will only remain attached to the working stream through the
|
||||
* first compression call, at the end of which it is cleared. The dictionary
|
||||
* stream (and source buffer) must remain in-place / accessible / unchanged
|
||||
* through the completion of the first compression call on the stream.
|
||||
/*! LZ4_compress_destSize_extState() : introduced in v1.10.0
|
||||
* Same as LZ4_compress_destSize(), but using an externally allocated state.
|
||||
* Also: exposes @acceleration
|
||||
*/
|
||||
LZ4LIB_STATIC_API void
|
||||
LZ4_attach_dictionary(LZ4_stream_t* workingStream,
|
||||
const LZ4_stream_t* dictionaryStream);
|
||||
|
||||
int LZ4_compress_destSize_extState(void* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize, int acceleration);
|
||||
|
||||
/*! In-place compression and decompression
|
||||
*
|
||||
@@ -677,10 +700,10 @@ LZ4_attach_dictionary(LZ4_stream_t* workingStream,
|
||||
|
||||
#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
|
||||
# include <stdint.h>
|
||||
typedef int8_t LZ4_i8;
|
||||
typedef uint8_t LZ4_byte;
|
||||
typedef uint16_t LZ4_u16;
|
||||
typedef uint32_t LZ4_u32;
|
||||
typedef int8_t LZ4_i8;
|
||||
typedef unsigned char LZ4_byte;
|
||||
typedef uint16_t LZ4_u16;
|
||||
typedef uint32_t LZ4_u32;
|
||||
#else
|
||||
typedef signed char LZ4_i8;
|
||||
typedef unsigned char LZ4_byte;
|
||||
@@ -705,7 +728,7 @@ struct LZ4_stream_t_internal {
|
||||
/* Implicit padding to ensure structure is aligned */
|
||||
};
|
||||
|
||||
#define LZ4_STREAM_MINSIZE ((1UL << LZ4_MEMORY_USAGE) + 32) /* static size, for inter-version compatibility */
|
||||
#define LZ4_STREAM_MINSIZE ((1UL << (LZ4_MEMORY_USAGE)) + 32) /* static size, for inter-version compatibility */
|
||||
union LZ4_stream_u {
|
||||
char minStateSize[LZ4_STREAM_MINSIZE];
|
||||
LZ4_stream_t_internal internal_donotuse;
|
||||
@@ -726,7 +749,7 @@ union LZ4_stream_u {
|
||||
* Note2: An LZ4_stream_t structure guarantees correct alignment and size.
|
||||
* Note3: Before v1.9.0, use LZ4_resetStream() instead
|
||||
**/
|
||||
LZ4LIB_API LZ4_stream_t* LZ4_initStream (void* buffer, size_t size);
|
||||
LZ4LIB_API LZ4_stream_t* LZ4_initStream (void* stateBuffer, size_t size);
|
||||
|
||||
|
||||
/*! LZ4_streamDecode_t :
|
||||
@@ -838,11 +861,12 @@ LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") LZ4LIB_API int LZ4
|
||||
* But they may happen if input data is invalid (error or intentional tampering).
|
||||
* As a consequence, use these functions in trusted environments with trusted data **only**.
|
||||
*/
|
||||
LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe() instead")
|
||||
LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_partial() instead")
|
||||
LZ4LIB_API int LZ4_decompress_fast (const char* src, char* dst, int originalSize);
|
||||
LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_continue() instead")
|
||||
LZ4_DEPRECATED("This function is deprecated and unsafe. Consider migrating towards LZ4_decompress_safe_continue() instead. "
|
||||
"Note that the contract will change (requires block's compressed size, instead of decompressed size)")
|
||||
LZ4LIB_API int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* src, char* dst, int originalSize);
|
||||
LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_usingDict() instead")
|
||||
LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_partial_usingDict() instead")
|
||||
LZ4LIB_API int LZ4_decompress_fast_usingDict (const char* src, char* dst, int originalSize, const char* dictStart, int dictSize);
|
||||
|
||||
/*! LZ4_resetStream() :
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* LZ4 auto-framing library
|
||||
* Copyright (C) 2011-2016, Yann Collet.
|
||||
* Copyright (c) Yann Collet. All rights reserved.
|
||||
*
|
||||
* BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
|
||||
*
|
||||
@@ -44,6 +44,7 @@
|
||||
/*-************************************
|
||||
* Compiler Options
|
||||
**************************************/
|
||||
#include <limits.h>
|
||||
#ifdef _MSC_VER /* Visual Studio */
|
||||
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
|
||||
#endif
|
||||
@@ -125,8 +126,9 @@ static void* LZ4F_malloc(size_t s, LZ4F_CustomMem cmem)
|
||||
|
||||
static void LZ4F_free(void* p, LZ4F_CustomMem cmem)
|
||||
{
|
||||
/* custom malloc defined : use it */
|
||||
if (p == NULL) return;
|
||||
if (cmem.customFree != NULL) {
|
||||
/* custom allocation defined : use it */
|
||||
cmem.customFree(cmem.opaqueState, p);
|
||||
return;
|
||||
}
|
||||
@@ -153,7 +155,7 @@ static void LZ4F_free(void* p, LZ4F_CustomMem cmem)
|
||||
static int g_debuglog_enable = 1;
|
||||
# define DEBUGLOG(l, ...) { \
|
||||
if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \
|
||||
fprintf(stderr, __FILE__ ": "); \
|
||||
fprintf(stderr, __FILE__ " %i: ", __LINE__ ); \
|
||||
fprintf(stderr, __VA_ARGS__); \
|
||||
fprintf(stderr, " \n"); \
|
||||
} }
|
||||
@@ -186,9 +188,9 @@ static U32 LZ4F_readLE32 (const void* src)
|
||||
{
|
||||
const BYTE* const srcPtr = (const BYTE*)src;
|
||||
U32 value32 = srcPtr[0];
|
||||
value32 += ((U32)srcPtr[1])<< 8;
|
||||
value32 += ((U32)srcPtr[2])<<16;
|
||||
value32 += ((U32)srcPtr[3])<<24;
|
||||
value32 |= ((U32)srcPtr[1])<< 8;
|
||||
value32 |= ((U32)srcPtr[2])<<16;
|
||||
value32 |= ((U32)srcPtr[3])<<24;
|
||||
return value32;
|
||||
}
|
||||
|
||||
@@ -205,13 +207,13 @@ static U64 LZ4F_readLE64 (const void* src)
|
||||
{
|
||||
const BYTE* const srcPtr = (const BYTE*)src;
|
||||
U64 value64 = srcPtr[0];
|
||||
value64 += ((U64)srcPtr[1]<<8);
|
||||
value64 += ((U64)srcPtr[2]<<16);
|
||||
value64 += ((U64)srcPtr[3]<<24);
|
||||
value64 += ((U64)srcPtr[4]<<32);
|
||||
value64 += ((U64)srcPtr[5]<<40);
|
||||
value64 += ((U64)srcPtr[6]<<48);
|
||||
value64 += ((U64)srcPtr[7]<<56);
|
||||
value64 |= ((U64)srcPtr[1]<<8);
|
||||
value64 |= ((U64)srcPtr[2]<<16);
|
||||
value64 |= ((U64)srcPtr[3]<<24);
|
||||
value64 |= ((U64)srcPtr[4]<<32);
|
||||
value64 |= ((U64)srcPtr[5]<<40);
|
||||
value64 |= ((U64)srcPtr[6]<<48);
|
||||
value64 |= ((U64)srcPtr[7]<<56);
|
||||
return value64;
|
||||
}
|
||||
|
||||
@@ -257,7 +259,8 @@ static const size_t BFSize = LZ4F_BLOCK_CHECKSUM_SIZE; /* block footer : checks
|
||||
* Structures and local types
|
||||
**************************************/
|
||||
|
||||
typedef enum { LZ4B_COMPRESSED, LZ4B_UNCOMPRESSED} LZ4F_blockCompression_t;
|
||||
typedef enum { LZ4B_COMPRESSED, LZ4B_UNCOMPRESSED} LZ4F_BlockCompressMode_e;
|
||||
typedef enum { ctxNone, ctxFast, ctxHC } LZ4F_CtxType_e;
|
||||
|
||||
typedef struct LZ4F_cctx_s
|
||||
{
|
||||
@@ -275,8 +278,8 @@ typedef struct LZ4F_cctx_s
|
||||
XXH32_state_t xxh;
|
||||
void* lz4CtxPtr;
|
||||
U16 lz4CtxAlloc; /* sized for: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */
|
||||
U16 lz4CtxState; /* in use as: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */
|
||||
LZ4F_blockCompression_t blockCompression;
|
||||
U16 lz4CtxType; /* in use as: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */
|
||||
LZ4F_BlockCompressMode_e blockCompressMode;
|
||||
} LZ4F_cctx_t;
|
||||
|
||||
|
||||
@@ -314,7 +317,12 @@ static LZ4F_errorCode_t LZ4F_returnErrorCode(LZ4F_errorCodes code)
|
||||
|
||||
#define RETURN_ERROR(e) return LZ4F_returnErrorCode(LZ4F_ERROR_ ## e)
|
||||
|
||||
#define RETURN_ERROR_IF(c,e) do { if (c) RETURN_ERROR(e); } while (0)
|
||||
#define RETURN_ERROR_IF(c,e) do { \
|
||||
if (c) { \
|
||||
DEBUGLOG(3, "Error: " #c); \
|
||||
RETURN_ERROR(e); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define FORWARD_IF_ERROR(r) do { if (LZ4F_isError(r)) return (r); } while (0)
|
||||
|
||||
@@ -429,6 +437,7 @@ size_t LZ4F_compressFrame_usingCDict(LZ4F_cctx* cctx,
|
||||
BYTE* dstPtr = dstStart;
|
||||
BYTE* const dstEnd = dstStart + dstCapacity;
|
||||
|
||||
DEBUGLOG(4, "LZ4F_compressFrame_usingCDict (srcSize=%u)", (unsigned)srcSize);
|
||||
if (preferencesPtr!=NULL)
|
||||
prefs = *preferencesPtr;
|
||||
else
|
||||
@@ -494,7 +503,7 @@ size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity,
|
||||
LZ4_initStream(&lz4ctx, sizeof(lz4ctx));
|
||||
cctxPtr->lz4CtxPtr = &lz4ctx;
|
||||
cctxPtr->lz4CtxAlloc = 1;
|
||||
cctxPtr->lz4CtxState = 1;
|
||||
cctxPtr->lz4CtxType = ctxFast;
|
||||
}
|
||||
#endif
|
||||
DEBUGLOG(4, "LZ4F_compressFrame");
|
||||
@@ -530,27 +539,35 @@ LZ4F_CDict*
|
||||
LZ4F_createCDict_advanced(LZ4F_CustomMem cmem, const void* dictBuffer, size_t dictSize)
|
||||
{
|
||||
const char* dictStart = (const char*)dictBuffer;
|
||||
LZ4F_CDict* const cdict = (LZ4F_CDict*)LZ4F_malloc(sizeof(*cdict), cmem);
|
||||
LZ4F_CDict* cdict = NULL;
|
||||
|
||||
DEBUGLOG(4, "LZ4F_createCDict_advanced");
|
||||
if (!cdict) return NULL;
|
||||
|
||||
if (!dictStart)
|
||||
return NULL;
|
||||
cdict = (LZ4F_CDict*)LZ4F_malloc(sizeof(*cdict), cmem);
|
||||
if (!cdict)
|
||||
return NULL;
|
||||
|
||||
cdict->cmem = cmem;
|
||||
if (dictSize > 64 KB) {
|
||||
dictStart += dictSize - 64 KB;
|
||||
dictSize = 64 KB;
|
||||
}
|
||||
cdict->dictContent = LZ4F_malloc(dictSize, cmem);
|
||||
/* note: using @cmem to allocate => can't use default create */
|
||||
cdict->fastCtx = (LZ4_stream_t*)LZ4F_malloc(sizeof(LZ4_stream_t), cmem);
|
||||
if (cdict->fastCtx)
|
||||
LZ4_initStream(cdict->fastCtx, sizeof(LZ4_stream_t));
|
||||
cdict->HCCtx = (LZ4_streamHC_t*)LZ4F_malloc(sizeof(LZ4_streamHC_t), cmem);
|
||||
if (cdict->HCCtx)
|
||||
LZ4_initStream(cdict->HCCtx, sizeof(LZ4_streamHC_t));
|
||||
if (!cdict->dictContent || !cdict->fastCtx || !cdict->HCCtx) {
|
||||
LZ4F_freeCDict(cdict);
|
||||
return NULL;
|
||||
}
|
||||
memcpy(cdict->dictContent, dictStart, dictSize);
|
||||
LZ4_loadDict (cdict->fastCtx, (const char*)cdict->dictContent, (int)dictSize);
|
||||
LZ4_initStream(cdict->fastCtx, sizeof(LZ4_stream_t));
|
||||
LZ4_loadDictSlow(cdict->fastCtx, (const char*)cdict->dictContent, (int)dictSize);
|
||||
LZ4_initStreamHC(cdict->HCCtx, sizeof(LZ4_streamHC_t));
|
||||
/* note: we don't know at this point which compression level is going to be used
|
||||
* as a consequence, HCCtx is created for the more common HC mode */
|
||||
LZ4_setCompressionLevel(cdict->HCCtx, LZ4HC_CLEVEL_DEFAULT);
|
||||
LZ4_loadDictHC(cdict->HCCtx, (const char*)cdict->dictContent, (int)dictSize);
|
||||
return cdict;
|
||||
@@ -616,7 +633,6 @@ LZ4F_createCompressionContext(LZ4F_cctx** LZ4F_compressionContextPtr, unsigned v
|
||||
return LZ4F_OK_NoError;
|
||||
}
|
||||
|
||||
|
||||
LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctxPtr)
|
||||
{
|
||||
if (cctxPtr != NULL) { /* support free on NULL */
|
||||
@@ -641,7 +657,7 @@ static void LZ4F_initStream(void* ctx,
|
||||
int level,
|
||||
LZ4F_blockMode_t blockMode) {
|
||||
if (level < LZ4HC_CLEVEL_MIN) {
|
||||
if (cdict != NULL || blockMode == LZ4F_blockLinked) {
|
||||
if (cdict || blockMode == LZ4F_blockLinked) {
|
||||
/* In these cases, we will call LZ4_compress_fast_continue(),
|
||||
* which needs an already reset context. Otherwise, we'll call a
|
||||
* one-shot API. The non-continued APIs internally perform their own
|
||||
@@ -649,11 +665,18 @@ static void LZ4F_initStream(void* ctx,
|
||||
* tableType they need the context to be in. So in that case this
|
||||
* would be misguided / wasted work. */
|
||||
LZ4_resetStream_fast((LZ4_stream_t*)ctx);
|
||||
if (cdict)
|
||||
LZ4_attach_dictionary((LZ4_stream_t*)ctx, cdict->fastCtx);
|
||||
}
|
||||
LZ4_attach_dictionary((LZ4_stream_t *)ctx, cdict ? cdict->fastCtx : NULL);
|
||||
/* In these cases, we'll call a one-shot API.
|
||||
* The non-continued APIs internally perform their own resets
|
||||
* at the beginning of their calls, where they know
|
||||
* which tableType they need the context to be in.
|
||||
* Therefore, a reset here would be wasted work. */
|
||||
} else {
|
||||
LZ4_resetStreamHC_fast((LZ4_streamHC_t*)ctx, level);
|
||||
LZ4_attach_HC_dictionary((LZ4_streamHC_t *)ctx, cdict ? cdict->HCCtx : NULL);
|
||||
if (cdict)
|
||||
LZ4_attach_HC_dictionary((LZ4_streamHC_t*)ctx, cdict->HCCtx);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -668,14 +691,19 @@ static int ctxTypeID_to_size(int ctxTypeID) {
|
||||
}
|
||||
}
|
||||
|
||||
/*! LZ4F_compressBegin_usingCDict() :
|
||||
* init streaming compression AND writes frame header into @dstBuffer.
|
||||
* @dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes.
|
||||
* @return : number of bytes written into @dstBuffer for the header
|
||||
* or an error code (can be tested using LZ4F_isError())
|
||||
size_t LZ4F_cctx_size(const LZ4F_cctx* cctx) {
|
||||
if (cctx == NULL) {
|
||||
return 0;
|
||||
}
|
||||
return sizeof(*cctx) + cctx->maxBufferSize + ctxTypeID_to_size(cctx->lz4CtxAlloc);
|
||||
}
|
||||
|
||||
/* LZ4F_compressBegin_internal()
|
||||
* Note: only accepts @cdict _or_ @dictBuffer as non NULL.
|
||||
*/
|
||||
size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctxPtr,
|
||||
size_t LZ4F_compressBegin_internal(LZ4F_cctx* cctx,
|
||||
void* dstBuffer, size_t dstCapacity,
|
||||
const void* dictBuffer, size_t dictSize,
|
||||
const LZ4F_CDict* cdict,
|
||||
const LZ4F_preferences_t* preferencesPtr)
|
||||
{
|
||||
@@ -685,70 +713,85 @@ size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctxPtr,
|
||||
|
||||
RETURN_ERROR_IF(dstCapacity < maxFHSize, dstMaxSize_tooSmall);
|
||||
if (preferencesPtr == NULL) preferencesPtr = &prefNull;
|
||||
cctxPtr->prefs = *preferencesPtr;
|
||||
cctx->prefs = *preferencesPtr;
|
||||
DEBUGLOG(5, "LZ4F_compressBegin_internal: Independent_blocks=%u", cctx->prefs.frameInfo.blockMode);
|
||||
|
||||
/* cctx Management */
|
||||
{ U16 const ctxTypeID = (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) ? 1 : 2;
|
||||
{ U16 const ctxTypeID = (cctx->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) ? 1 : 2;
|
||||
int requiredSize = ctxTypeID_to_size(ctxTypeID);
|
||||
int allocatedSize = ctxTypeID_to_size(cctxPtr->lz4CtxAlloc);
|
||||
int allocatedSize = ctxTypeID_to_size(cctx->lz4CtxAlloc);
|
||||
if (allocatedSize < requiredSize) {
|
||||
/* not enough space allocated */
|
||||
LZ4F_free(cctxPtr->lz4CtxPtr, cctxPtr->cmem);
|
||||
if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) {
|
||||
LZ4F_free(cctx->lz4CtxPtr, cctx->cmem);
|
||||
if (cctx->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) {
|
||||
/* must take ownership of memory allocation,
|
||||
* in order to respect custom allocator contract */
|
||||
cctxPtr->lz4CtxPtr = LZ4F_malloc(sizeof(LZ4_stream_t), cctxPtr->cmem);
|
||||
if (cctxPtr->lz4CtxPtr)
|
||||
LZ4_initStream(cctxPtr->lz4CtxPtr, sizeof(LZ4_stream_t));
|
||||
cctx->lz4CtxPtr = LZ4F_malloc(sizeof(LZ4_stream_t), cctx->cmem);
|
||||
if (cctx->lz4CtxPtr)
|
||||
LZ4_initStream(cctx->lz4CtxPtr, sizeof(LZ4_stream_t));
|
||||
} else {
|
||||
cctxPtr->lz4CtxPtr = LZ4F_malloc(sizeof(LZ4_streamHC_t), cctxPtr->cmem);
|
||||
if (cctxPtr->lz4CtxPtr)
|
||||
LZ4_initStreamHC(cctxPtr->lz4CtxPtr, sizeof(LZ4_streamHC_t));
|
||||
cctx->lz4CtxPtr = LZ4F_malloc(sizeof(LZ4_streamHC_t), cctx->cmem);
|
||||
if (cctx->lz4CtxPtr)
|
||||
LZ4_initStreamHC(cctx->lz4CtxPtr, sizeof(LZ4_streamHC_t));
|
||||
}
|
||||
RETURN_ERROR_IF(cctxPtr->lz4CtxPtr == NULL, allocation_failed);
|
||||
cctxPtr->lz4CtxAlloc = ctxTypeID;
|
||||
cctxPtr->lz4CtxState = ctxTypeID;
|
||||
} else if (cctxPtr->lz4CtxState != ctxTypeID) {
|
||||
RETURN_ERROR_IF(cctx->lz4CtxPtr == NULL, allocation_failed);
|
||||
cctx->lz4CtxAlloc = ctxTypeID;
|
||||
cctx->lz4CtxType = ctxTypeID;
|
||||
} else if (cctx->lz4CtxType != ctxTypeID) {
|
||||
/* otherwise, a sufficient buffer is already allocated,
|
||||
* but we need to reset it to the correct context type */
|
||||
if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) {
|
||||
LZ4_initStream((LZ4_stream_t*)cctxPtr->lz4CtxPtr, sizeof(LZ4_stream_t));
|
||||
if (cctx->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) {
|
||||
LZ4_initStream((LZ4_stream_t*)cctx->lz4CtxPtr, sizeof(LZ4_stream_t));
|
||||
} else {
|
||||
LZ4_initStreamHC((LZ4_streamHC_t*)cctxPtr->lz4CtxPtr, sizeof(LZ4_streamHC_t));
|
||||
LZ4_setCompressionLevel((LZ4_streamHC_t*)cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel);
|
||||
LZ4_initStreamHC((LZ4_streamHC_t*)cctx->lz4CtxPtr, sizeof(LZ4_streamHC_t));
|
||||
LZ4_setCompressionLevel((LZ4_streamHC_t*)cctx->lz4CtxPtr, cctx->prefs.compressionLevel);
|
||||
}
|
||||
cctxPtr->lz4CtxState = ctxTypeID;
|
||||
cctx->lz4CtxType = ctxTypeID;
|
||||
} }
|
||||
|
||||
/* Buffer Management */
|
||||
if (cctxPtr->prefs.frameInfo.blockSizeID == 0)
|
||||
cctxPtr->prefs.frameInfo.blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT;
|
||||
cctxPtr->maxBlockSize = LZ4F_getBlockSize(cctxPtr->prefs.frameInfo.blockSizeID);
|
||||
if (cctx->prefs.frameInfo.blockSizeID == 0)
|
||||
cctx->prefs.frameInfo.blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT;
|
||||
cctx->maxBlockSize = LZ4F_getBlockSize(cctx->prefs.frameInfo.blockSizeID);
|
||||
|
||||
{ size_t const requiredBuffSize = preferencesPtr->autoFlush ?
|
||||
((cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 64 KB : 0) : /* only needs past data up to window size */
|
||||
cctxPtr->maxBlockSize + ((cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 128 KB : 0);
|
||||
((cctx->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 64 KB : 0) : /* only needs past data up to window size */
|
||||
cctx->maxBlockSize + ((cctx->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 128 KB : 0);
|
||||
|
||||
if (cctxPtr->maxBufferSize < requiredBuffSize) {
|
||||
cctxPtr->maxBufferSize = 0;
|
||||
LZ4F_free(cctxPtr->tmpBuff, cctxPtr->cmem);
|
||||
cctxPtr->tmpBuff = (BYTE*)LZ4F_malloc(requiredBuffSize, cctxPtr->cmem);
|
||||
RETURN_ERROR_IF(cctxPtr->tmpBuff == NULL, allocation_failed);
|
||||
cctxPtr->maxBufferSize = requiredBuffSize;
|
||||
if (cctx->maxBufferSize < requiredBuffSize) {
|
||||
cctx->maxBufferSize = 0;
|
||||
LZ4F_free(cctx->tmpBuff, cctx->cmem);
|
||||
cctx->tmpBuff = (BYTE*)LZ4F_malloc(requiredBuffSize, cctx->cmem);
|
||||
RETURN_ERROR_IF(cctx->tmpBuff == NULL, allocation_failed);
|
||||
cctx->maxBufferSize = requiredBuffSize;
|
||||
} }
|
||||
cctxPtr->tmpIn = cctxPtr->tmpBuff;
|
||||
cctxPtr->tmpInSize = 0;
|
||||
(void)XXH32_reset(&(cctxPtr->xxh), 0);
|
||||
cctx->tmpIn = cctx->tmpBuff;
|
||||
cctx->tmpInSize = 0;
|
||||
(void)XXH32_reset(&(cctx->xxh), 0);
|
||||
|
||||
/* context init */
|
||||
cctxPtr->cdict = cdict;
|
||||
if (cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) {
|
||||
cctx->cdict = cdict;
|
||||
if (cctx->prefs.frameInfo.blockMode == LZ4F_blockLinked) {
|
||||
/* frame init only for blockLinked : blockIndependent will be init at each block */
|
||||
LZ4F_initStream(cctxPtr->lz4CtxPtr, cdict, cctxPtr->prefs.compressionLevel, LZ4F_blockLinked);
|
||||
LZ4F_initStream(cctx->lz4CtxPtr, cdict, cctx->prefs.compressionLevel, LZ4F_blockLinked);
|
||||
}
|
||||
if (preferencesPtr->compressionLevel >= LZ4HC_CLEVEL_MIN) {
|
||||
LZ4_favorDecompressionSpeed((LZ4_streamHC_t*)cctxPtr->lz4CtxPtr, (int)preferencesPtr->favorDecSpeed);
|
||||
LZ4_favorDecompressionSpeed((LZ4_streamHC_t*)cctx->lz4CtxPtr, (int)preferencesPtr->favorDecSpeed);
|
||||
}
|
||||
if (dictBuffer) {
|
||||
assert(cdict == NULL);
|
||||
RETURN_ERROR_IF(dictSize > INT_MAX, parameter_invalid);
|
||||
if (cctx->lz4CtxType == ctxFast) {
|
||||
/* lz4 fast*/
|
||||
LZ4_loadDict((LZ4_stream_t*)cctx->lz4CtxPtr, (const char*)dictBuffer, (int)dictSize);
|
||||
} else {
|
||||
/* lz4hc */
|
||||
assert(cctx->lz4CtxType == ctxHC);
|
||||
LZ4_loadDictHC((LZ4_streamHC_t*)cctx->lz4CtxPtr, (const char*)dictBuffer, (int)dictSize);
|
||||
}
|
||||
}
|
||||
|
||||
/* Stage 2 : Write Frame Header */
|
||||
|
||||
/* Magic Number */
|
||||
LZ4F_writeLE32(dstPtr, LZ4F_MAGICNUMBER);
|
||||
@@ -757,22 +800,22 @@ size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctxPtr,
|
||||
|
||||
/* FLG Byte */
|
||||
*dstPtr++ = (BYTE)(((1 & _2BITS) << 6) /* Version('01') */
|
||||
+ ((cctxPtr->prefs.frameInfo.blockMode & _1BIT ) << 5)
|
||||
+ ((cctxPtr->prefs.frameInfo.blockChecksumFlag & _1BIT ) << 4)
|
||||
+ ((unsigned)(cctxPtr->prefs.frameInfo.contentSize > 0) << 3)
|
||||
+ ((cctxPtr->prefs.frameInfo.contentChecksumFlag & _1BIT ) << 2)
|
||||
+ (cctxPtr->prefs.frameInfo.dictID > 0) );
|
||||
+ ((cctx->prefs.frameInfo.blockMode & _1BIT ) << 5)
|
||||
+ ((cctx->prefs.frameInfo.blockChecksumFlag & _1BIT ) << 4)
|
||||
+ ((unsigned)(cctx->prefs.frameInfo.contentSize > 0) << 3)
|
||||
+ ((cctx->prefs.frameInfo.contentChecksumFlag & _1BIT ) << 2)
|
||||
+ (cctx->prefs.frameInfo.dictID > 0) );
|
||||
/* BD Byte */
|
||||
*dstPtr++ = (BYTE)((cctxPtr->prefs.frameInfo.blockSizeID & _3BITS) << 4);
|
||||
*dstPtr++ = (BYTE)((cctx->prefs.frameInfo.blockSizeID & _3BITS) << 4);
|
||||
/* Optional Frame content size field */
|
||||
if (cctxPtr->prefs.frameInfo.contentSize) {
|
||||
LZ4F_writeLE64(dstPtr, cctxPtr->prefs.frameInfo.contentSize);
|
||||
if (cctx->prefs.frameInfo.contentSize) {
|
||||
LZ4F_writeLE64(dstPtr, cctx->prefs.frameInfo.contentSize);
|
||||
dstPtr += 8;
|
||||
cctxPtr->totalInSize = 0;
|
||||
cctx->totalInSize = 0;
|
||||
}
|
||||
/* Optional dictionary ID field */
|
||||
if (cctxPtr->prefs.frameInfo.dictID) {
|
||||
LZ4F_writeLE32(dstPtr, cctxPtr->prefs.frameInfo.dictID);
|
||||
if (cctx->prefs.frameInfo.dictID) {
|
||||
LZ4F_writeLE32(dstPtr, cctx->prefs.frameInfo.dictID);
|
||||
dstPtr += 4;
|
||||
}
|
||||
/* Header CRC Byte */
|
||||
@@ -780,24 +823,54 @@ size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctxPtr,
|
||||
dstPtr++;
|
||||
}
|
||||
|
||||
cctxPtr->cStage = 1; /* header written, now request input data block */
|
||||
cctx->cStage = 1; /* header written, now request input data block */
|
||||
return (size_t)(dstPtr - dstStart);
|
||||
}
|
||||
|
||||
|
||||
/*! LZ4F_compressBegin() :
|
||||
* init streaming compression AND writes frame header into @dstBuffer.
|
||||
* @dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes.
|
||||
* @preferencesPtr can be NULL, in which case default parameters are selected.
|
||||
* @return : number of bytes written into dstBuffer for the header
|
||||
* or an error code (can be tested using LZ4F_isError())
|
||||
*/
|
||||
size_t LZ4F_compressBegin(LZ4F_cctx* cctxPtr,
|
||||
size_t LZ4F_compressBegin(LZ4F_cctx* cctx,
|
||||
void* dstBuffer, size_t dstCapacity,
|
||||
const LZ4F_preferences_t* preferencesPtr)
|
||||
{
|
||||
return LZ4F_compressBegin_usingCDict(cctxPtr, dstBuffer, dstCapacity,
|
||||
NULL, preferencesPtr);
|
||||
return LZ4F_compressBegin_internal(cctx, dstBuffer, dstCapacity,
|
||||
NULL, 0,
|
||||
NULL, preferencesPtr);
|
||||
}
|
||||
|
||||
/* LZ4F_compressBegin_usingDictOnce:
|
||||
* Hidden implementation,
|
||||
* employed for multi-threaded compression
|
||||
* when frame defines linked blocks */
|
||||
size_t LZ4F_compressBegin_usingDictOnce(LZ4F_cctx* cctx,
|
||||
void* dstBuffer, size_t dstCapacity,
|
||||
const void* dict, size_t dictSize,
|
||||
const LZ4F_preferences_t* preferencesPtr)
|
||||
{
|
||||
return LZ4F_compressBegin_internal(cctx, dstBuffer, dstCapacity,
|
||||
dict, dictSize,
|
||||
NULL, preferencesPtr);
|
||||
}
|
||||
|
||||
size_t LZ4F_compressBegin_usingDict(LZ4F_cctx* cctx,
|
||||
void* dstBuffer, size_t dstCapacity,
|
||||
const void* dict, size_t dictSize,
|
||||
const LZ4F_preferences_t* preferencesPtr)
|
||||
{
|
||||
/* note : incorrect implementation :
|
||||
* this will only use the dictionary once,
|
||||
* instead of once *per* block when frames defines independent blocks */
|
||||
return LZ4F_compressBegin_usingDictOnce(cctx, dstBuffer, dstCapacity,
|
||||
dict, dictSize,
|
||||
preferencesPtr);
|
||||
}
|
||||
|
||||
size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctx,
|
||||
void* dstBuffer, size_t dstCapacity,
|
||||
const LZ4F_CDict* cdict,
|
||||
const LZ4F_preferences_t* preferencesPtr)
|
||||
{
|
||||
return LZ4F_compressBegin_internal(cctx, dstBuffer, dstCapacity,
|
||||
NULL, 0,
|
||||
cdict, preferencesPtr);
|
||||
}
|
||||
|
||||
|
||||
@@ -829,10 +902,11 @@ static size_t LZ4F_makeBlock(void* dst,
|
||||
LZ4F_blockChecksum_t crcFlag)
|
||||
{
|
||||
BYTE* const cSizePtr = (BYTE*)dst;
|
||||
int dstCapacity = (srcSize > 1) ? (int)srcSize - 1 : 1;
|
||||
U32 cSize;
|
||||
assert(compress != NULL);
|
||||
cSize = (U32)compress(lz4ctx, (const char*)src, (char*)(cSizePtr+BHSize),
|
||||
(int)(srcSize), (int)(srcSize-1),
|
||||
(int)srcSize, dstCapacity,
|
||||
level, cdict);
|
||||
|
||||
if (cSize == 0 || cSize >= srcSize) {
|
||||
@@ -891,9 +965,10 @@ static int LZ4F_doNotCompressBlock(void* ctx, const char* src, char* dst, int sr
|
||||
return 0;
|
||||
}
|
||||
|
||||
static compressFunc_t LZ4F_selectCompression(LZ4F_blockMode_t blockMode, int level, LZ4F_blockCompression_t compressMode)
|
||||
static compressFunc_t LZ4F_selectCompression(LZ4F_blockMode_t blockMode, int level, LZ4F_BlockCompressMode_e compressMode)
|
||||
{
|
||||
if (compressMode == LZ4B_UNCOMPRESSED) return LZ4F_doNotCompressBlock;
|
||||
if (compressMode == LZ4B_UNCOMPRESSED)
|
||||
return LZ4F_doNotCompressBlock;
|
||||
if (level < LZ4HC_CLEVEL_MIN) {
|
||||
if (blockMode == LZ4F_blockIndependent) return LZ4F_compressBlock;
|
||||
return LZ4F_compressBlock_continue;
|
||||
@@ -902,12 +977,13 @@ static compressFunc_t LZ4F_selectCompression(LZ4F_blockMode_t blockMode, int lev
|
||||
return LZ4F_compressBlockHC_continue;
|
||||
}
|
||||
|
||||
/* Save history (up to 64KB) into @tmpBuff */
|
||||
static int LZ4F_localSaveDict(LZ4F_cctx_t* cctxPtr)
|
||||
/* Save or shorten history (up to 64KB) into @tmpBuff */
|
||||
static void LZ4F_localSaveDict(LZ4F_cctx_t* cctxPtr)
|
||||
{
|
||||
if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN)
|
||||
return LZ4_saveDict ((LZ4_stream_t*)(cctxPtr->lz4CtxPtr), (char*)(cctxPtr->tmpBuff), 64 KB);
|
||||
return LZ4_saveDictHC ((LZ4_streamHC_t*)(cctxPtr->lz4CtxPtr), (char*)(cctxPtr->tmpBuff), 64 KB);
|
||||
int const dictSize = (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) ?
|
||||
LZ4_saveDict ((LZ4_stream_t*)(cctxPtr->lz4CtxPtr), (char*)(cctxPtr->tmpBuff), 64 KB) :
|
||||
LZ4_saveDictHC ((LZ4_streamHC_t*)(cctxPtr->lz4CtxPtr), (char*)(cctxPtr->tmpBuff), 64 KB);
|
||||
cctxPtr->tmpIn = cctxPtr->tmpBuff + dictSize;
|
||||
}
|
||||
|
||||
typedef enum { notDone, fromTmpBuffer, fromSrcBuffer } LZ4F_lastBlockStatus;
|
||||
@@ -915,7 +991,7 @@ typedef enum { notDone, fromTmpBuffer, fromSrcBuffer } LZ4F_lastBlockStatus;
|
||||
static const LZ4F_compressOptions_t k_cOptionsNull = { 0, { 0, 0, 0 } };
|
||||
|
||||
|
||||
/*! LZ4F_compressUpdateImpl() :
|
||||
/*! LZ4F_compressUpdateImpl() :
|
||||
* LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary.
|
||||
* When successful, the function always entirely consumes @srcBuffer.
|
||||
* src data is either buffered or compressed into @dstBuffer.
|
||||
@@ -931,11 +1007,11 @@ static size_t LZ4F_compressUpdateImpl(LZ4F_cctx* cctxPtr,
|
||||
void* dstBuffer, size_t dstCapacity,
|
||||
const void* srcBuffer, size_t srcSize,
|
||||
const LZ4F_compressOptions_t* compressOptionsPtr,
|
||||
LZ4F_blockCompression_t blockCompression)
|
||||
LZ4F_BlockCompressMode_e blockCompression)
|
||||
{
|
||||
size_t const blockSize = cctxPtr->maxBlockSize;
|
||||
const BYTE* srcPtr = (const BYTE*)srcBuffer;
|
||||
const BYTE* const srcEnd = srcPtr + srcSize;
|
||||
const BYTE* const srcEnd = srcSize ? (assert(srcPtr!=NULL), srcPtr + srcSize) : srcPtr;
|
||||
BYTE* const dstStart = (BYTE*)dstBuffer;
|
||||
BYTE* dstPtr = dstStart;
|
||||
LZ4F_lastBlockStatus lastBlockCompressed = notDone;
|
||||
@@ -951,10 +1027,10 @@ static size_t LZ4F_compressUpdateImpl(LZ4F_cctx* cctxPtr,
|
||||
RETURN_ERROR(dstMaxSize_tooSmall);
|
||||
|
||||
/* flush currently written block, to continue with new block compression */
|
||||
if (cctxPtr->blockCompression != blockCompression) {
|
||||
if (cctxPtr->blockCompressMode != blockCompression) {
|
||||
bytesWritten = LZ4F_flush(cctxPtr, dstBuffer, dstCapacity, compressOptionsPtr);
|
||||
dstPtr += bytesWritten;
|
||||
cctxPtr->blockCompression = blockCompression;
|
||||
cctxPtr->blockCompressMode = blockCompression;
|
||||
}
|
||||
|
||||
if (compressOptionsPtr == NULL) compressOptionsPtr = &k_cOptionsNull;
|
||||
@@ -1013,9 +1089,7 @@ static size_t LZ4F_compressUpdateImpl(LZ4F_cctx* cctxPtr,
|
||||
if (compressOptionsPtr->stableSrc) {
|
||||
cctxPtr->tmpIn = cctxPtr->tmpBuff; /* src is stable : dictionary remains in src across invocations */
|
||||
} else {
|
||||
int const realDictSize = LZ4F_localSaveDict(cctxPtr);
|
||||
assert(0 <= realDictSize && realDictSize <= 64 KB);
|
||||
cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
|
||||
LZ4F_localSaveDict(cctxPtr);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1025,8 +1099,7 @@ static size_t LZ4F_compressUpdateImpl(LZ4F_cctx* cctxPtr,
|
||||
{
|
||||
/* only preserve 64KB within internal buffer. Ensures there is enough room for next block.
|
||||
* note: this situation necessarily implies lastBlockCompressed==fromTmpBuffer */
|
||||
int const realDictSize = LZ4F_localSaveDict(cctxPtr);
|
||||
cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
|
||||
LZ4F_localSaveDict(cctxPtr);
|
||||
assert((cctxPtr->tmpIn + blockSize) <= (cctxPtr->tmpBuff + cctxPtr->maxBufferSize));
|
||||
}
|
||||
|
||||
@@ -1068,13 +1141,9 @@ size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr,
|
||||
compressOptionsPtr, LZ4B_COMPRESSED);
|
||||
}
|
||||
|
||||
/*! LZ4F_compressUpdate() :
|
||||
* LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary.
|
||||
* When successful, the function always entirely consumes @srcBuffer.
|
||||
* src data is either buffered or compressed into @dstBuffer.
|
||||
* If previously an uncompressed block was written, buffered data is flushed
|
||||
* before appending compressed data is continued.
|
||||
* This is only supported when LZ4F_blockIndependent is used
|
||||
/*! LZ4F_uncompressedUpdate() :
|
||||
* Same as LZ4F_compressUpdate(), but requests blocks to be sent uncompressed.
|
||||
* This symbol is only supported when LZ4F_blockIndependent is used
|
||||
* @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr).
|
||||
* @compressOptionsPtr is optional : provide NULL to mean "default".
|
||||
* @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered.
|
||||
@@ -1084,7 +1153,8 @@ size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr,
|
||||
size_t LZ4F_uncompressedUpdate(LZ4F_cctx* cctxPtr,
|
||||
void* dstBuffer, size_t dstCapacity,
|
||||
const void* srcBuffer, size_t srcSize,
|
||||
const LZ4F_compressOptions_t* compressOptionsPtr) {
|
||||
const LZ4F_compressOptions_t* compressOptionsPtr)
|
||||
{
|
||||
return LZ4F_compressUpdateImpl(cctxPtr,
|
||||
dstBuffer, dstCapacity,
|
||||
srcBuffer, srcSize,
|
||||
@@ -1094,7 +1164,7 @@ size_t LZ4F_uncompressedUpdate(LZ4F_cctx* cctxPtr,
|
||||
|
||||
/*! LZ4F_flush() :
|
||||
* When compressed data must be sent immediately, without waiting for a block to be filled,
|
||||
* invoke LZ4_flush(), which will immediately compress any remaining data stored within LZ4F_cctx.
|
||||
* invoke LZ4F_flush(), which will immediately compress any remaining data stored within LZ4F_cctx.
|
||||
* The result of the function is the number of bytes written into dstBuffer.
|
||||
* It can be zero, this means there was no data left within LZ4F_cctx.
|
||||
* The function outputs an error code if it fails (can be tested using LZ4F_isError())
|
||||
@@ -1108,13 +1178,15 @@ size_t LZ4F_flush(LZ4F_cctx* cctxPtr,
|
||||
BYTE* dstPtr = dstStart;
|
||||
compressFunc_t compress;
|
||||
|
||||
DEBUGLOG(5, "LZ4F_flush: %zu buffered bytes (saved dict size = %i) (dstCapacity=%u)",
|
||||
cctxPtr->tmpInSize, (int)(cctxPtr->tmpIn - cctxPtr->tmpBuff), (unsigned)dstCapacity);
|
||||
if (cctxPtr->tmpInSize == 0) return 0; /* nothing to flush */
|
||||
RETURN_ERROR_IF(cctxPtr->cStage != 1, compressionState_uninitialized);
|
||||
RETURN_ERROR_IF(dstCapacity < (cctxPtr->tmpInSize + BHSize + BFSize), dstMaxSize_tooSmall);
|
||||
(void)compressOptionsPtr; /* not useful (yet) */
|
||||
|
||||
/* select compression function */
|
||||
compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel, cctxPtr->blockCompression);
|
||||
compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel, cctxPtr->blockCompressMode);
|
||||
|
||||
/* compress tmp buffer */
|
||||
dstPtr += LZ4F_makeBlock(dstPtr,
|
||||
@@ -1129,9 +1201,9 @@ size_t LZ4F_flush(LZ4F_cctx* cctxPtr,
|
||||
cctxPtr->tmpInSize = 0;
|
||||
|
||||
/* keep tmpIn within limits */
|
||||
if ((cctxPtr->tmpIn + cctxPtr->maxBlockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize)) { /* necessarily LZ4F_blockLinked */
|
||||
int const realDictSize = LZ4F_localSaveDict(cctxPtr);
|
||||
cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
|
||||
if ((cctxPtr->tmpIn + cctxPtr->maxBlockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize)) {
|
||||
assert(cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked);
|
||||
LZ4F_localSaveDict(cctxPtr);
|
||||
}
|
||||
|
||||
return (size_t)(dstPtr - dstStart);
|
||||
@@ -1169,7 +1241,7 @@ size_t LZ4F_compressEnd(LZ4F_cctx* cctxPtr,
|
||||
if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled) {
|
||||
U32 const xxh = XXH32_digest(&(cctxPtr->xxh));
|
||||
RETURN_ERROR_IF(dstCapacity < 8, dstMaxSize_tooSmall);
|
||||
DEBUGLOG(5,"Writing 32-bit content checksum");
|
||||
DEBUGLOG(5,"Writing 32-bit content checksum (0x%0X)", xxh);
|
||||
LZ4F_writeLE32(dstPtr, xxh);
|
||||
dstPtr+=4; /* content Checksum */
|
||||
}
|
||||
@@ -1266,15 +1338,25 @@ LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx)
|
||||
return result;
|
||||
}
|
||||
|
||||
size_t LZ4F_dctx_size(const LZ4F_dctx* dctx) {
|
||||
if (dctx == NULL) {
|
||||
return 0;
|
||||
}
|
||||
return sizeof(*dctx)
|
||||
+ (dctx->tmpIn != NULL ? dctx->maxBlockSize + BFSize : 0)
|
||||
+ (dctx->tmpOutBuffer != NULL ? dctx->maxBufferSize : 0);
|
||||
}
|
||||
|
||||
|
||||
/*==--- Streaming Decompression operations ---==*/
|
||||
|
||||
void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx)
|
||||
{
|
||||
DEBUGLOG(5, "LZ4F_resetDecompressionContext");
|
||||
dctx->dStage = dstage_getFrameHeader;
|
||||
dctx->dict = NULL;
|
||||
dctx->dictSize = 0;
|
||||
dctx->skipChecksum = 0;
|
||||
dctx->frameRemainingSize = 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -1331,6 +1413,7 @@ static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize
|
||||
if (((FLG>>1)&_1BIT) != 0) RETURN_ERROR(reservedFlag_set); /* Reserved bit */
|
||||
if (version != 1) RETURN_ERROR(headerVersion_wrong); /* Version Number, only supported value */
|
||||
}
|
||||
DEBUGLOG(6, "contentSizeFlag: %u", contentSizeFlag);
|
||||
|
||||
/* Frame Header Size */
|
||||
frameHeaderSize = minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0);
|
||||
@@ -1367,8 +1450,9 @@ static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize
|
||||
dctx->frameInfo.contentChecksumFlag = (LZ4F_contentChecksum_t)contentChecksumFlag;
|
||||
dctx->frameInfo.blockSizeID = (LZ4F_blockSizeID_t)blockSizeID;
|
||||
dctx->maxBlockSize = LZ4F_getBlockSize((LZ4F_blockSizeID_t)blockSizeID);
|
||||
if (contentSizeFlag)
|
||||
if (contentSizeFlag) {
|
||||
dctx->frameRemainingSize = dctx->frameInfo.contentSize = LZ4F_readLE64(srcPtr+6);
|
||||
}
|
||||
if (dictIDFlag)
|
||||
dctx->frameInfo.dictID = LZ4F_readLE32(srcPtr + frameHeaderSize - 5);
|
||||
|
||||
@@ -1427,6 +1511,10 @@ LZ4F_errorCode_t LZ4F_getFrameInfo(LZ4F_dctx* dctx,
|
||||
LZ4F_frameInfo_t* frameInfoPtr,
|
||||
const void* srcBuffer, size_t* srcSizePtr)
|
||||
{
|
||||
assert(dctx != NULL);
|
||||
RETURN_ERROR_IF(frameInfoPtr == NULL, parameter_null);
|
||||
RETURN_ERROR_IF(srcSizePtr == NULL, parameter_null);
|
||||
|
||||
LZ4F_STATIC_ASSERT(dstage_getFrameHeader < dstage_storeFrameHeader);
|
||||
if (dctx->dStage > dstage_storeFrameHeader) {
|
||||
/* frameInfo already decoded */
|
||||
@@ -1568,7 +1656,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
|
||||
size_t nextSrcSizeHint = 1;
|
||||
|
||||
|
||||
DEBUGLOG(5, "LZ4F_decompress : %p,%u => %p,%u",
|
||||
DEBUGLOG(5, "LZ4F_decompress: src[%p](%u) => dst[%p](%u)",
|
||||
srcBuffer, (unsigned)*srcSizePtr, dstBuffer, (unsigned)*dstSizePtr);
|
||||
if (dstBuffer == NULL) assert(*dstSizePtr == 0);
|
||||
MEM_INIT(&optionsNull, 0, sizeof(optionsNull));
|
||||
@@ -1957,6 +2045,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
|
||||
if (!dctx->skipChecksum) {
|
||||
U32 const readCRC = LZ4F_readLE32(selectedIn);
|
||||
U32 const resultCRC = XXH32_digest(&(dctx->xxh));
|
||||
DEBUGLOG(4, "frame checksum: stored 0x%0X vs 0x%0X processed", readCRC, resultCRC);
|
||||
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
|
||||
RETURN_ERROR_IF(readCRC != resultCRC, contentChecksum_invalid);
|
||||
#else
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
/*
|
||||
LZ4F - LZ4-Frame library
|
||||
Header File
|
||||
Copyright (C) 2011-2020, Yann Collet.
|
||||
Copyright (c) Yann Collet. All rights reserved.
|
||||
|
||||
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
@@ -173,16 +174,16 @@ typedef LZ4F_contentChecksum_t contentChecksum_t;
|
||||
* setting all parameters to default.
|
||||
* It's then possible to update selectively some parameters */
|
||||
typedef struct {
|
||||
LZ4F_blockSizeID_t blockSizeID; /* max64KB, max256KB, max1MB, max4MB; 0 == default */
|
||||
LZ4F_blockMode_t blockMode; /* LZ4F_blockLinked, LZ4F_blockIndependent; 0 == default */
|
||||
LZ4F_contentChecksum_t contentChecksumFlag; /* 1: frame terminated with 32-bit checksum of decompressed data; 0: disabled (default) */
|
||||
LZ4F_blockSizeID_t blockSizeID; /* max64KB, max256KB, max1MB, max4MB; 0 == default (LZ4F_max64KB) */
|
||||
LZ4F_blockMode_t blockMode; /* LZ4F_blockLinked, LZ4F_blockIndependent; 0 == default (LZ4F_blockLinked) */
|
||||
LZ4F_contentChecksum_t contentChecksumFlag; /* 1: add a 32-bit checksum of frame's decompressed data; 0 == default (disabled) */
|
||||
LZ4F_frameType_t frameType; /* read-only field : LZ4F_frame or LZ4F_skippableFrame */
|
||||
unsigned long long contentSize; /* Size of uncompressed content ; 0 == unknown */
|
||||
unsigned dictID; /* Dictionary ID, sent by compressor to help decoder select correct dictionary; 0 == no dictID provided */
|
||||
LZ4F_blockChecksum_t blockChecksumFlag; /* 1: each block followed by a checksum of block's compressed data; 0: disabled (default) */
|
||||
LZ4F_blockChecksum_t blockChecksumFlag; /* 1: each block followed by a checksum of block's compressed data; 0 == default (disabled) */
|
||||
} LZ4F_frameInfo_t;
|
||||
|
||||
#define LZ4F_INIT_FRAMEINFO { LZ4F_default, LZ4F_blockLinked, LZ4F_noContentChecksum, LZ4F_frame, 0ULL, 0U, LZ4F_noBlockChecksum } /* v1.8.3+ */
|
||||
#define LZ4F_INIT_FRAMEINFO { LZ4F_max64KB, LZ4F_blockLinked, LZ4F_noContentChecksum, LZ4F_frame, 0ULL, 0U, LZ4F_noBlockChecksum } /* v1.8.3+ */
|
||||
|
||||
/*! LZ4F_preferences_t :
|
||||
* makes it possible to supply advanced compression instructions to streaming interface.
|
||||
@@ -204,16 +205,6 @@ typedef struct {
|
||||
* Simple compression function
|
||||
***********************************/
|
||||
|
||||
LZ4FLIB_API int LZ4F_compressionLevel_max(void); /* v1.8.0+ */
|
||||
|
||||
/*! LZ4F_compressFrameBound() :
|
||||
* Returns the maximum possible compressed size with LZ4F_compressFrame() given srcSize and preferences.
|
||||
* `preferencesPtr` is optional. It can be replaced by NULL, in which case, the function will assume default preferences.
|
||||
* Note : this result is only usable with LZ4F_compressFrame().
|
||||
* It may also be relevant to LZ4F_compressUpdate() _only if_ no flush() operation is ever performed.
|
||||
*/
|
||||
LZ4FLIB_API size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr);
|
||||
|
||||
/*! LZ4F_compressFrame() :
|
||||
* Compress srcBuffer content into an LZ4-compressed frame.
|
||||
* It's a one shot operation, all input content is consumed, and all output is generated.
|
||||
@@ -235,6 +226,20 @@ LZ4FLIB_API size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity,
|
||||
const void* srcBuffer, size_t srcSize,
|
||||
const LZ4F_preferences_t* preferencesPtr);
|
||||
|
||||
/*! LZ4F_compressFrameBound() :
|
||||
* Returns the maximum possible compressed size with LZ4F_compressFrame() given srcSize and preferences.
|
||||
* `preferencesPtr` is optional. It can be replaced by NULL, in which case, the function will assume default preferences.
|
||||
* Note : this result is only usable with LZ4F_compressFrame().
|
||||
* It may also be relevant to LZ4F_compressUpdate() _only if_ no flush() operation is ever performed.
|
||||
*/
|
||||
LZ4FLIB_API size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr);
|
||||
|
||||
|
||||
/*! LZ4F_compressionLevel_max() :
|
||||
* @return maximum allowed compression level (currently: 12)
|
||||
*/
|
||||
LZ4FLIB_API int LZ4F_compressionLevel_max(void); /* v1.8.0+ */
|
||||
|
||||
|
||||
/*-***********************************
|
||||
* Advanced compression functions
|
||||
@@ -285,6 +290,9 @@ LZ4FLIB_API LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctx);
|
||||
/* Size in bytes of the content checksum. */
|
||||
#define LZ4F_CONTENT_CHECKSUM_SIZE 4
|
||||
|
||||
/* Size in bytes of the endmark. */
|
||||
#define LZ4F_ENDMARK_SIZE 4
|
||||
|
||||
/*! LZ4F_compressBegin() :
|
||||
* will write the frame header into dstBuffer.
|
||||
* dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes.
|
||||
@@ -365,8 +373,9 @@ typedef struct LZ4F_dctx_s LZ4F_dctx; /* incomplete type */
|
||||
typedef LZ4F_dctx* LZ4F_decompressionContext_t; /* compatibility with previous API versions */
|
||||
|
||||
typedef struct {
|
||||
unsigned stableDst; /* pledges that last 64KB decompressed data will remain available unmodified between invocations.
|
||||
* This optimization skips storage operations in tmp buffers. */
|
||||
unsigned stableDst; /* pledges that last 64KB decompressed data is present right before @dstBuffer pointer.
|
||||
* This optimization skips internal storage operations.
|
||||
* Once set, this pledge must remain valid up to the end of current frame. */
|
||||
unsigned skipChecksums; /* disable checksum calculation and verification, even when one is present in frame, to save CPU time.
|
||||
* Setting this option to 1 once disables all checksums for the rest of the frame. */
|
||||
unsigned reserved1; /* must be set to zero for forward compatibility */
|
||||
@@ -508,6 +517,109 @@ LZ4F_decompress(LZ4F_dctx* dctx,
|
||||
LZ4FLIB_API void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx); /* always successful */
|
||||
|
||||
|
||||
/**********************************
|
||||
* Dictionary compression API
|
||||
*********************************/
|
||||
|
||||
/* A Dictionary is useful for the compression of small messages (KB range).
 * It dramatically improves compression efficiency.
 *
 * LZ4 can ingest any input as dictionary, though only the last 64 KB are useful.
 * Better results are generally achieved by using Zstandard's Dictionary Builder
 * to generate a high-quality dictionary from a set of samples.
 *
 * The same dictionary will have to be used on the decompression side
 * for decoding to be successful.
 * To help identify the correct dictionary at decoding stage,
 * the frame header allows optional embedding of a dictID field.
 */
|
||||
|
||||
/*! LZ4F_compressBegin_usingDict() : stable since v1.10
 * Inits dictionary compression streaming, and writes the frame header into dstBuffer.
 * @dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes.
 * @prefsPtr is optional : one may provide NULL as argument,
 * however, it's the only way to provide dictID in the frame header.
 * @dictBuffer must outlive the compression session.
 * @return : number of bytes written into dstBuffer for the header,
 *           or an error code (which can be tested using LZ4F_isError())
 * NOTE: The LZ4Frame spec allows each independent block to be compressed with the dictionary,
 *       but this entry supports a more limited scenario, where only the first block uses the dictionary.
 *       This is still useful for small data, which only need one block anyway.
 *       For larger inputs, one may be more interested in LZ4F_compressFrame_usingCDict() below.
 */
LZ4FLIB_API size_t
LZ4F_compressBegin_usingDict(LZ4F_cctx* cctx,
                             void* dstBuffer, size_t dstCapacity,
                             const void* dictBuffer, size_t dictSize,
                             const LZ4F_preferences_t* prefsPtr);
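/* Usage sketch (illustrative only, not part of this header): streaming compression
 * seeded with a raw dictionary buffer. Assumes lz4frame.h is included and that
 * dstCapacity >= LZ4F_HEADER_SIZE_MAX + LZ4F_compressBound(srcSize, NULL);
 * buffer names are placeholders. */
static size_t example_compress_with_dict(void* dst, size_t dstCapacity,
                                         const void* src, size_t srcSize,
                                         const void* dictBuf, size_t dictLen)
{
    LZ4F_cctx* cctx;
    size_t pos = 0, r;
    if (LZ4F_isError(LZ4F_createCompressionContext(&cctx, LZ4F_VERSION))) return 0;
    /* writes the frame header; dictBuf must outlive the whole session */
    r = LZ4F_compressBegin_usingDict(cctx, dst, dstCapacity, dictBuf, dictLen, NULL);
    if (!LZ4F_isError(r)) {
        pos += r;
        r = LZ4F_compressUpdate(cctx, (char*)dst + pos, dstCapacity - pos, src, srcSize, NULL);
    }
    if (!LZ4F_isError(r)) {
        pos += r;
        r = LZ4F_compressEnd(cctx, (char*)dst + pos, dstCapacity - pos, NULL);
    }
    LZ4F_freeCompressionContext(cctx);
    return LZ4F_isError(r) ? 0 : pos + r;
}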
|
||||
|
||||
/*! LZ4F_decompress_usingDict() : stable since v1.10
 * Same as LZ4F_decompress(), using a predefined dictionary.
 * Dictionary is used "in place", without any preprocessing.
** It must remain accessible throughout the entire frame decoding. */
LZ4FLIB_API size_t
LZ4F_decompress_usingDict(LZ4F_dctx* dctxPtr,
                          void* dstBuffer, size_t* dstSizePtr,
                          const void* srcBuffer, size_t* srcSizePtr,
                          const void* dict, size_t dictSize,
                          const LZ4F_decompressOptions_t* decompressOptionsPtr);
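/* Usage sketch (illustrative only): decoding a small frame produced with the same
 * dictionary. Single-call case; when the return value (a size hint) is non-zero,
 * real code keeps looping with more input. Buffer names are placeholders. */
static size_t example_decompress_with_dict(void* dst, size_t dstCapacity,
                                           const void* src, size_t srcSize,
                                           const void* dictBuf, size_t dictLen)
{
    LZ4F_dctx* dctx;
    size_t dstSize = dstCapacity, consumed = srcSize, hint;
    if (LZ4F_isError(LZ4F_createDecompressionContext(&dctx, LZ4F_VERSION))) return 0;
    /* dictBuf is used in place and must stay valid for the whole frame */
    hint = LZ4F_decompress_usingDict(dctx, dst, &dstSize,
                                     src, &consumed,
                                     dictBuf, dictLen, NULL);
    LZ4F_freeDecompressionContext(dctx);
    return LZ4F_isError(hint) ? 0 : dstSize;   /* bytes regenerated into dst */
}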
|
||||
|
||||
/*****************************************
 * Bulk processing dictionary compression
 *****************************************/

/* Loading a dictionary has a cost, since it involves construction of tables.
 * The Bulk processing dictionary API makes it possible to share this cost
 * over an arbitrary number of compression jobs, even concurrently,
 * markedly improving compression latency for these cases.
 *
 * Note that there is no corresponding bulk API for the decompression side,
 * because dictionary does not carry any initialization cost for decompression.
 * Use the regular LZ4F_decompress_usingDict() there.
 */
typedef struct LZ4F_CDict_s LZ4F_CDict;

/*! LZ4_createCDict() : stable since v1.10
 * When compressing multiple messages / blocks using the same dictionary, it's recommended to initialize it just once.
 * LZ4_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
 * LZ4_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
 * @dictBuffer can be released after LZ4_CDict creation, since its content is copied within CDict. */
LZ4FLIB_API LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize);
LZ4FLIB_API void        LZ4F_freeCDict(LZ4F_CDict* CDict);
|
||||
|
||||
/*! LZ4_compressFrame_usingCDict() : stable since v1.10
 * Compress an entire srcBuffer into a valid LZ4 frame using a digested Dictionary.
 * @cctx must point to a context created by LZ4F_createCompressionContext().
 * If @cdict==NULL, compress without a dictionary.
 * @dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
 * If this condition is not respected, function will fail (@return an errorCode).
 * The LZ4F_preferences_t structure is optional : one may provide NULL as argument,
 * but it's not recommended, as it's the only way to provide @dictID in the frame header.
 * @return : number of bytes written into dstBuffer.
 *           or an error code if it fails (can be tested using LZ4F_isError())
 * Note: for larger inputs generating multiple independent blocks,
 *       this entry point uses the dictionary for each block. */
LZ4FLIB_API size_t
LZ4F_compressFrame_usingCDict(LZ4F_cctx* cctx,
                              void* dst, size_t dstCapacity,
                              const void* src, size_t srcSize,
                              const LZ4F_CDict* cdict,
                              const LZ4F_preferences_t* preferencesPtr);
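/* Usage sketch (illustrative only): digest the dictionary once, then reuse it for
 * many small frames. In real code the CDict is kept alive and shared across jobs;
 * it is freed here only to keep the sketch self-contained. Assumes dstCapacity >=
 * LZ4F_compressFrameBound(srcSize, NULL); names are placeholders. */
static size_t example_frame_with_cdict(LZ4F_cctx* cctx,
                                       void* dst, size_t dstCapacity,
                                       const void* src, size_t srcSize,
                                       const void* dictBuf, size_t dictLen)
{
    LZ4F_CDict* const cdict = LZ4F_createCDict(dictBuf, dictLen);   /* do this once */
    size_t written;
    if (cdict == NULL) return 0;
    written = LZ4F_compressFrame_usingCDict(cctx, dst, dstCapacity,
                                            src, srcSize, cdict, NULL);
    LZ4F_freeCDict(cdict);
    return LZ4F_isError(written) ? 0 : written;
}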
|
||||
|
||||
/*! LZ4F_compressBegin_usingCDict() : stable since v1.10
 * Inits streaming dictionary compression, and writes the frame header into dstBuffer.
 * @dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes.
 * @prefsPtr is optional : one may provide NULL as argument,
 * note however that it's the only way to insert a @dictID in the frame header.
 * @cdict must outlive the compression session.
 * @return : number of bytes written into dstBuffer for the header,
 *           or an error code, which can be tested using LZ4F_isError(). */
LZ4FLIB_API size_t
LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctx,
                              void* dstBuffer, size_t dstCapacity,
                              const LZ4F_CDict* cdict,
                              const LZ4F_preferences_t* prefsPtr);
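/* Usage sketch (illustrative only): streaming variant of the above, where the frame
 * starts from an already digested dictionary. cctx and cdict are assumed to be
 * created elsewhere; dstCapacity sizing follows LZ4F_compressBound(). */
static size_t example_stream_with_cdict(LZ4F_cctx* cctx, const LZ4F_CDict* cdict,
                                        void* dst, size_t dstCapacity,
                                        const void* src, size_t srcSize)
{
    size_t pos, r;
    pos = LZ4F_compressBegin_usingCDict(cctx, dst, dstCapacity, cdict, NULL);
    if (LZ4F_isError(pos)) return 0;
    r = LZ4F_compressUpdate(cctx, (char*)dst + pos, dstCapacity - pos, src, srcSize, NULL);
    if (LZ4F_isError(r)) return 0;
    pos += r;
    r = LZ4F_compressEnd(cctx, (char*)dst + pos, dstCapacity - pos, NULL);
    return LZ4F_isError(r) ? 0 : pos + r;
}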
|
||||
|
||||
|
||||
#if defined (__cplusplus)
|
||||
}
|
||||
@@ -518,11 +630,8 @@ LZ4FLIB_API void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx); /* always su
|
||||
#if defined(LZ4F_STATIC_LINKING_ONLY) && !defined(LZ4F_H_STATIC_09782039843)
|
||||
#define LZ4F_H_STATIC_09782039843
|
||||
|
||||
#if defined (__cplusplus)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* These declarations are not stable and may change in the future.
|
||||
/* Note :
|
||||
* The below declarations are not stable and may change in the future.
|
||||
* They are therefore only safe to depend on
|
||||
* when the caller is statically linked against the library.
|
||||
* To access their declarations, define LZ4F_STATIC_LINKING_ONLY.
|
||||
@@ -532,6 +641,11 @@ extern "C" {
|
||||
* by defining LZ4F_PUBLISH_STATIC_FUNCTIONS.
|
||||
* Use at your own risk.
|
||||
*/
|
||||
|
||||
#if defined (__cplusplus)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifdef LZ4F_PUBLISH_STATIC_FUNCTIONS
|
||||
# define LZ4FLIB_STATIC_API LZ4FLIB_API
|
||||
#else
|
||||
@@ -545,7 +659,7 @@ extern "C" {
|
||||
ITEM(ERROR_GENERIC) \
|
||||
ITEM(ERROR_maxBlockSize_invalid) \
|
||||
ITEM(ERROR_blockMode_invalid) \
|
||||
ITEM(ERROR_contentChecksumFlag_invalid) \
|
||||
ITEM(ERROR_parameter_invalid) \
|
||||
ITEM(ERROR_compressionLevel_invalid) \
|
||||
ITEM(ERROR_headerVersion_wrong) \
|
||||
ITEM(ERROR_blockChecksum_invalid) \
|
||||
@@ -563,6 +677,8 @@ extern "C" {
|
||||
ITEM(ERROR_frameDecoding_alreadyStarted) \
|
||||
ITEM(ERROR_compressionState_uninitialized) \
|
||||
ITEM(ERROR_parameter_null) \
|
||||
ITEM(ERROR_io_write) \
|
||||
ITEM(ERROR_io_read) \
|
||||
ITEM(ERROR_maxCode)
|
||||
|
||||
#define LZ4F_GENERATE_ENUM(ENUM) LZ4F_##ENUM,
|
||||
@@ -573,22 +689,26 @@ typedef enum { LZ4F_LIST_ERRORS(LZ4F_GENERATE_ENUM)
|
||||
|
||||
LZ4FLIB_STATIC_API LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult);
|
||||
|
||||
/**********************************
|
||||
* Advanced compression operations
|
||||
*********************************/
|
||||
|
||||
/*! LZ4F_getBlockSize() :
 * Return, in scalar format (size_t),
 * the maximum block size associated with blockSizeID.
 * @return, in scalar format (size_t),
 *          the maximum block size associated with @blockSizeID,
 *          or an error code (can be tested using LZ4F_isError()) if @blockSizeID is invalid.
**/
LZ4FLIB_STATIC_API size_t LZ4F_getBlockSize(LZ4F_blockSizeID_t blockSizeID);

/*! LZ4F_uncompressedUpdate() :
 * LZ4F_uncompressedUpdate() can be called repetitively to add as much data uncompressed data as necessary.
 * LZ4F_uncompressedUpdate() can be called repetitively to add data stored as uncompressed blocks.
 * Important rule: dstCapacity MUST be large enough to store the entire source buffer as
 * no compression is done for this operation
 * If this condition is not respected, LZ4F_uncompressedUpdate() will fail (result is an errorCode).
 * After an error, the state is left in a UB state, and must be re-initialized or freed.
 * If previously a compressed block was written, buffered data is flushed
 * If previously a compressed block was written, buffered data is flushed first,
 * before appending uncompressed data is continued.
 * This is only supported when LZ4F_blockIndependent is used
 * This operation is only supported when LZ4F_blockIndependent is used.
 * `cOptPtr` is optional : NULL can be provided, in which case all options are set to default.
 * @return : number of bytes written into `dstBuffer` (it can be zero, meaning input data was just buffered).
 *           or an error code if it fails (which can be tested using LZ4F_isError())
@@ -600,81 +720,9 @@ LZ4F_uncompressedUpdate(LZ4F_cctx* cctx,
                        const LZ4F_compressOptions_t* cOptPtr);
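/* Usage sketch (illustrative only): mix compressed and stored blocks in one frame.
 * Requires LZ4F_blockIndependent, as stated above; cctx is created elsewhere and
 * dst is assumed large enough (uncompressed data is stored verbatim). */
static size_t example_mixed_update(LZ4F_cctx* cctx, void* dst, size_t dstCapacity,
                                   const void* textPart, size_t textSize,
                                   const void* randomPart, size_t randomSize)
{
    LZ4F_preferences_t prefs = LZ4F_INIT_PREFERENCES;
    size_t pos, r;
    prefs.frameInfo.blockMode = LZ4F_blockIndependent;
    pos = LZ4F_compressBegin(cctx, dst, dstCapacity, &prefs);
    if (LZ4F_isError(pos)) return 0;
    /* compressible part goes through the normal compressor */
    r = LZ4F_compressUpdate(cctx, (char*)dst + pos, dstCapacity - pos, textPart, textSize, NULL);
    if (LZ4F_isError(r)) return 0;
    pos += r;
    /* incompressible part is emitted as uncompressed blocks, skipping the compressor */
    r = LZ4F_uncompressedUpdate(cctx, (char*)dst + pos, dstCapacity - pos, randomPart, randomSize, NULL);
    if (LZ4F_isError(r)) return 0;
    pos += r;
    r = LZ4F_compressEnd(cctx, (char*)dst + pos, dstCapacity - pos, NULL);
    return LZ4F_isError(r) ? 0 : pos + r;
}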
|
||||
|
||||
/**********************************
|
||||
* Bulk processing dictionary API
|
||||
* Custom memory allocation
|
||||
*********************************/
|
||||
|
||||
/* A Dictionary is useful for the compression of small messages (KB range).
|
||||
* It dramatically improves compression efficiency.
|
||||
*
|
||||
* LZ4 can ingest any input as dictionary, though only the last 64 KB are useful.
|
||||
* Best results are generally achieved by using Zstandard's Dictionary Builder
|
||||
* to generate a high-quality dictionary from a set of samples.
|
||||
*
|
||||
* Loading a dictionary has a cost, since it involves construction of tables.
|
||||
* The Bulk processing dictionary API makes it possible to share this cost
|
||||
* over an arbitrary number of compression jobs, even concurrently,
|
||||
* markedly improving compression latency for these cases.
|
||||
*
|
||||
* The same dictionary will have to be used on the decompression side
|
||||
* for decoding to be successful.
|
||||
* To help identify the correct dictionary at decoding stage,
|
||||
* the frame header allows optional embedding of a dictID field.
|
||||
*/
|
||||
typedef struct LZ4F_CDict_s LZ4F_CDict;
|
||||
|
||||
/*! LZ4_createCDict() :
|
||||
* When compressing multiple messages / blocks using the same dictionary, it's recommended to load it just once.
|
||||
* LZ4_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
|
||||
* LZ4_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
|
||||
* `dictBuffer` can be released after LZ4_CDict creation, since its content is copied within CDict */
|
||||
LZ4FLIB_STATIC_API LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize);
|
||||
LZ4FLIB_STATIC_API void LZ4F_freeCDict(LZ4F_CDict* CDict);
|
||||
|
||||
|
||||
/*! LZ4_compressFrame_usingCDict() :
|
||||
* Compress an entire srcBuffer into a valid LZ4 frame using a digested Dictionary.
|
||||
* cctx must point to a context created by LZ4F_createCompressionContext().
|
||||
* If cdict==NULL, compress without a dictionary.
|
||||
* dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
|
||||
* If this condition is not respected, function will fail (@return an errorCode).
|
||||
* The LZ4F_preferences_t structure is optional : you may provide NULL as argument,
|
||||
* but it's not recommended, as it's the only way to provide dictID in the frame header.
|
||||
* @return : number of bytes written into dstBuffer.
|
||||
* or an error code if it fails (can be tested using LZ4F_isError()) */
|
||||
LZ4FLIB_STATIC_API size_t
|
||||
LZ4F_compressFrame_usingCDict(LZ4F_cctx* cctx,
|
||||
void* dst, size_t dstCapacity,
|
||||
const void* src, size_t srcSize,
|
||||
const LZ4F_CDict* cdict,
|
||||
const LZ4F_preferences_t* preferencesPtr);
|
||||
|
||||
|
||||
/*! LZ4F_compressBegin_usingCDict() :
|
||||
* Inits streaming dictionary compression, and writes the frame header into dstBuffer.
|
||||
* dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes.
|
||||
* `prefsPtr` is optional : you may provide NULL as argument,
|
||||
* however, it's the only way to provide dictID in the frame header.
|
||||
* @return : number of bytes written into dstBuffer for the header,
|
||||
* or an error code (which can be tested using LZ4F_isError()) */
|
||||
LZ4FLIB_STATIC_API size_t
|
||||
LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctx,
|
||||
void* dstBuffer, size_t dstCapacity,
|
||||
const LZ4F_CDict* cdict,
|
||||
const LZ4F_preferences_t* prefsPtr);
|
||||
|
||||
|
||||
/*! LZ4F_decompress_usingDict() :
|
||||
* Same as LZ4F_decompress(), using a predefined dictionary.
|
||||
* Dictionary is used "in place", without any preprocessing.
|
||||
** It must remain accessible throughout the entire frame decoding. */
|
||||
LZ4FLIB_STATIC_API size_t
|
||||
LZ4F_decompress_usingDict(LZ4F_dctx* dctxPtr,
|
||||
void* dstBuffer, size_t* dstSizePtr,
|
||||
const void* srcBuffer, size_t* srcSizePtr,
|
||||
const void* dict, size_t dictSize,
|
||||
const LZ4F_decompressOptions_t* decompressOptionsPtr);
|
||||
|
||||
|
||||
/*! Custom memory allocation : v1.9.4+
|
||||
* These prototypes make it possible to pass custom allocation/free functions.
|
||||
* LZ4F_customMem is provided at state creation time, using LZ4F_create*_advanced() listed below.
|
||||
@@ -699,6 +747,11 @@ LZ4FLIB_STATIC_API LZ4F_cctx* LZ4F_createCompressionContext_advanced(LZ4F_Custom
|
||||
LZ4FLIB_STATIC_API LZ4F_dctx* LZ4F_createDecompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version);
|
||||
LZ4FLIB_STATIC_API LZ4F_CDict* LZ4F_createCDict_advanced(LZ4F_CustomMem customMem, const void* dictBuffer, size_t dictSize);
|
||||
|
||||
/*! Context size inspection : v1.10.1+
 * These functions return the total memory footprint of the provided context.
 */
LZ4FLIB_STATIC_API size_t LZ4F_cctx_size(const LZ4F_cctx* cctx);
LZ4FLIB_STATIC_API size_t LZ4F_dctx_size(const LZ4F_dctx* dctx);
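/* Usage sketch (illustrative only): report the current footprint of live contexts.
 * Static API, so LZ4F_STATIC_LINKING_ONLY must be defined before including lz4frame.h. */
#include <stdio.h>
static void example_report_footprint(const LZ4F_cctx* cctx, const LZ4F_dctx* dctx)
{
    printf("cctx: %u bytes, dctx: %u bytes\n",
           (unsigned)LZ4F_cctx_size(cctx), (unsigned)LZ4F_dctx_size(dctx));
}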
|
||||
|
||||
#if defined (__cplusplus)
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*
|
||||
LZ4 auto-framing library
|
||||
Header File for static linking only
|
||||
Copyright (C) 2011-2020, Yann Collet.
|
||||
Copyright (c) Yann Collet. All rights reserved.
|
||||
|
||||
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
|
||||
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,7 +1,7 @@
|
||||
/*
|
||||
LZ4 HC - High Compression Mode of LZ4
|
||||
Header File
|
||||
Copyright (C) 2011-2020, Yann Collet.
|
||||
Copyright (c) Yann Collet. All rights reserved.
|
||||
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
@@ -44,7 +44,7 @@ extern "C" {
|
||||
|
||||
|
||||
/* --- Useful constants --- */
|
||||
#define LZ4HC_CLEVEL_MIN 3
|
||||
#define LZ4HC_CLEVEL_MIN 2
|
||||
#define LZ4HC_CLEVEL_DEFAULT 9
|
||||
#define LZ4HC_CLEVEL_OPT_MIN 10
|
||||
#define LZ4HC_CLEVEL_MAX 12
|
||||
@@ -126,6 +126,8 @@ LZ4LIB_API int LZ4_freeStreamHC (LZ4_streamHC_t* streamHCPtr);

  After reset, a first "fictional block" can be designated as initial dictionary,
  using LZ4_loadDictHC() (Optional).
  Note: In order for LZ4_loadDictHC() to create the correct data structure,
        it is essential to set the compression level _before_ loading the dictionary.

  Invoke LZ4_compress_HC_continue() to compress each successive block.
  The number of blocks is unlimited.
@@ -135,12 +137,12 @@ LZ4LIB_API int LZ4_freeStreamHC (LZ4_streamHC_t* streamHCPtr);
  It's allowed to update compression level anytime between blocks,
  using LZ4_setCompressionLevel() (experimental).

  'dst' buffer should be sized to handle worst case scenarios
  @dst buffer should be sized to handle worst case scenarios
  (see LZ4_compressBound(), it ensures compression success).
  In case of failure, the API does not guarantee recovery,
  so the state _must_ be reset.
  To ensure compression success
  whenever `dst` buffer size cannot be made >= LZ4_compressBound(),
  whenever @dst buffer size cannot be made >= LZ4_compressBound(),
  consider using LZ4_compress_HC_continue_destSize().

  Whenever previous input blocks can't be preserved unmodified in-place during compression of next blocks,
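/* Usage sketch (illustrative only): the streaming steps described above, condensed.
 * Block boundaries and compressed sizes must be recorded by the caller; error
 * checks are trimmed and buffer names are placeholders. */
static int example_hc_stream(const char* block1, int size1,
                             const char* block2, int size2,
                             char* dst, int dstCapacity)
{
    LZ4_streamHC_t* const s = LZ4_createStreamHC();
    int written = 0;
    if (s == NULL) return 0;
    LZ4_resetStreamHC_fast(s, LZ4HC_CLEVEL_DEFAULT);   /* set the level before any LZ4_loadDictHC() */
    /* optional initial dictionary would be loaded here with LZ4_loadDictHC() */
    written  = LZ4_compress_HC_continue(s, block1, dst, size1, dstCapacity);
    written += LZ4_compress_HC_continue(s, block2, dst + written, size2, dstCapacity - written);
    LZ4_freeStreamHC(s);
    return written;   /* real code checks each return value against 0 */
}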
|
||||
@@ -176,6 +178,34 @@ LZ4LIB_API int LZ4_compress_HC_continue_destSize(LZ4_streamHC_t* LZ4_streamHCPtr
|
||||
LZ4LIB_API int LZ4_saveDictHC (LZ4_streamHC_t* streamHCPtr, char* safeBuffer, int maxDictSize);
|
||||
|
||||
|
||||
/*! LZ4_attach_HC_dictionary() : stable since v1.10.0
 *  This API allows for the efficient re-use of a static dictionary many times.
 *
 *  Rather than re-loading the dictionary buffer into a working context before
 *  each compression, or copying a pre-loaded dictionary's LZ4_streamHC_t into a
 *  working LZ4_streamHC_t, this function introduces a no-copy setup mechanism,
 *  in which the working stream references the dictionary stream in-place.
 *
 *  Several assumptions are made about the state of the dictionary stream.
 *  Currently, only streams which have been prepared by LZ4_loadDictHC() should
 *  be expected to work.
 *
 *  Alternatively, the provided dictionary stream pointer may be NULL, in which
 *  case any existing dictionary stream is unset.
 *
 *  A dictionary should only be attached to a stream without any history (i.e.,
 *  a stream that has just been reset).
 *
 *  The dictionary will remain attached to the working stream only for the
 *  current stream session. Calls to LZ4_resetStreamHC(_fast) will remove the
 *  dictionary context association from the working stream. The dictionary
 *  stream (and source buffer) must remain in-place / accessible / unchanged
 *  through the lifetime of the stream session.
 */
LZ4LIB_API void
LZ4_attach_HC_dictionary(LZ4_streamHC_t* working_stream,
                         const LZ4_streamHC_t* dictionary_stream);
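/* Usage sketch (illustrative only): one dictionary stream prepared once with
 * LZ4_loadDictHC(), then attached to a freshly reset working stream for each job.
 * dictStream/workStream are placeholders created and sized by the caller. */
static int example_hc_with_attached_dict(const LZ4_streamHC_t* dictStream,
                                         LZ4_streamHC_t* workStream,
                                         const char* src, int srcSize,
                                         char* dst, int dstCapacity)
{
    LZ4_resetStreamHC_fast(workStream, LZ4HC_CLEVEL_DEFAULT);   /* stream must have no history */
    LZ4_attach_HC_dictionary(workStream, dictStream);           /* no-copy reference, valid for this session */
    return LZ4_compress_HC_continue(workStream, src, dst, srcSize, dstCapacity);
}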
|
||||
|
||||
|
||||
/*^**********************************************
|
||||
* !!!!!! STATIC LINKING ONLY !!!!!!
|
||||
@@ -204,18 +234,18 @@ LZ4LIB_API int LZ4_saveDictHC (LZ4_streamHC_t* streamHCPtr, char* safeBuffer, in
|
||||
typedef struct LZ4HC_CCtx_internal LZ4HC_CCtx_internal;
|
||||
struct LZ4HC_CCtx_internal
|
||||
{
|
||||
LZ4_u32 hashTable[LZ4HC_HASHTABLESIZE];
|
||||
LZ4_u16 chainTable[LZ4HC_MAXD];
|
||||
const LZ4_byte* end; /* next block here to continue on current prefix */
|
||||
LZ4_u32 hashTable[LZ4HC_HASHTABLESIZE];
|
||||
LZ4_u16 chainTable[LZ4HC_MAXD];
|
||||
const LZ4_byte* end; /* next block here to continue on current prefix */
|
||||
const LZ4_byte* prefixStart; /* Indexes relative to this position */
|
||||
const LZ4_byte* dictStart; /* alternate reference for extDict */
|
||||
LZ4_u32 dictLimit; /* below that point, need extDict */
|
||||
LZ4_u32 lowLimit; /* below that point, no more dict */
|
||||
LZ4_u32 nextToUpdate; /* index from which to continue dictionary update */
|
||||
short compressionLevel;
|
||||
LZ4_i8 favorDecSpeed; /* favor decompression speed if this flag set,
|
||||
otherwise, favor compression ratio */
|
||||
LZ4_i8 dirty; /* stream has to be fully reset if this flag is set */
|
||||
LZ4_u32 dictLimit; /* below that point, need extDict */
|
||||
LZ4_u32 lowLimit; /* below that point, no more history */
|
||||
LZ4_u32 nextToUpdate; /* index from which to continue dictionary update */
|
||||
short compressionLevel;
|
||||
LZ4_i8 favorDecSpeed; /* favor decompression speed if this flag set,
|
||||
otherwise, favor compression ratio */
|
||||
LZ4_i8 dirty; /* stream has to be fully reset if this flag is set */
|
||||
const LZ4HC_CCtx_internal* dictCtx;
|
||||
};
|
||||
|
||||
@@ -376,35 +406,6 @@ LZ4LIB_STATIC_API int LZ4_compress_HC_extStateHC_fastReset (
                                   int srcSize, int dstCapacity,
                                   int compressionLevel);

/*! LZ4_attach_HC_dictionary() :
 * This is an experimental API that allows for the efficient use of a
 * static dictionary many times.
 *
 * Rather than re-loading the dictionary buffer into a working context before
 * each compression, or copying a pre-loaded dictionary's LZ4_streamHC_t into a
 * working LZ4_streamHC_t, this function introduces a no-copy setup mechanism,
 * in which the working stream references the dictionary stream in-place.
 *
 * Several assumptions are made about the state of the dictionary stream.
 * Currently, only streams which have been prepared by LZ4_loadDictHC() should
 * be expected to work.
 *
 * Alternatively, the provided dictionary stream pointer may be NULL, in which
 * case any existing dictionary stream is unset.
 *
 * A dictionary should only be attached to a stream without any history (i.e.,
 * a stream that has just been reset).
 *
 * The dictionary will remain attached to the working stream only for the
 * current stream session. Calls to LZ4_resetStreamHC(_fast) will remove the
 * dictionary context association from the working stream. The dictionary
 * stream (and source buffer) must remain in-place / accessible / unchanged
 * through the lifetime of the stream session.
 */
LZ4LIB_STATIC_API void LZ4_attach_HC_dictionary(
    LZ4_streamHC_t *working_stream,
    const LZ4_streamHC_t *dictionary_stream);

#if defined (__cplusplus)
}
#endif

@@ -120,12 +120,12 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcp
/* *************************************
*  Compiler Specific Options
***************************************/
#ifdef _MSC_VER    /* Visual Studio */
#if defined (_MSC_VER) && !defined (__clang__)    /* MSVC */
#  pragma warning(disable : 4127)    /* disable: C4127: conditional expression is constant */
#  define FORCE_INLINE static __forceinline
#else
#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
#    ifdef __GNUC__
#    if defined (__GNUC__) || defined (__clang__)
#      define FORCE_INLINE static inline __attribute__((always_inline))
#    else
#      define FORCE_INLINE static inline
@@ -213,7 +213,7 @@ static U32 XXH_swap32 (U32 x)
/* *************************************
*  Architecture Macros
***************************************/
typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianness;

/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
#ifndef XXH_CPU_LITTLE_ENDIAN
@@ -231,7 +231,7 @@ static int XXH_isLittleEndian(void)
*****************************/
typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;

FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianness endian, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
@@ -239,7 +239,7 @@ FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_a
        return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
}

FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianness endian)
{
    return XXH_readLE32_align(ptr, endian, XXH_unaligned);
}
@@ -289,7 +289,7 @@ static U32 XXH32_avalanche(U32 h32)

static U32
XXH32_finalize(U32 h32, const void* ptr, size_t len,
               XXH_endianess endian, XXH_alignment align)
               XXH_endianness endian, XXH_alignment align)

{
    const BYTE* p = (const BYTE*)ptr;
@@ -350,7 +350,7 @@ XXH32_finalize(U32 h32, const void* ptr, size_t len,

FORCE_INLINE U32
XXH32_endian_align(const void* input, size_t len, U32 seed,
                   XXH_endianess endian, XXH_alignment align)
                   XXH_endianness endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
@@ -398,7 +398,7 @@ XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int s
    XXH32_update(&state, input, len);
    return XXH32_digest(&state);
#else
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
    XXH_endianness endian_detected = (XXH_endianness)XXH_CPU_LITTLE_ENDIAN;

    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 3) == 0) {   /* Input is 4-bytes aligned, leverage the speed benefit */
@@ -449,7 +449,7 @@ XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int s


FORCE_INLINE XXH_errorcode
XXH32_update_endian(XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
XXH32_update_endian(XXH32_state_t* state, const void* input, size_t len, XXH_endianness endian)
{
    if (input==NULL)
#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
@@ -514,7 +514,7 @@ XXH32_update_endian(XXH32_state_t* state, const void* input, size_t len, XXH_end

XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
    XXH_endianness endian_detected = (XXH_endianness)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
@@ -524,7 +524,7 @@ XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void*


FORCE_INLINE U32
XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
XXH32_digest_endian (const XXH32_state_t* state, XXH_endianness endian)
{
    U32 h32;

@@ -545,7 +545,7 @@ XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)

XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
    XXH_endianness endian_detected = (XXH_endianness)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_digest_endian(state_in, XXH_littleEndian);
@@ -642,7 +642,7 @@ static U64 XXH_swap64 (U64 x)
}
#endif

FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianness endian, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
@@ -650,7 +650,7 @@ FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_a
        return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
}

FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianness endian)
{
    return XXH_readLE64_align(ptr, endian, XXH_unaligned);
}
@@ -700,7 +700,7 @@ static U64 XXH64_avalanche(U64 h64)

static U64
XXH64_finalize(U64 h64, const void* ptr, size_t len,
               XXH_endianess endian, XXH_alignment align)
               XXH_endianness endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)ptr;

@@ -809,7 +809,7 @@ XXH64_finalize(U64 h64, const void* ptr, size_t len,

FORCE_INLINE U64
XXH64_endian_align(const void* input, size_t len, U64 seed,
                   XXH_endianess endian, XXH_alignment align)
                   XXH_endianness endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
@@ -861,7 +861,7 @@ XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned
    XXH64_update(&state, input, len);
    return XXH64_digest(&state);
#else
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
    XXH_endianness endian_detected = (XXH_endianness)XXH_CPU_LITTLE_ENDIAN;

    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 7)==0) {  /* Input is aligned, let's leverage the speed advantage */
@@ -909,7 +909,7 @@ XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long
}

FORCE_INLINE XXH_errorcode
XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianness endian)
{
    if (input==NULL)
#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
@@ -970,7 +970,7 @@ XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_en

XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
    XXH_endianness endian_detected = (XXH_endianness)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
@@ -978,7 +978,7 @@ XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void*
        return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
}

FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianness endian)
{
    U64 h64;

@@ -1004,7 +1004,7 @@ FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess

XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
    XXH_endianness endian_detected = (XXH_endianness)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_digest_endian(state_in, XXH_littleEndian);

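The xxHash hunks above are a pure identifier rename (XXH_endianess to XXH_endianness) plus compiler-detection tweaks; behavior is unchanged. For reference, a one-shot call of the public entry points visible in the hunk headers looks like this (buffer names are illustrative, not part of the diff):

/* Hypothetical one-shot hashing sketch; `data` and `size` are assumed to exist. */
#include "xxhash.h"

unsigned int       h32 = XXH32(data, size, 0 /* seed */);
unsigned long long h64 = XXH64(data, size, 0 /* seed */);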
@@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 3.2)
cmake_minimum_required(VERSION 3.12)

project(nonewrap)


@@ -10,7 +10,7 @@ size_t __stdcall CompressBufferBound(size_t inBufferSize)
    return inBufferSize * 4;
}

size_t __stdcall CompressBegin(Context** pctx, int compressionLevel, void* outBuff, size_t outCapacity, void* dict, size_t dictSize)
size_t __stdcall CompressBegin(Context **pctx, int compressionLevel, void *outBuff, size_t outCapacity, void *dict, size_t dictSize)
{
    Context *ctx = (Context *)malloc(sizeof(Context));
    if (ctx == NULL) return -1;
@@ -18,23 +18,23 @@ size_t __stdcall CompressBegin(Context** pctx, int compressionLevel, void* outBu
    return 0;
}

size_t __stdcall CompressUpdate(Context* ctx,void* dstBuffer, size_t dstCapacity,const void* srcBuffer, size_t srcSize)
size_t __stdcall CompressUpdate(Context *ctx, void *dstBuffer, size_t dstCapacity, const void *srcBuffer, size_t srcSize)
{
    memcpy(dstBuffer, srcBuffer, srcSize);
    return srcSize;
}

size_t __stdcall CompressEnd(Context* ctx, void* dstBuffer, size_t dstCapacity)
size_t __stdcall CompressEnd(Context *ctx, void *dstBuffer, size_t dstCapacity)
{
    return 0;
}

void __stdcall CompressContextFree(Context* ctx)
void __stdcall CompressContextFree(Context *ctx)
{
    free(ctx);
}

size_t __stdcall DecompressBegin(Context **pdctx,void *inBuffer,size_t *inBufferSize, size_t *blockSize, void* dict, size_t dictSize)
size_t __stdcall DecompressBegin(Context **pdctx, void *inBuffer, size_t *inBufferSize, size_t *blockSize, void *dict, size_t dictSize)
{
    Context *ctx = (Context *)malloc(sizeof(Context));
    if (ctx == NULL) return -1;
@@ -44,19 +44,19 @@ size_t __stdcall DecompressBegin(Context **pdctx,void *inBuffer,size_t *inBuffer
    return 0;
}

void __stdcall DecompressContextReset(Context* dctx)
void __stdcall DecompressContextReset(Context *dctx)
{
}

size_t __stdcall DecompressUpdate(Context* dctx, void* outBuffer, size_t * outBufferSize, void* inBuffer, size_t * inBufferSize)
size_t __stdcall DecompressUpdate(Context *dctx, void *outBuffer, size_t *outBufferSize, void *inBuffer, size_t *inBufferSize)
{
    memcpy(outBuffer, inBuffer, *inBufferSize);
    *outBufferSize = *inBufferSize;
    return 1;
}

size_t __stdcall DecompressEnd(Context* ctx)
size_t __stdcall DecompressEnd(Context *ctx)
{
    free(ctx);
    return 0;
}
}

@@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 3.2)
cmake_minimum_required(VERSION 3.12)

project(zstdwrap)


@@ -10,65 +10,61 @@ size_t __stdcall CompressBufferBound(size_t inBufferSize)
    return ZSTD_COMPRESSBOUND(inBufferSize);
}

size_t __stdcall CompressBegin(ZSTD_CStream** pctx, int compressionLevel, void* outBuff, size_t outCapacity, void* dict, size_t dictSize)
size_t __stdcall CompressBegin(ZSTD_CStream **pctx, int compressionLevel, void *outBuff, size_t outCapacity, void *dict, size_t dictSize)
{
    ZSTD_CStream *ctx = ZSTD_createCStream();
    if (ctx == NULL) return -1;
    if (ctx == NULL)
        return -1;
    ZSTD_CCtx_setParameter(ctx, ZSTD_c_compressionLevel, compressionLevel);
    if (dict)
    {
        ZSTD_CCtx_loadDictionary(ctx, dict, dictSize);
    }
    *pctx = ctx;
    return 0;
}


size_t __stdcall CompressUpdate(ZSTD_CStream* ctx,void* dstBuffer, size_t dstCapacity,const void* srcBuffer, size_t srcSize)
size_t __stdcall CompressUpdate(ZSTD_CStream *ctx, void *dstBuffer, size_t dstCapacity, const void *srcBuffer, size_t srcSize)
{
    ZSTD_outBuffer obuf = {dstBuffer, dstCapacity, 0};
    ZSTD_inBuffer ibuf = {srcBuffer, srcSize, 0};
    do
    {
        ZSTD_compressStream2(ctx, &obuf, &ibuf, ZSTD_e_continue);
    }
    while (ibuf.pos < ibuf.size);
    } while (ibuf.pos < ibuf.size);
    return obuf.pos;
}

size_t __stdcall CompressEnd(ZSTD_CStream* ctx, void* dstBuffer, size_t dstCapacity)
size_t __stdcall CompressEnd(ZSTD_CStream *ctx, void *dstBuffer, size_t dstCapacity)
{
    ZSTD_outBuffer obuf = {dstBuffer, dstCapacity, 0};
    ZSTD_inBuffer ibuf = {NULL, 0, 0};
    while (ZSTD_compressStream2(ctx, &obuf, &ibuf, ZSTD_e_end) > 0) {}
    while (ZSTD_compressStream2(ctx, &obuf, &ibuf, ZSTD_e_end) > 0) { }
    return obuf.pos;
}

void __stdcall CompressContextFree(ZSTD_CStream* ctx)
void __stdcall CompressContextFree(ZSTD_CStream *ctx)
{
    ZSTD_freeCStream(ctx);
}

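The zstdwrap compression exports above form a simple begin/update/end protocol. A hedged sketch of how a caller might drive them for a single block (not part of the diff; `src`/`srcSize` and the chosen level are illustrative, and no dictionary is passed):

/* Hypothetical caller-side sketch using the exports defined above. */
ZSTD_CStream *cctx = NULL;
size_t dstCapacity = CompressBufferBound(srcSize);
unsigned char *dst = (unsigned char *)malloc(dstCapacity);

if (dst != NULL && CompressBegin(&cctx, 3 /* compression level */, dst, dstCapacity, NULL, 0) == 0)
{
    size_t written = CompressUpdate(cctx, dst, dstCapacity, src, srcSize);
    written += CompressEnd(cctx, dst + written, dstCapacity - written);
    CompressContextFree(cctx);
    /* the first `written` bytes of dst now hold the complete zstd frame */
}
free(dst);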
size_t __stdcall DecompressBegin(ZSTD_DStream **pdctx,void *inBuffer,size_t *inBufferSize, size_t *blockSize, void* dict, size_t dictSize)
size_t __stdcall DecompressBegin(ZSTD_DStream **pdctx, void *inBuffer, size_t *inBufferSize, size_t *blockSize, void *dict, size_t dictSize)
{
    ZSTD_DStream *ctx = ZSTD_createDStream();
    if (ctx == NULL) return -1;
    if (ctx == NULL)
        return -1;
    if (dict)
    {
        ZSTD_DCtx_loadDictionary(ctx, dict, dictSize);
    }
    *pdctx = ctx;
    *inBufferSize = 0;
    *blockSize = ZSTD_DStreamOutSize() << 2;
    return 0;
}

void __stdcall DecompressContextReset(ZSTD_DStream* dctx)
void __stdcall DecompressContextReset(ZSTD_DStream *dctx)
{
    ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only);
}

size_t __stdcall DecompressUpdate(ZSTD_DStream* dctx, void* outBuffer, size_t * outBufferSize, void* inBuffer, size_t * inBufferSize)
size_t __stdcall DecompressUpdate(ZSTD_DStream *dctx, void *outBuffer, size_t *outBufferSize, void *inBuffer, size_t *inBufferSize)
{
    ZSTD_outBuffer obuf = {outBuffer, *outBufferSize, 0};
    ZSTD_inBuffer ibuf = {inBuffer, *inBufferSize, 0};
@@ -78,7 +74,7 @@ size_t __stdcall DecompressUpdate(ZSTD_DStream* dctx, void* outBuffer, size_t *
    return r;
}

size_t __stdcall DecompressEnd(ZSTD_DStream* ctx)
size_t __stdcall DecompressEnd(ZSTD_DStream *ctx)
{
    return ZSTD_freeDStream(ctx);
}
}

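For the decompression side, a hedged caller sketch (not part of the diff): it assumes DecompressUpdate reports produced and consumed sizes back through the size pointers and returns the ZSTD_decompressStream() result (0 once a frame is complete), which matches the visible body even though part of it is outside this hunk; `compressedBuf`/`compressedSize` are illustrative.

/* Hypothetical caller-side sketch of the zstdwrap decompression exports. */
ZSTD_DStream *dctx = NULL;
size_t inSize = 0, blockSize = 0;
if (DecompressBegin(&dctx, compressedBuf, &inSize, &blockSize, NULL, 0) == 0)
{
    unsigned char *out = (unsigned char *)malloc(blockSize);
    size_t outSize = blockSize;
    inSize = compressedSize;
    size_t r = DecompressUpdate(dctx, out, &outSize, compressedBuf, &inSize);
    /* outSize bytes of decompressed data are now in `out`; keep feeding input while r != 0 */
    DecompressContextReset(dctx);   /* reset the session before reusing the stream */
    free(out);
    DecompressEnd(dctx);
}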
Binary file not shown.
Binary file not shown.
Binary file not shown.