diff --git a/CompressSave/CompressSave.cs b/CompressSave/CompressSave.cs deleted file mode 100644 index 2972aaf..0000000 --- a/CompressSave/CompressSave.cs +++ /dev/null @@ -1,389 +0,0 @@ -using System; -using System.Collections.Generic; -using System.IO; -using System.Reflection.Emit; -using BepInEx; -using BepInEx.Configuration; -using HarmonyLib; -using CompressSave.Wrapper; - -namespace CompressSave; - -public enum CompressionType -{ - None = 0, - LZ4 = 1, - Zstd = 2, -} - -[BepInPlugin(PluginInfo.PLUGIN_GUID, PluginInfo.PLUGIN_NAME, PluginInfo.PLUGIN_VERSION)] -public class CompressSave : BaseUnityPlugin -{ - private Harmony _patchSave, _patchUISave, _patchUILoad; - public static string StringFromCompresstionType(CompressionType type) - { - return type switch - { - CompressionType.LZ4 => "lz4", - CompressionType.Zstd => "zstd", - CompressionType.None => "none", - _ => throw new ArgumentException("Unknown compression type.") - }; - } - - private static CompressionType CompressionTypeFromString(string str) - { - return str switch - { - "lz4" => CompressionType.LZ4, - "zstd" => CompressionType.Zstd, - _ => CompressionType.None - }; - } - - public void Awake() - { - SaveUtil.Logger = Logger; - if (LZ4API.Avaliable && ZstdAPI.Avaliable) - { - PatchSave.CompressionTypeForSavesConfig = Config.Bind("Compression", "Type", StringFromCompresstionType(PatchSave.CompressionTypeForSaves), - new ConfigDescription("Set default compression type for manual saves.", - new AcceptableValueList("lz4", "zstd", "none"), new { })); - PatchSave.CompressionTypeForSaves = CompressionTypeFromString(PatchSave.CompressionTypeForSavesConfig.Value); - PatchSave.CompressionTypeForAutoSavesConfig = Config.Bind("Compression", "TypeForAuto", StringFromCompresstionType(PatchSave.CompressionTypeForAutoSaves), - new ConfigDescription("Set default compression type for auto saves and last-exit save.", - new AcceptableValueList("lz4", "zstd", "none"), new { })); - PatchSave.CompressionTypeForAutoSaves = CompressionTypeFromString(PatchSave.CompressionTypeForAutoSavesConfig.Value); - PatchSave.CompressionLevelForSaves = Config.Bind("Compression", "Level", PatchSave.CompressionLevelForSaves, - "Set default compression level for manual saves.\n0 for default level.\n3 ~ 12 for lz4, -5 ~ 22 for zstd.\nSmaller level leads to faster speed and less compression ratio.") - .Value; - PatchSave.CompressionLevelForAutoSaves = Config.Bind("Compression", "LevelForAuto", PatchSave.CompressionLevelForAutoSaves, - "Set default compression level for auto saves and last-exit save.\n0 for default level.\n3 ~ 12 for lz4, -5 ~ 22 for zstd.\nSmaller level leads to faster speed and less compression ratio.") - .Value; - PatchSave.EnableForAutoSaves = Config.Bind("Compression", "EnableForAutoSaves", true, - "Enable the feature for auto saves and last-exit save."); - PatchSave.CreateCompressBuffer(); - if (GameConfig.gameVersion != SaveUtil.VerifiedVersion) - { - SaveUtil.Logger.LogWarning( - $"Save version mismatch. Expect:{SaveUtil.VerifiedVersion}, Current:{GameConfig.gameVersion}. 
MOD may not work as expected."); - } - - _patchSave = Harmony.CreateAndPatchAll(typeof(PatchSave)); - if (PatchSave.EnableCompress) - _patchUISave = Harmony.CreateAndPatchAll(typeof(PatchUISaveGame)); - _patchUILoad = Harmony.CreateAndPatchAll(typeof(PatchUILoadGame)); - } - else - SaveUtil.Logger.LogWarning("Either nonewrap.dll, lz4warp.dll or zstdwrap.dll is not avaliable."); - I18N.Init(); - I18N.Add("Store", "Store (No Compression)", "存储(不压缩)"); - I18N.Add("Decompress", "Decompress", "解压存档"); - I18N.Add("Save with Compression", "Save (Compress)", "压缩保存"); - I18N.Add("Compression for auto saves", "Compression for auto saves", "  自动存档压缩方式"); - I18N.Add("Compression for manual saves", "Compression for manual saves", "  手动存档压缩方式"); - I18N.Apply(); - } - - public void OnDestroy() - { - if (_patchUISave != null) - { - PatchUISaveGame.OnDestroy(); - _patchUISave.UnpatchSelf(); - _patchUISave = null; - } - if (_patchUILoad != null) - { - PatchUILoadGame.OnDestroy(); - _patchUILoad.UnpatchSelf(); - _patchUILoad = null; - } - _patchSave?.UnpatchSelf(); - _patchSave = null; - } -} - -public class PatchSave -{ - public static readonly WrapperDefines LZ4Wrapper = new LZ4API(), ZstdWrapper = new ZstdAPI(); - private static readonly WrapperDefines NoneWrapper = new NoneAPI(); - private static CompressionStream.CompressBuffer _compressBuffer; - public static bool UseCompressSave; - private static CompressionType _compressionTypeForLoading = CompressionType.None; - private static CompressionType _compressionTypeForSaving = CompressionType.Zstd; - private static int _compressionLevelForSaving; - public static CompressionType CompressionTypeForSaves = CompressionType.Zstd; - public static CompressionType CompressionTypeForAutoSaves = CompressionType.Zstd; - public static ConfigEntry CompressionTypeForSavesConfig; - public static ConfigEntry CompressionTypeForAutoSavesConfig; - public static int CompressionLevelForSaves; - public static int CompressionLevelForAutoSaves; - public static ConfigEntry EnableForAutoSaves; - private static Stream _compressionStream; - public static bool EnableCompress; - - public static void CreateCompressBuffer() - { - const int bufSize = CompressionStream.Mb; - var outBufSize = LZ4Wrapper.CompressBufferBound(bufSize); - outBufSize = Math.Max(outBufSize, ZstdWrapper.CompressBufferBound(bufSize)); - outBufSize = Math.Max(outBufSize, NoneWrapper.CompressBufferBound(bufSize)); - _compressBuffer = CompressionStream.CreateBuffer((int)outBufSize, bufSize); - _compressionTypeForSaving = CompressionTypeForSaves; - _compressionLevelForSaving = CompressionLevelForSaves; - } - - private static void WriteHeader(FileStream fileStream) - { - if (fileStream == null) throw new ArgumentNullException(nameof(fileStream)); - switch (_compressionTypeForSaving) - { - case CompressionType.Zstd: - for (var i = 0; i < 3; i++) - fileStream.WriteByte(0xCC); - fileStream.WriteByte(0xCD); - break; - case CompressionType.LZ4: - for (var i = 0; i < 4; i++) - fileStream.WriteByte(0xCC); - break; - case CompressionType.None: - break; - default: - throw new ArgumentException("Unknown compression type."); - } - } - - [HarmonyPrefix] - [HarmonyPatch(typeof(GameSave), "AutoSave")] - [HarmonyPatch(typeof(GameSave), "SaveAsLastExit")] - private static void BeforeAutoSave() - { - UseCompressSave = EnableForAutoSaves.Value && EnableCompress; - if (!UseCompressSave) return; - _compressionTypeForSaving = CompressionTypeForAutoSaves; - _compressionLevelForSaving = CompressionLevelForAutoSaves; - } - - 
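WriteHeader above tags every compressed save with a 4-byte magic prefix: four 0xCC bytes for LZ4, three 0xCC bytes followed by 0xCD for zstd, and no prefix at all for uncompressed saves. Below is a minimal sketch of how that prefix could be recognized again on load; HeaderSketch and DetectCompressionType are hypothetical names used only for illustration, since in this mod the actual detection lives in SaveUtil.SaveGetCompressType, whose source is not part of this diff.

// Sketch only, assuming the CompressionType enum from CompressSave.cs above.
// DetectCompressionType is a hypothetical helper; the mod itself relies on
// SaveUtil.SaveGetCompressType for this, which is not shown in this diff.
using System.IO;
using CompressSave;

public static class HeaderSketch
{
    public static CompressionType DetectCompressionType(FileStream fileStream)
    {
        var magic = new byte[4];
        fileStream.Seek(0, SeekOrigin.Begin);
        int read = fileStream.Read(magic, 0, magic.Length);
        fileStream.Seek(0, SeekOrigin.Begin); // rewind; callers decide whether to skip the prefix
        if (read == 4 && magic[0] == 0xCC && magic[1] == 0xCC && magic[2] == 0xCC)
        {
            if (magic[3] == 0xCC) return CompressionType.LZ4;  // CC CC CC CC
            if (magic[3] == 0xCD) return CompressionType.Zstd; // CC CC CC CD
        }
        return CompressionType.None; // vanilla saves carry no magic prefix
    }
}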
[HarmonyTranspiler] - [HarmonyPatch(typeof(GameSave), "SaveCurrentGame")] - private static IEnumerable SaveCurrentGame_Transpiler(IEnumerable instructions, ILGenerator generator) - { - /* BinaryWriter binaryWriter = new BinaryWriter(fileStream); => Create compressionStream and replace binaryWriter. - * set PerformanceMonitor.BeginStream to compressionStream. - * fileStream.Seek(6L, SeekOrigin.Begin); binaryWriter.Write(position); => Disable seek&write function. - * binaryWriter.Dispose(); => Dispose compressionStream before fileStream close. - */ - var matcher = new CodeMatcher(instructions, generator); - try - { - matcher.MatchForward(false, - new CodeMatch(OpCodes.Newobj, AccessTools.Constructor(typeof(BinaryWriter), new [] { typeof(FileStream) })) - ).Set( - OpCodes.Call, AccessTools.Method(typeof(PatchSave), "CreateBinaryWriter") - ).MatchForward(false, - new CodeMatch(OpCodes.Call, AccessTools.Method(typeof(PerformanceMonitor), "BeginStream")) - ).Set( - OpCodes.Call, AccessTools.Method(typeof(PatchSave), "MonitorStream") - ).MatchForward(false, - new CodeMatch(OpCodes.Callvirt, AccessTools.Method(typeof(Stream), "Seek")) - ).Set( - OpCodes.Call, AccessTools.Method(typeof(PatchSave), "FileLengthWrite0") - ).MatchForward(false, - new CodeMatch(OpCodes.Callvirt, AccessTools.Method(typeof(BinaryWriter), "Write", new [] { typeof(long) })) - ).Set( - OpCodes.Call, AccessTools.Method(typeof(PatchSave), "FileLengthWrite1") - ).MatchForward(false, - new CodeMatch(OpCodes.Callvirt, AccessTools.Method(typeof(IDisposable), "Dispose")) - ).Advance(1).Insert( - new CodeInstruction(OpCodes.Call, AccessTools.Method(typeof(PatchSave), "DisposeCompressionStream")) - ); - EnableCompress = true; - return matcher.InstructionEnumeration(); - } - catch (Exception ex) - { - SaveUtil.Logger.LogError( - "SaveCurrentGame_Transpiler failed. Mod version not compatible with game version."); - SaveUtil.Logger.LogError(ex); - } - return matcher.InstructionEnumeration(); - } - - public static void MonitorStream(Stream fileStream) - { - PerformanceMonitor.BeginStream(UseCompressSave ? _compressionStream : fileStream); - } - - public static BinaryWriter CreateBinaryWriter(FileStream fileStream) - { - if (UseCompressSave) - { - SaveUtil.Logger.LogDebug("Begin compress save"); - WriteHeader(fileStream); - _compressionStream = _compressionTypeForSaving switch - { - CompressionType.LZ4 => new CompressionStream(LZ4Wrapper, _compressionLevelForSaving, fileStream, _compressBuffer, true), - CompressionType.Zstd => new CompressionStream(ZstdWrapper, _compressionLevelForSaving, fileStream, _compressBuffer, true), - CompressionType.None => new CompressionStream(NoneWrapper, 0, fileStream, _compressBuffer, true), - _ => _compressionStream - }; - - return ((CompressionStream)_compressionStream).BufferWriter; - } - - SaveUtil.Logger.LogDebug("Begin normal save"); - return new BinaryWriter(fileStream); - } - - public static long FileLengthWrite0(FileStream fileStream, long offset, SeekOrigin origin) - { - return UseCompressSave ? 
0L : fileStream.Seek(offset, origin); - } - - public static void FileLengthWrite1(BinaryWriter binaryWriter, long value) - { - if (UseCompressSave) return; - binaryWriter.Write(value); - } - - public static void DisposeCompressionStream() - { - if (!UseCompressSave) return; - if (_compressionStream == null) - { - UseCompressSave = false; - return; - } - var writeflag = _compressionStream.CanWrite; - Stream stream = null; - if (writeflag && _compressionTypeForSaving == CompressionType.None) - { - stream = ((CompressionStream)_compressionStream).OutStream; - } - // Dispose need to be done before fstream closed. - _compressionStream.Dispose(); - _compressionStream = null; - if (!writeflag) return; - // Reset UseCompressSave after writing to file - if (stream != null) - { - // Ugly implementation, but it works. May find a better solution someday. - var saveLen = stream.Seek(0L, SeekOrigin.End); - stream.Seek(6L, SeekOrigin.Begin); - var writer = new BinaryWriter(stream); - writer.Write(saveLen); - writer.Dispose(); - } - _compressionTypeForSaving = CompressionTypeForSaves; - _compressionLevelForSaving = CompressionLevelForSaves; - UseCompressSave = false; - } - - [HarmonyTranspiler] - [HarmonyPatch(typeof(GameSave), "LoadCurrentGame")] - [HarmonyPatch(typeof(GameSave), "LoadGameDesc")] - [HarmonyPatch(typeof(GameSave), "ReadHeader")] - [HarmonyPatch(typeof(GameSave), "ReadHeaderAndDescAndProperty")] - [HarmonyPatch(typeof(GameSave), "ReadModes")] - private static IEnumerable LoadCurrentGame_Transpiler(IEnumerable instructions, ILGenerator generator) - { - /* using (BinaryReader binaryReader = new BinaryReader(fileStream)) => Create decompressionStream and replace binaryReader. - * set PerformanceMonitor.BeginStream to decompressionStream. - * if (fileStream.Length != binaryReader.ReadInt64()) => Replace binaryReader.ReadInt64() to pass file length check. - * fileStream.Seek((long)num2, SeekOrigin.Current); => Use decompressionStream.Read to seek forward - * binaryReader.Dispose(); => Dispose decompressionStream before fileStream close. - */ - var matcher = new CodeMatcher(instructions, generator); - try - { - matcher.MatchForward(false, - new CodeMatch(OpCodes.Newobj, AccessTools.Constructor(typeof(BinaryReader), new [] { typeof(FileStream) })) - ).Set( - OpCodes.Call, AccessTools.Method(typeof(PatchSave), "CreateBinaryReader") - ).MatchForward(false, - new CodeMatch(OpCodes.Call, AccessTools.Method(typeof(PerformanceMonitor), "BeginStream")) - ); - - if (matcher.IsValid) - matcher.Set(OpCodes.Call, AccessTools.Method(typeof(PatchSave), "MonitorStream")); - - matcher.Start().MatchForward(false, - new CodeMatch(OpCodes.Callvirt, AccessTools.Method(typeof(BinaryReader), "ReadInt64")) - ).Set( - OpCodes.Call, AccessTools.Method(typeof(PatchSave), "FileLengthRead") - ).MatchForward(false, - new CodeMatch(OpCodes.Callvirt, AccessTools.Method(typeof(IDisposable), "Dispose")) - ).Advance(1).Insert( - new CodeInstruction(OpCodes.Call, AccessTools.Method(typeof(PatchSave), "DisposeCompressionStream")) - ).MatchBack(false, - new CodeMatch(OpCodes.Callvirt, AccessTools.Method(typeof(Stream), "Seek")) - ); - if (matcher.IsValid) - matcher.Set(OpCodes.Call, AccessTools.Method(typeof(PatchSave), "ReadSeek")); - - return matcher.InstructionEnumeration(); - } - catch (Exception ex) - { - SaveUtil.Logger.LogError( - "LoadCurrentGame_Transpiler failed. 
Mod version not compatible with game version."); - SaveUtil.Logger.LogError(ex); - } - - return matcher.InstructionEnumeration(); - } - - public static BinaryReader CreateBinaryReader(FileStream fileStream) - { - switch (_compressionTypeForLoading = SaveUtil.SaveGetCompressType(fileStream)) - { - case CompressionType.LZ4: - UseCompressSave = true; - _compressionStream = new DecompressionStream(LZ4Wrapper, fileStream); - return new PeekableReader((DecompressionStream)_compressionStream); - case CompressionType.Zstd: - UseCompressSave = true; - _compressionStream = new DecompressionStream(ZstdWrapper, fileStream); - return new PeekableReader((DecompressionStream)_compressionStream); - case CompressionType.None: - UseCompressSave = false; - fileStream.Seek(0, SeekOrigin.Begin); - return new BinaryReader(fileStream); - default: - throw new ArgumentOutOfRangeException(); - } - } - - public static long FileLengthRead(BinaryReader binaryReader) - { - switch (_compressionTypeForLoading) - { - case CompressionType.LZ4: - case CompressionType.Zstd: - binaryReader.ReadInt64(); - return _compressionStream.Length; - case CompressionType.None: - return binaryReader.ReadInt64(); - default: - throw new ArgumentOutOfRangeException(); - } - } - - public static long ReadSeek(FileStream fileStream, long offset, SeekOrigin origin) - { - switch (_compressionTypeForLoading) - { - case CompressionType.LZ4: - case CompressionType.Zstd: - while (offset > 0) - offset -= _compressionStream.Read(_compressBuffer.OutBuffer, 0, (int)offset); - return _compressionStream.Position; - case CompressionType.None: - return fileStream.Seek(offset, origin); - default: - throw new ArgumentOutOfRangeException(); - } - } -} \ No newline at end of file diff --git a/CompressSave/CompressSave.csproj b/CompressSave/CompressSave.csproj deleted file mode 100644 index eac343c..0000000 --- a/CompressSave/CompressSave.csproj +++ /dev/null @@ -1,30 +0,0 @@ - - - - - CompressSave - org.soardev.compresssave - DSP MOD - CompressSave - 1.3.5 - true - latest - net472 - https://nuget.bepinex.dev/v3/index.json - - - - - - - - - - - - - - - - - - diff --git a/CompressSave/I18N.cs b/CompressSave/I18N.cs deleted file mode 100644 index eaa43cb..0000000 --- a/CompressSave/I18N.cs +++ /dev/null @@ -1,128 +0,0 @@ -using System; -using System.Collections.Generic; -using HarmonyLib; - -public static class I18N -{ - private static bool _initialized; - private static bool _dirty; - - public static void Init() - { - Harmony.CreateAndPatchAll(typeof(I18N)); - } - - public static bool Initialized() => _initialized; - private static readonly List> Keys = []; - private static readonly Dictionary> Strings = []; - - public static void Add(string key, string enus, string zhcn = null) - { - if (zhcn == null && key == enus) return; - Keys.Add(Tuple.Create(key, enus, -1)); - if (Strings.TryGetValue(2052, out var zhcnList)) - { - zhcnList.Add(string.IsNullOrEmpty(zhcn) ? enus : zhcn); - } - else - { - Strings.Add(2052, [string.IsNullOrEmpty(zhcn) ? 
enus : zhcn]); - } - _dirty = true; - } - - private static void ApplyIndexers() - { - var indexer = Localization.namesIndexer; - var index = indexer.Count; - var len = Keys.Count; - for (var i = 0; i < len; i++) - { - var (key, def, value) = Keys[i]; - if (value >= 0) continue; - if (indexer.TryGetValue(key, out var idx)) - { - Keys[i] = Tuple.Create(key, def, idx); - continue; - } - indexer[key] = index; - Keys[i] = Tuple.Create(key, def, index); - index++; - } - _dirty = false; - var strings = Localization.strings; - if (strings == null) return; - var len2 = strings.Length; - for (var i = 0; i < len2; i++) - { - ApplyLanguage(i); - if (i != Localization.currentLanguageIndex) continue; - Localization.currentStrings = Localization.strings[i]; - Localization.currentFloats = Localization.floats[i]; - } - } - - private static void ApplyLanguage(int index) - { - var indexerLength = Localization.namesIndexer.Count; - var strs = Localization.strings[index]; - if (strs == null) return; - if (strs.Length < indexerLength) - { - var newStrs = new string[indexerLength]; - Array.Copy(strs, newStrs, strs.Length); - strs = newStrs; - Localization.strings[index] = strs; - } - var floats = Localization.floats[index]; - if (floats != null) - { - if (floats.Length < indexerLength) - { - var newFloats = new float[indexerLength]; - Array.Copy(floats, newFloats, floats.Length); - floats = newFloats; - Localization.floats[index] = floats; - } - } - - var keyLength = Keys.Count; - if (Strings.TryGetValue(Localization.Languages[index].lcId, out var list)) - { - for (var j = 0; j < keyLength; j++) - { - strs[Keys[j].Item3] = list[j]; - } - } - else - { - for (var j = 0; j < keyLength; j++) - { - strs[Keys[j].Item3] = Keys[j].Item2; - } - } - } - - public static void Apply() - { - if (!_initialized) return; - if (Keys.Count == 0) return; - if (!_dirty) return; - ApplyIndexers(); - } - - [HarmonyPostfix, HarmonyPriority(Priority.Last), HarmonyPatch(typeof(Localization), nameof(Localization.LoadSettings))] - private static void Localization_LoadSettings_Postfix() - { - if (_initialized) return; - _initialized = true; - Apply(); - } - - [HarmonyPostfix, HarmonyPriority(Priority.Last), HarmonyPatch(typeof(Localization), nameof(Localization.LoadLanguage))] - private static void Localization_LoadLanguage_Postfix(int index) - { - if (!_initialized) return; - ApplyLanguage(index); - } -} \ No newline at end of file diff --git a/CompressSave/LZ4WrapC/CMakeLists.txt b/CompressSave/LZ4WrapC/CMakeLists.txt deleted file mode 100644 index d726611..0000000 --- a/CompressSave/LZ4WrapC/CMakeLists.txt +++ /dev/null @@ -1,16 +0,0 @@ -cmake_minimum_required(VERSION 3.2) - -project(lz4wrap) - -add_library(lz4wrap SHARED - lz4/lz4.c lz4/lz4.h - lz4/lz4frame.c lz4/lz4frame.h lz4/lz4frame_static.h - lz4/lz4hc.c lz4/lz4hc.h - lz4/xxhash.c lz4/xxhash.h - dllmain.c LZ4Wrap.c LZ4Wrap.h) - -target_compile_definitions(lz4wrap PRIVATE LZ4WRAP_EXPORTS) -target_include_directories(lz4wrap PRIVATE lz4) -if(WIN32) - set_target_properties(lz4wrap PROPERTIES PREFIX "") -endif() diff --git a/CompressSave/LZ4WrapC/LZ4Wrap.c b/CompressSave/LZ4WrapC/LZ4Wrap.c deleted file mode 100644 index 542b318..0000000 --- a/CompressSave/LZ4WrapC/LZ4Wrap.c +++ /dev/null @@ -1,154 +0,0 @@ -// MathLibrary.cpp : Defines the exported functions for the DLL. 
-#include "LZ4Wrap.h" - -#include -#include -#include -#include -#define Check(assert,errorcode) if(!(assert)) {printf(LZ4F_getErrorName(errorcode)); return errorcode;} - - - -static CContext* CreateCompressContext() -{ - return (CContext*)malloc(sizeof(CContext)); -} - -void __stdcall CompressContextFree(CContext* ctx) -{ - if (ctx != NULL) - { - LZ4F_freeCompressionContext(ctx->cctx); - LZ4F_freeCDict(ctx->dict); - free(ctx); - } -} - -static LZ4F_preferences_t kPrefs = { - { LZ4F_max4MB, LZ4F_blockLinked, LZ4F_contentChecksumEnabled, LZ4F_frame, - 0 /* unknown content size */, 0/* no dictID */ , LZ4F_blockChecksumEnabled }, - 0, /* compression level; 0 == default */ - 0, /* autoflush */ - 0, /* favor decompression speed */ - { 0, 0, 0 }, /* reserved, must be set to 0 */ -}; - -size_t __stdcall CompressBufferBound(size_t inBufferSize) -{ - return LZ4F_compressBound(inBufferSize, &kPrefs) + LZ4F_HEADER_SIZE_MAX; -} - -CContext* CreateCompressContextFromBuffer(void* dict, size_t dictSize) { - CContext* ctx = CreateCompressContext(); - if (dict) - ctx->dict = LZ4F_createCDict(dict, dictSize); - else - ctx->dict = NULL; - if (ctx == NULL) return NULL; - LZ4F_compressionContext_t innerctx; - - LZ4F_errorCode_t ctxCreation = LZ4F_createCompressionContext(&innerctx, LZ4F_VERSION); - if (LZ4F_isError(ctxCreation)) - { - LZ4F_freeCompressionContext(innerctx); - CompressContextFree(ctx); - return NULL; - } - ctx->cctx = innerctx; - - return ctx; -} - -size_t __stdcall CompressBegin(CContext** pctx, int compressionLevel, void* outBuff , size_t outCapacity, void* dict, size_t dictSize) -{ - CContext* ctx = CreateCompressContextFromBuffer(dict, dictSize); - if (ctx == NULL) return -1; - - if (outCapacity < LZ4F_HEADER_SIZE_MAX || outCapacity < LZ4F_compressBound(0, &kPrefs)) return LZ4F_ERROR_dstMaxSize_tooSmall; - - kPrefs.compressionLevel = compressionLevel; - /* write frame header */ - size_t const headerSize = ctx->dict == NULL - ? LZ4F_compressBegin(ctx->cctx, outBuff, outCapacity, &kPrefs) - : LZ4F_compressBegin_usingCDict(ctx->cctx, outBuff, outCapacity, ctx->dict, &kPrefs); - - if (LZ4F_isError(headerSize)) - { - return headerSize; - } - *pctx = ctx; - return headerSize; -} - - -size_t __stdcall CompressUpdate(CContext* ctx,void* dstBuffer, size_t dstCapacity,const void* srcBuffer, size_t srcSize) -{ - size_t result = ctx->dict == NULL - ? 
LZ4F_compressUpdate(ctx->cctx, dstBuffer, dstCapacity, srcBuffer, srcSize, NULL) - : LZ4F_compressFrame_usingCDict(ctx->cctx, dstBuffer, dstCapacity, srcBuffer, srcSize, ctx->dict, NULL); - if (LZ4F_isError(result)) - { - const char *str = LZ4F_getErrorName(result); - fprintf(stderr, "%s\n", str); - } - return result; -} - -size_t __stdcall CompressEnd(CContext* ctx, void* dstBuffer, size_t dstCapacity) -{ - size_t writeSize = LZ4F_compressEnd(ctx->cctx, dstBuffer, dstCapacity, NULL); - return writeSize; -} - -static size_t get_block_size(const LZ4F_frameInfo_t* info) { - switch (info->blockSizeID) { - case LZ4F_default: - case LZ4F_max64KB: return 1 << 16; - case LZ4F_max256KB: return 1 << 18; - case LZ4F_max1MB: return 1 << 20; - case LZ4F_max4MB: return 1 << 22; - default: - return -1; - } -} -//return: input bytes expects for next call -size_t __stdcall DecompressBegin(DContext **pdctx,void *inBuffer,size_t *inBufferSize, size_t *blockSize, void* dict, size_t dictSize) -{ - DContext* dctx = (DContext*)malloc(sizeof(DContext)); - size_t const dctxStatus = LZ4F_createDecompressionContext(&dctx->dctx, LZ4F_VERSION); - Check(!LZ4F_isError(dctxStatus), dctxStatus); - - Check(*inBufferSize >= LZ4F_HEADER_SIZE_MAX, LZ4F_ERROR_dstMaxSize_tooSmall); - - LZ4F_frameInfo_t info; - size_t const fires = LZ4F_getFrameInfo(dctx->dctx, &info, inBuffer, inBufferSize); - Check(!LZ4F_isError(fires), fires); - - *blockSize = get_block_size(&info); - dctx->dict = dict; - dctx->dictSize = dictSize; - *pdctx = dctx; - return fires; -} - -void __stdcall DecompressContextReset(DContext* dctx) -{ - LZ4F_resetDecompressionContext(dctx->dctx); -} - -size_t __stdcall DecompressUpdate(DContext* dctx, void* outBuffer, size_t * outBufferSize, void* inBuffer, size_t * inBufferSize) -{ - size_t ret = dctx->dict == NULL - ? 
LZ4F_decompress(dctx->dctx, outBuffer, outBufferSize, inBuffer, inBufferSize, NULL) - : LZ4F_decompress_usingDict(dctx->dctx, outBuffer, outBufferSize, inBuffer, inBufferSize, dctx->dict, dctx->dictSize, NULL); - Check(!LZ4F_isError(ret), ret); - return ret; -} - -size_t __stdcall DecompressEnd(DContext* dctx) -{ - if (!dctx) return 0; - size_t r = LZ4F_freeDecompressionContext(dctx->dctx); - free(dctx); - return r; -} diff --git a/CompressSave/LZ4WrapC/LZ4Wrap.h b/CompressSave/LZ4WrapC/LZ4Wrap.h deleted file mode 100644 index 6a76374..0000000 --- a/CompressSave/LZ4WrapC/LZ4Wrap.h +++ /dev/null @@ -1,46 +0,0 @@ -#pragma once -#define LZ4F_STATIC_LINKING_ONLY -#if defined(__cplusplus) -#define API_EXTERN_C extern "C" -#else -#define API_EXTERN_C -#endif -#ifdef LZ4WRAP_EXPORTS -#define LZ4API API_EXTERN_C __declspec(dllexport) -#else -#define LZ4API API_EXTERN_C __declspec(dllimport) -#endif - -#include -#include - -typedef struct -{ - LZ4F_cctx* cctx; - LZ4F_CDict* dict; -} CContext; - -typedef struct -{ - LZ4F_dctx* dctx; - void* dict; - size_t dictSize; -} DContext; - -LZ4API void __stdcall CompressContextFree(CContext* ctx); - -LZ4API size_t __stdcall CompressBufferBound(size_t inBufferSize); - -LZ4API size_t __stdcall CompressBegin(CContext** ctx, int compressionLevel, void* outBuff, size_t outCapacity, void* dict, size_t dictSize); - -LZ4API size_t __stdcall CompressUpdate(CContext* ctx, void* dstBuffer, size_t dstCapacity, const void* srcBuffer, size_t srcSize); - -LZ4API size_t __stdcall CompressEnd(CContext* ctx, void* dstBuffer, size_t dstCapacity); - -LZ4API size_t __stdcall DecompressBegin(DContext** pdctx, void* inBuffer, size_t* inBufferSize, size_t* blockSize, void* dict, size_t dictSize); - -LZ4API void __stdcall DecompressContextReset(DContext* dctx); - -LZ4API size_t __stdcall DecompressUpdate(DContext* dctx, void* outBuffer, size_t* outBufferSize, void* inBuffer, size_t* inBufferSize); - -LZ4API size_t __stdcall DecompressEnd(DContext* dctx); diff --git a/CompressSave/LZ4WrapC/dllmain.c b/CompressSave/LZ4WrapC/dllmain.c deleted file mode 100644 index 896c9b5..0000000 --- a/CompressSave/LZ4WrapC/dllmain.c +++ /dev/null @@ -1,20 +0,0 @@ -// dllmain.cpp : Defines the entry point for the DLL application. -#include - -BOOL APIENTRY DllMain( HMODULE hModule, - DWORD ul_reason_for_call, - LPVOID lpReserved - ) -{ - switch (ul_reason_for_call) - { - case DLL_PROCESS_ATTACH: - DisableThreadLibraryCalls(hModule); - break; - case DLL_THREAD_ATTACH: - case DLL_THREAD_DETACH: - case DLL_PROCESS_DETACH: - break; - } - return TRUE; -} diff --git a/CompressSave/LZ4WrapC/lz4/LICENSE b/CompressSave/LZ4WrapC/lz4/LICENSE deleted file mode 100644 index 4884916..0000000 --- a/CompressSave/LZ4WrapC/lz4/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -LZ4 Library -Copyright (c) 2011-2020, Yann Collet -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, this - list of conditions and the following disclaimer in the documentation and/or - other materials provided with the distribution. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/CompressSave/LZ4WrapC/lz4/README.md b/CompressSave/LZ4WrapC/lz4/README.md deleted file mode 100644 index a86cd0d..0000000 --- a/CompressSave/LZ4WrapC/lz4/README.md +++ /dev/null @@ -1,187 +0,0 @@ -LZ4 - Library Files -================================ - -The `/lib` directory contains many files, but depending on project's objectives, -not all of them are required. -Limited systems may want to reduce the nb of source files to include -as a way to reduce binary size and dependencies. - -Capabilities are added at the "level" granularity, detailed below. - -#### Level 1 : Minimal LZ4 build - -The minimum required is **`lz4.c`** and **`lz4.h`**, -which provides the fast compression and decompression algorithms. -They generate and decode data using the [LZ4 block format]. - - -#### Level 2 : High Compression variant - -For more compression ratio at the cost of compression speed, -the High Compression variant called **lz4hc** is available. -Add files **`lz4hc.c`** and **`lz4hc.h`**. -This variant also compresses data using the [LZ4 block format], -and depends on regular `lib/lz4.*` source files. - - -#### Level 3 : Frame support, for interoperability - -In order to produce compressed data compatible with `lz4` command line utility, -it's necessary to use the [official interoperable frame format]. -This format is generated and decoded automatically by the **lz4frame** library. -Its public API is described in `lib/lz4frame.h`. -In order to work properly, lz4frame needs all other modules present in `/lib`, -including, lz4 and lz4hc, and also **xxhash**. -So it's necessary to also include `xxhash.c` and `xxhash.h`. - - -#### Level 4 : File compression operations - -As a helper around file operations, -the library has been recently extended with `lz4file.c` and `lz4file.h` -(still considered experimental at the time of this writing). -These helpers allow opening, reading, writing, and closing files -using transparent LZ4 compression / decompression. -As a consequence, using `lz4file` adds a dependency on ``. - -`lz4file` relies on `lz4frame` in order to produce compressed data -conformant to the [LZ4 Frame format] specification. -Consequently, to enable this capability, -it's necessary to include all `*.c` and `*.h` files from `lib/` directory. - - -#### Advanced / Experimental API - -Definitions which are not guaranteed to remain stable in future versions, -are protected behind macros, such as `LZ4_STATIC_LINKING_ONLY`. -As the name suggests, these definitions should only be invoked -in the context of static linking ***only***. -Otherwise, dependent application may fail on API or ABI break in the future. -The associated symbols are also not exposed by the dynamic library by default. 
-Should they be nonetheless needed, it's possible to force their publication -by using build macros `LZ4_PUBLISH_STATIC_FUNCTIONS` -and `LZ4F_PUBLISH_STATIC_FUNCTIONS`. - - -#### Build macros - -The following build macro can be selected to adjust source code behavior at compilation time : - -- `LZ4_FAST_DEC_LOOP` : this triggers a speed optimized decompression loop, more powerful on modern cpus. - This loop works great on `x86`, `x64` and `aarch64` cpus, and is automatically enabled for them. - It's also possible to enable or disable it manually, by passing `LZ4_FAST_DEC_LOOP=1` or `0` to the preprocessor. - For example, with `gcc` : `-DLZ4_FAST_DEC_LOOP=1`, - and with `make` : `CPPFLAGS+=-DLZ4_FAST_DEC_LOOP=1 make lz4`. - -- `LZ4_DISTANCE_MAX` : control the maximum offset that the compressor will allow. - Set to 65535 by default, which is the maximum value supported by lz4 format. - Reducing maximum distance will reduce opportunities for LZ4 to find matches, - hence will produce a worse compression ratio. - Setting a smaller max distance could allow compatibility with specific decoders with limited memory budget. - This build macro only influences the compressed output of the compressor. - -- `LZ4_DISABLE_DEPRECATE_WARNINGS` : invoking a deprecated function will make the compiler generate a warning. - This is meant to invite users to update their source code. - Should this be a problem, it's generally possible to make the compiler ignore these warnings, - for example with `-Wno-deprecated-declarations` on `gcc`, - or `_CRT_SECURE_NO_WARNINGS` for Visual Studio. - This build macro offers another project-specific method - by defining `LZ4_DISABLE_DEPRECATE_WARNINGS` before including the LZ4 header files. - -- `LZ4_FORCE_SW_BITCOUNT` : by default, the compression algorithm tries to determine lengths - by using bitcount instructions, generally implemented as fast single instructions in many cpus. - In case the target cpus doesn't support it, or compiler intrinsic doesn't work, or feature bad performance, - it's possible to use an optimized software path instead. - This is achieved by setting this build macros. - In most cases, it's not expected to be necessary, - but it can be legitimately considered for less common platforms. - -- `LZ4_ALIGN_TEST` : alignment test ensures that the memory area - passed as argument to become a compression state is suitably aligned. - This test can be disabled if it proves flaky, by setting this value to 0. - -- `LZ4_USER_MEMORY_FUNCTIONS` : replace calls to ``'s `malloc()`, `calloc()` and `free()` - by user-defined functions, which must be named `LZ4_malloc()`, `LZ4_calloc()` and `LZ4_free()`. - User functions must be available at link time. - -- `LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION` : - Remove support of dynamic memory allocation. - For more details, see description of this macro in `lib/lz4.c`. - -- `LZ4_FREESTANDING` : by setting this build macro to 1, - LZ4/HC removes dependencies on the C standard library, - including allocation functions and `memmove()`, `memcpy()`, and `memset()`. - This build macro is designed to help use LZ4/HC in restricted environments - (embedded, bootloader, etc). - For more details, see description of this macro in `lib/lz4.h`. - -- `LZ4_HEAPMODE` : Select how stateless compression functions like `LZ4_compress_default()` - allocate memory for their hash table, - in memory stack (0:default, fastest), or in memory heap (1:requires malloc()). 
- -- `LZ4HC_HEAPMODE` : Select how stateless HC compression functions like `LZ4_compress_HC()` - allocate memory for their workspace: - in stack (0), or in heap (1:default). - Since workspace is rather large, stack can be inconvenient, hence heap mode is recommended. - -- `LZ4F_HEAPMODE` : selects how `LZ4F_compressFrame()` allocates the compression state, - either on stack (default, value 0) or using heap memory (value 1). - - -#### Makefile variables - -The following `Makefile` variables can be selected to alter the profile of produced binaries : -- `BUILD_SHARED` : generate `libzstd` dynamic library (enabled by default) -- `BUILD_STATIC` : generate `libzstd` static library (enabled by default) - - -#### Amalgamation - -lz4 source code can be amalgamated into a single file. -One can combine all source code into `lz4_all.c` by using following command: -``` -cat lz4.c lz4hc.c lz4frame.c > lz4_all.c -``` -(`cat` file order is important) then compile `lz4_all.c`. -All `*.h` files present in `/lib` remain necessary to compile `lz4_all.c`. - - -#### Windows : using MinGW+MSYS to create DLL - -DLL can be created using MinGW+MSYS with the `make liblz4` command. -This command creates `dll\liblz4.dll` and the import library `dll\liblz4.lib`. -To override the `dlltool` command when cross-compiling on Linux, just set the `DLLTOOL` variable. Example of cross compilation on Linux with mingw-w64 64 bits: -``` -make BUILD_STATIC=no CC=x86_64-w64-mingw32-gcc DLLTOOL=x86_64-w64-mingw32-dlltool OS=Windows_NT -``` -The import library is only required with Visual C++. -The header files `lz4.h`, `lz4hc.h`, `lz4frame.h` and the dynamic library -`dll\liblz4.dll` are required to compile a project using gcc/MinGW. -The dynamic library has to be added to linking options. -It means that if a project that uses LZ4 consists of a single `test-dll.c` -file it should be linked with `dll\liblz4.dll`. For example: -``` - $(CC) $(CFLAGS) -Iinclude/ test-dll.c -o test-dll dll\liblz4.dll -``` -The compiled executable will require LZ4 DLL which is available at `dll\liblz4.dll`. - - -#### Miscellaneous - -Other files present in the directory are not source code. They are : - - - `LICENSE` : contains the BSD license text - - `Makefile` : `make` script to compile and install lz4 library (static and dynamic) - - `liblz4.pc.in` : for `pkg-config` (used in `make install`) - - `README.md` : this file - -[official interoperable frame format]: ../doc/lz4_Frame_format.md -[LZ4 Frame format]: ../doc/lz4_Frame_format.md -[LZ4 block format]: ../doc/lz4_Block_format.md - - -#### License - -All source material within __lib__ directory are BSD 2-Clause licensed. -See [LICENSE](LICENSE) for details. -The license is also reminded at the top of each source file. diff --git a/CompressSave/LZ4WrapC/lz4/lz4.c b/CompressSave/LZ4WrapC/lz4/lz4.c deleted file mode 100644 index 0982f95..0000000 --- a/CompressSave/LZ4WrapC/lz4/lz4.c +++ /dev/null @@ -1,2751 +0,0 @@ -/* - LZ4 - Fast LZ compression algorithm - Copyright (C) 2011-2020, Yann Collet. - - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - LZ4 homepage : http://www.lz4.org - - LZ4 source repository : https://github.com/lz4/lz4 -*/ - -/*-************************************ -* Tuning parameters -**************************************/ -/* - * LZ4_HEAPMODE : - * Select how stateless compression functions like `LZ4_compress_default()` - * allocate memory for their hash table, - * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()). - */ -#ifndef LZ4_HEAPMODE -# define LZ4_HEAPMODE 0 -#endif - -/* - * LZ4_ACCELERATION_DEFAULT : - * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0 - */ -#define LZ4_ACCELERATION_DEFAULT 1 -/* - * LZ4_ACCELERATION_MAX : - * Any "acceleration" value higher than this threshold - * get treated as LZ4_ACCELERATION_MAX instead (fix #876) - */ -#define LZ4_ACCELERATION_MAX 65537 - - -/*-************************************ -* CPU Feature Detection -**************************************/ -/* LZ4_FORCE_MEMORY_ACCESS - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method is portable but violate C standard. - * It can generate buggy code on targets which assembly generation depends on alignment. - * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. 
- * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef LZ4_FORCE_MEMORY_ACCESS /* can be defined externally */ -# if defined(__GNUC__) && \ - ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \ - || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) -# define LZ4_FORCE_MEMORY_ACCESS 2 -# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__) -# define LZ4_FORCE_MEMORY_ACCESS 1 -# endif -#endif - -/* - * LZ4_FORCE_SW_BITCOUNT - * Define this parameter if your target system or compiler does not support hardware bit count - */ -#if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for WinCE doesn't support Hardware bit count */ -# undef LZ4_FORCE_SW_BITCOUNT /* avoid double def */ -# define LZ4_FORCE_SW_BITCOUNT -#endif - - - -/*-************************************ -* Dependency -**************************************/ -/* - * LZ4_SRC_INCLUDED: - * Amalgamation flag, whether lz4.c is included - */ -#ifndef LZ4_SRC_INCLUDED -# define LZ4_SRC_INCLUDED 1 -#endif - -#ifndef LZ4_STATIC_LINKING_ONLY -#define LZ4_STATIC_LINKING_ONLY -#endif - -#ifndef LZ4_DISABLE_DEPRECATE_WARNINGS -#define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */ -#endif - -#define LZ4_STATIC_LINKING_ONLY /* LZ4_DISTANCE_MAX */ -#include "lz4.h" -/* see also "memory routines" below */ - - -/*-************************************ -* Compiler Options -**************************************/ -#if defined(_MSC_VER) && (_MSC_VER >= 1400) /* Visual Studio 2005+ */ -# include /* only present in VS2005+ */ -# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ -# pragma warning(disable : 6237) /* disable: C6237: conditional expression is always 0 */ -#endif /* _MSC_VER */ - -#ifndef LZ4_FORCE_INLINE -# ifdef _MSC_VER /* Visual Studio */ -# define LZ4_FORCE_INLINE static __forceinline -# else -# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ -# ifdef __GNUC__ -# define LZ4_FORCE_INLINE static inline __attribute__((always_inline)) -# else -# define LZ4_FORCE_INLINE static inline -# endif -# else -# define LZ4_FORCE_INLINE static -# endif /* __STDC_VERSION__ */ -# endif /* _MSC_VER */ -#endif /* LZ4_FORCE_INLINE */ - -/* LZ4_FORCE_O2 and LZ4_FORCE_INLINE - * gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy8, - * together with a simple 8-byte copy loop as a fall-back path. - * However, this optimization hurts the decompression speed by >30%, - * because the execution does not go to the optimized loop - * for typical compressible data, and all of the preamble checks - * before going to the fall-back path become useless overhead. - * This optimization happens only with the -O3 flag, and -O2 generates - * a simple 8-byte copy loop. - * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy8 - * functions are annotated with __attribute__((optimize("O2"))), - * and also LZ4_wildCopy8 is forcibly inlined, so that the O2 attribute - * of LZ4_wildCopy8 does not affect the compression speed. 
- */ -#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__) && !defined(__clang__) -# define LZ4_FORCE_O2 __attribute__((optimize("O2"))) -# undef LZ4_FORCE_INLINE -# define LZ4_FORCE_INLINE static __inline __attribute__((optimize("O2"),always_inline)) -#else -# define LZ4_FORCE_O2 -#endif - -#if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__) -# define expect(expr,value) (__builtin_expect ((expr),(value)) ) -#else -# define expect(expr,value) (expr) -#endif - -#ifndef likely -#define likely(expr) expect((expr) != 0, 1) -#endif -#ifndef unlikely -#define unlikely(expr) expect((expr) != 0, 0) -#endif - -/* Should the alignment test prove unreliable, for some reason, - * it can be disabled by setting LZ4_ALIGN_TEST to 0 */ -#ifndef LZ4_ALIGN_TEST /* can be externally provided */ -# define LZ4_ALIGN_TEST 1 -#endif - - -/*-************************************ -* Memory routines -**************************************/ - -/*! LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION : - * Disable relatively high-level LZ4/HC functions that use dynamic memory - * allocation functions (malloc(), calloc(), free()). - * - * Note that this is a compile-time switch. And since it disables - * public/stable LZ4 v1 API functions, we don't recommend using this - * symbol to generate a library for distribution. - * - * The following public functions are removed when this symbol is defined. - * - lz4 : LZ4_createStream, LZ4_freeStream, - * LZ4_createStreamDecode, LZ4_freeStreamDecode, LZ4_create (deprecated) - * - lz4hc : LZ4_createStreamHC, LZ4_freeStreamHC, - * LZ4_createHC (deprecated), LZ4_freeHC (deprecated) - * - lz4frame, lz4file : All LZ4F_* functions - */ -#if defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) -# define ALLOC(s) lz4_error_memory_allocation_is_disabled -# define ALLOC_AND_ZERO(s) lz4_error_memory_allocation_is_disabled -# define FREEMEM(p) lz4_error_memory_allocation_is_disabled -#elif defined(LZ4_USER_MEMORY_FUNCTIONS) -/* memory management functions can be customized by user project. - * Below functions must exist somewhere in the Project - * and be available at link time */ -void* LZ4_malloc(size_t s); -void* LZ4_calloc(size_t n, size_t s); -void LZ4_free(void* p); -# define ALLOC(s) LZ4_malloc(s) -# define ALLOC_AND_ZERO(s) LZ4_calloc(1,s) -# define FREEMEM(p) LZ4_free(p) -#else -# include /* malloc, calloc, free */ -# define ALLOC(s) malloc(s) -# define ALLOC_AND_ZERO(s) calloc(1,s) -# define FREEMEM(p) free(p) -#endif - -#if ! 
LZ4_FREESTANDING -# include /* memset, memcpy */ -#endif -#if !defined(LZ4_memset) -# define LZ4_memset(p,v,s) memset((p),(v),(s)) -#endif -#define MEM_INIT(p,v,s) LZ4_memset((p),(v),(s)) - - -/*-************************************ -* Common Constants -**************************************/ -#define MINMATCH 4 - -#define WILDCOPYLENGTH 8 -#define LASTLITERALS 5 /* see ../doc/lz4_Block_format.md#parsing-restrictions */ -#define MFLIMIT 12 /* see ../doc/lz4_Block_format.md#parsing-restrictions */ -#define MATCH_SAFEGUARD_DISTANCE ((2*WILDCOPYLENGTH) - MINMATCH) /* ensure it's possible to write 2 x wildcopyLength without overflowing output buffer */ -#define FASTLOOP_SAFE_DISTANCE 64 -static const int LZ4_minLength = (MFLIMIT+1); - -#define KB *(1 <<10) -#define MB *(1 <<20) -#define GB *(1U<<30) - -#define LZ4_DISTANCE_ABSOLUTE_MAX 65535 -#if (LZ4_DISTANCE_MAX > LZ4_DISTANCE_ABSOLUTE_MAX) /* max supported by LZ4 format */ -# error "LZ4_DISTANCE_MAX is too big : must be <= 65535" -#endif - -#define ML_BITS 4 -#define ML_MASK ((1U<=1) -# include -#else -# ifndef assert -# define assert(condition) ((void)0) -# endif -#endif - -#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use after variable declarations */ - -#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2) -# include - static int g_debuglog_enable = 1; -# define DEBUGLOG(l, ...) { \ - if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \ - fprintf(stderr, __FILE__ " %i: ", __LINE__); \ - fprintf(stderr, __VA_ARGS__); \ - fprintf(stderr, " \n"); \ - } } -#else -# define DEBUGLOG(l, ...) {} /* disabled */ -#endif - -static int LZ4_isAligned(const void* ptr, size_t alignment) -{ - return ((size_t)ptr & (alignment -1)) == 0; -} - - -/*-************************************ -* Types -**************************************/ -#include -#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -# include - typedef uint8_t BYTE; - typedef uint16_t U16; - typedef uint32_t U32; - typedef int32_t S32; - typedef uint64_t U64; - typedef uintptr_t uptrval; -#else -# if UINT_MAX != 4294967295UL -# error "LZ4 code (when not C++ or C99) assumes that sizeof(int) == 4" -# endif - typedef unsigned char BYTE; - typedef unsigned short U16; - typedef unsigned int U32; - typedef signed int S32; - typedef unsigned long long U64; - typedef size_t uptrval; /* generally true, except OpenVMS-64 */ -#endif - -#if defined(__x86_64__) - typedef U64 reg_t; /* 64-bits in x32 mode */ -#else - typedef size_t reg_t; /* 32-bits in x32 mode */ -#endif - -typedef enum { - notLimited = 0, - limitedOutput = 1, - fillOutput = 2 -} limitedOutput_directive; - - -/*-************************************ -* Reading and writing into memory -**************************************/ - -/** - * LZ4 relies on memcpy with a constant size being inlined. In freestanding - * environments, the compiler can't assume the implementation of memcpy() is - * standard compliant, so it can't apply its specialized memcpy() inlining - * logic. When possible, use __builtin_memcpy() to tell the compiler to analyze - * memcpy() as if it were standard compliant, so it can inline it in freestanding - * environments. This is needed when decompressing the Linux Kernel, for example. 
- */ -#if !defined(LZ4_memcpy) -# if defined(__GNUC__) && (__GNUC__ >= 4) -# define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size) -# else -# define LZ4_memcpy(dst, src, size) memcpy(dst, src, size) -# endif -#endif - -#if !defined(LZ4_memmove) -# if defined(__GNUC__) && (__GNUC__ >= 4) -# define LZ4_memmove __builtin_memmove -# else -# define LZ4_memmove memmove -# endif -#endif - -static unsigned LZ4_isLittleEndian(void) -{ - const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ - return one.c[0]; -} - - -#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2) -/* lie to the compiler about data alignment; use with caution */ - -static U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; } -static U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; } -static reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*) memPtr; } - -static void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } -static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; } - -#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef struct { U16 u16; } __attribute__((packed)) LZ4_unalign16; -typedef struct { U32 u32; } __attribute__((packed)) LZ4_unalign32; -typedef struct { reg_t uArch; } __attribute__((packed)) LZ4_unalignST; - -static U16 LZ4_read16(const void* ptr) { return ((const LZ4_unalign16*)ptr)->u16; } -static U32 LZ4_read32(const void* ptr) { return ((const LZ4_unalign32*)ptr)->u32; } -static reg_t LZ4_read_ARCH(const void* ptr) { return ((const LZ4_unalignST*)ptr)->uArch; } - -static void LZ4_write16(void* memPtr, U16 value) { ((LZ4_unalign16*)memPtr)->u16 = value; } -static void LZ4_write32(void* memPtr, U32 value) { ((LZ4_unalign32*)memPtr)->u32 = value; } - -#else /* safe and portable access using memcpy() */ - -static U16 LZ4_read16(const void* memPtr) -{ - U16 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val; -} - -static U32 LZ4_read32(const void* memPtr) -{ - U32 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val; -} - -static reg_t LZ4_read_ARCH(const void* memPtr) -{ - reg_t val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val; -} - -static void LZ4_write16(void* memPtr, U16 value) -{ - LZ4_memcpy(memPtr, &value, sizeof(value)); -} - -static void LZ4_write32(void* memPtr, U32 value) -{ - LZ4_memcpy(memPtr, &value, sizeof(value)); -} - -#endif /* LZ4_FORCE_MEMORY_ACCESS */ - - -static U16 LZ4_readLE16(const void* memPtr) -{ - if (LZ4_isLittleEndian()) { - return LZ4_read16(memPtr); - } else { - const BYTE* p = (const BYTE*)memPtr; - return (U16)((U16)p[0] + (p[1]<<8)); - } -} - -static void LZ4_writeLE16(void* memPtr, U16 value) -{ - if (LZ4_isLittleEndian()) { - LZ4_write16(memPtr, value); - } else { - BYTE* p = (BYTE*)memPtr; - p[0] = (BYTE) value; - p[1] = (BYTE)(value>>8); - } -} - -/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */ -LZ4_FORCE_INLINE -void LZ4_wildCopy8(void* dstPtr, const void* srcPtr, void* dstEnd) -{ - BYTE* d = (BYTE*)dstPtr; - const BYTE* s = (const BYTE*)srcPtr; - BYTE* const e = (BYTE*)dstEnd; - - do { LZ4_memcpy(d,s,8); d+=8; s+=8; } while (d= 16. 
*/ -LZ4_FORCE_INLINE void -LZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd) -{ - BYTE* d = (BYTE*)dstPtr; - const BYTE* s = (const BYTE*)srcPtr; - BYTE* const e = (BYTE*)dstEnd; - - do { LZ4_memcpy(d,s,16); LZ4_memcpy(d+16,s+16,16); d+=32; s+=32; } while (d= dstPtr + MINMATCH - * - there is at least 8 bytes available to write after dstEnd */ -LZ4_FORCE_INLINE void -LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset) -{ - BYTE v[8]; - - assert(dstEnd >= dstPtr + MINMATCH); - - switch(offset) { - case 1: - MEM_INIT(v, *srcPtr, 8); - break; - case 2: - LZ4_memcpy(v, srcPtr, 2); - LZ4_memcpy(&v[2], srcPtr, 2); -#if defined(_MSC_VER) && (_MSC_VER <= 1936) /* MSVC 2022 ver 17.6 or earlier */ -# pragma warning(push) -# pragma warning(disable : 6385) /* warning C6385: Reading invalid data from 'v'. */ -#endif - LZ4_memcpy(&v[4], v, 4); -#if defined(_MSC_VER) && (_MSC_VER <= 1936) /* MSVC 2022 ver 17.6 or earlier */ -# pragma warning(pop) -#endif - break; - case 4: - LZ4_memcpy(v, srcPtr, 4); - LZ4_memcpy(&v[4], srcPtr, 4); - break; - default: - LZ4_memcpy_using_offset_base(dstPtr, srcPtr, dstEnd, offset); - return; - } - - LZ4_memcpy(dstPtr, v, 8); - dstPtr += 8; - while (dstPtr < dstEnd) { - LZ4_memcpy(dstPtr, v, 8); - dstPtr += 8; - } -} -#endif - - -/*-************************************ -* Common functions -**************************************/ -static unsigned LZ4_NbCommonBytes (reg_t val) -{ - assert(val != 0); - if (LZ4_isLittleEndian()) { - if (sizeof(val) == 8) { -# if defined(_MSC_VER) && (_MSC_VER >= 1800) && (defined(_M_AMD64) && !defined(_M_ARM64EC)) && !defined(LZ4_FORCE_SW_BITCOUNT) -/*-************************************************************************************************* -* ARM64EC is a Microsoft-designed ARM64 ABI compatible with AMD64 applications on ARM64 Windows 11. -* The ARM64EC ABI does not support AVX/AVX2/AVX512 instructions, nor their relevant intrinsics -* including _tzcnt_u64. Therefore, we need to neuter the _tzcnt_u64 code path for ARM64EC. -****************************************************************************************************/ -# if defined(__clang__) && (__clang_major__ < 10) - /* Avoid undefined clang-cl intrinsics issue. - * See https://github.com/lz4/lz4/pull/1017 for details. 
*/ - return (unsigned)__builtin_ia32_tzcnt_u64(val) >> 3; -# else - /* x64 CPUS without BMI support interpret `TZCNT` as `REP BSF` */ - return (unsigned)_tzcnt_u64(val) >> 3; -# endif -# elif defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT) - unsigned long r = 0; - _BitScanForward64(&r, (U64)val); - return (unsigned)r >> 3; -# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \ - ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \ - !defined(LZ4_FORCE_SW_BITCOUNT) - return (unsigned)__builtin_ctzll((U64)val) >> 3; -# else - const U64 m = 0x0101010101010101ULL; - val ^= val - 1; - return (unsigned)(((U64)((val & (m - 1)) * m)) >> 56); -# endif - } else /* 32 bits */ { -# if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT) - unsigned long r; - _BitScanForward(&r, (U32)val); - return (unsigned)r >> 3; -# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \ - ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \ - !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT) - return (unsigned)__builtin_ctz((U32)val) >> 3; -# else - const U32 m = 0x01010101; - return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24; -# endif - } - } else /* Big Endian CPU */ { - if (sizeof(val)==8) { -# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \ - ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \ - !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT) - return (unsigned)__builtin_clzll((U64)val) >> 3; -# else -#if 1 - /* this method is probably faster, - * but adds a 128 bytes lookup table */ - static const unsigned char ctz7_tab[128] = { - 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, - 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, - 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, - 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, - 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, - 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, - 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, - 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, - }; - U64 const mask = 0x0101010101010101ULL; - U64 const t = (((val >> 8) - mask) | val) & mask; - return ctz7_tab[(t * 0x0080402010080402ULL) >> 57]; -#else - /* this method doesn't consume memory space like the previous one, - * but it contains several branches, - * that may end up slowing execution */ - static const U32 by32 = sizeof(val)*4; /* 32 on 64 bits (goal), 16 on 32 bits. - Just to avoid some static analyzer complaining about shift by 32 on 32-bits target. - Note that this code path is never triggered in 32-bits mode. 
*/ - unsigned r; - if (!(val>>by32)) { r=4; } else { r=0; val>>=by32; } - if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } - r += (!val); - return r; -#endif -# endif - } else /* 32 bits */ { -# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \ - ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \ - !defined(LZ4_FORCE_SW_BITCOUNT) - return (unsigned)__builtin_clz((U32)val) >> 3; -# else - val >>= 8; - val = ((((val + 0x00FFFF00) | 0x00FFFFFF) + val) | - (val + 0x00FF0000)) >> 24; - return (unsigned)val ^ 3; -# endif - } - } -} - - -#define STEPSIZE sizeof(reg_t) -LZ4_FORCE_INLINE -unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit) -{ - const BYTE* const pStart = pIn; - - if (likely(pIn < pInLimit-(STEPSIZE-1))) { - reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn); - if (!diff) { - pIn+=STEPSIZE; pMatch+=STEPSIZE; - } else { - return LZ4_NbCommonBytes(diff); - } } - - while (likely(pIn < pInLimit-(STEPSIZE-1))) { - reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn); - if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; } - pIn += LZ4_NbCommonBytes(diff); - return (unsigned)(pIn - pStart); - } - - if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; } - if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; } - if ((pIn compression run slower on incompressible data */ - - -/*-************************************ -* Local Structures and types -**************************************/ -typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t; - -/** - * This enum distinguishes several different modes of accessing previous - * content in the stream. - * - * - noDict : There is no preceding content. - * - withPrefix64k : Table entries up to ctx->dictSize before the current blob - * blob being compressed are valid and refer to the preceding - * content (of length ctx->dictSize), which is available - * contiguously preceding in memory the content currently - * being compressed. - * - usingExtDict : Like withPrefix64k, but the preceding content is somewhere - * else in memory, starting at ctx->dictionary with length - * ctx->dictSize. - * - usingDictCtx : Everything concerning the preceding content is - * in a separate context, pointed to by ctx->dictCtx. - * ctx->dictionary, ctx->dictSize, and table entries - * in the current context that refer to positions - * preceding the beginning of the current compression are - * ignored. Instead, ctx->dictCtx->dictionary and ctx->dictCtx - * ->dictSize describe the location and size of the preceding - * content, and matches are found by looking in the ctx - * ->dictCtx->hashTable. 
- */ -typedef enum { noDict = 0, withPrefix64k, usingExtDict, usingDictCtx } dict_directive; -typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive; - - -/*-************************************ -* Local Utils -**************************************/ -int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; } -const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; } -int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); } -int LZ4_sizeofState(void) { return sizeof(LZ4_stream_t); } - - -/*-**************************************** -* Internal Definitions, used only in Tests -*******************************************/ -#if defined (__cplusplus) -extern "C" { -#endif - -int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize); - -int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, - int compressedSize, int maxOutputSize, - const void* dictStart, size_t dictSize); -int LZ4_decompress_safe_partial_forceExtDict(const char* source, char* dest, - int compressedSize, int targetOutputSize, int dstCapacity, - const void* dictStart, size_t dictSize); -#if defined (__cplusplus) -} -#endif - -/*-****************************** -* Compression functions -********************************/ -LZ4_FORCE_INLINE U32 LZ4_hash4(U32 sequence, tableType_t const tableType) -{ - if (tableType == byU16) - return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1))); - else - return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG)); -} - -LZ4_FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType) -{ - const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG; - if (LZ4_isLittleEndian()) { - const U64 prime5bytes = 889523592379ULL; - return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog)); - } else { - const U64 prime8bytes = 11400714785074694791ULL; - return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog)); - } -} - -LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType) -{ - if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType); - return LZ4_hash4(LZ4_read32(p), tableType); -} - -LZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType) -{ - switch (tableType) - { - default: /* fallthrough */ - case clearedTable: { /* illegal! */ assert(0); return; } - case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = NULL; return; } - case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = 0; return; } - case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = 0; return; } - } -} - -LZ4_FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType) -{ - switch (tableType) - { - default: /* fallthrough */ - case clearedTable: /* fallthrough */ - case byPtr: { /* illegal! 
*/ assert(0); return; } - case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = idx; return; } - case byU16: { U16* hashTable = (U16*) tableBase; assert(idx < 65536); hashTable[h] = (U16)idx; return; } - } -} - -/* LZ4_putPosition*() : only used in byPtr mode */ -LZ4_FORCE_INLINE void LZ4_putPositionOnHash(const BYTE* p, U32 h, - void* tableBase, tableType_t const tableType) -{ - const BYTE** const hashTable = (const BYTE**)tableBase; - assert(tableType == byPtr); (void)tableType; - hashTable[h] = p; -} - -LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType) -{ - U32 const h = LZ4_hashPosition(p, tableType); - LZ4_putPositionOnHash(p, h, tableBase, tableType); -} - -/* LZ4_getIndexOnHash() : - * Index of match position registered in hash table. - * hash position must be calculated by using base+index, or dictBase+index. - * Assumption 1 : only valid if tableType == byU32 or byU16. - * Assumption 2 : h is presumed valid (within limits of hash table) - */ -LZ4_FORCE_INLINE U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType) -{ - LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2); - if (tableType == byU32) { - const U32* const hashTable = (const U32*) tableBase; - assert(h < (1U << (LZ4_MEMORY_USAGE-2))); - return hashTable[h]; - } - if (tableType == byU16) { - const U16* const hashTable = (const U16*) tableBase; - assert(h < (1U << (LZ4_MEMORY_USAGE-1))); - return hashTable[h]; - } - assert(0); return 0; /* forbidden case */ -} - -static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType) -{ - assert(tableType == byPtr); (void)tableType; - { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; } -} - -LZ4_FORCE_INLINE const BYTE* -LZ4_getPosition(const BYTE* p, - const void* tableBase, tableType_t tableType) -{ - U32 const h = LZ4_hashPosition(p, tableType); - return LZ4_getPositionOnHash(h, tableBase, tableType); -} - -LZ4_FORCE_INLINE void -LZ4_prepareTable(LZ4_stream_t_internal* const cctx, - const int inputSize, - const tableType_t tableType) { - /* If the table hasn't been used, it's guaranteed to be zeroed out, and is - * therefore safe to use no matter what mode we're in. Otherwise, we figure - * out if it's safe to leave as is or whether it needs to be reset. - */ - if ((tableType_t)cctx->tableType != clearedTable) { - assert(inputSize >= 0); - if ((tableType_t)cctx->tableType != tableType - || ((tableType == byU16) && cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU) - || ((tableType == byU32) && cctx->currentOffset > 1 GB) - || tableType == byPtr - || inputSize >= 4 KB) - { - DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", cctx); - MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE); - cctx->currentOffset = 0; - cctx->tableType = (U32)clearedTable; - } else { - DEBUGLOG(4, "LZ4_prepareTable: Re-use hash table (no reset)"); - } - } - - /* Adding a gap, so all previous entries are > LZ4_DISTANCE_MAX back, - * is faster than compressing without a gap. - * However, compressing with currentOffset == 0 is faster still, - * so we preserve that case. - */ - if (cctx->currentOffset != 0 && tableType == byU32) { - DEBUGLOG(5, "LZ4_prepareTable: adding 64KB to currentOffset"); - cctx->currentOffset += 64 KB; - } - - /* Finally, clear history */ - cctx->dictCtx = NULL; - cctx->dictionary = NULL; - cctx->dictSize = 0; -} - -/** LZ4_compress_generic() : - * inlined, to ensure branches are decided at compilation time. 
- * The following conditions are presumed already validated: - * - source != NULL - * - inputSize > 0 - */ -LZ4_FORCE_INLINE int LZ4_compress_generic_validated( - LZ4_stream_t_internal* const cctx, - const char* const source, - char* const dest, - const int inputSize, - int* inputConsumed, /* only written when outputDirective == fillOutput */ - const int maxOutputSize, - const limitedOutput_directive outputDirective, - const tableType_t tableType, - const dict_directive dictDirective, - const dictIssue_directive dictIssue, - const int acceleration) -{ - int result; - const BYTE* ip = (const BYTE*)source; - - U32 const startIndex = cctx->currentOffset; - const BYTE* base = (const BYTE*)source - startIndex; - const BYTE* lowLimit; - - const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx; - const BYTE* const dictionary = - dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary; - const U32 dictSize = - dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize; - const U32 dictDelta = - (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0; /* make indexes in dictCtx comparable with indexes in current context */ - - int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx); - U32 const prefixIdxLimit = startIndex - dictSize; /* used when dictDirective == dictSmall */ - const BYTE* const dictEnd = dictionary ? dictionary + dictSize : dictionary; - const BYTE* anchor = (const BYTE*) source; - const BYTE* const iend = ip + inputSize; - const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1; - const BYTE* const matchlimit = iend - LASTLITERALS; - - /* the dictCtx currentOffset is indexed on the start of the dictionary, - * while a dictionary in the current context precedes the currentOffset */ - const BYTE* dictBase = (dictionary == NULL) ? NULL : - (dictDirective == usingDictCtx) ? - dictionary + dictSize - dictCtx->currentOffset : - dictionary + dictSize - startIndex; - - BYTE* op = (BYTE*) dest; - BYTE* const olimit = op + maxOutputSize; - - U32 offset = 0; - U32 forwardH; - - DEBUGLOG(5, "LZ4_compress_generic_validated: srcSize=%i, tableType=%u", inputSize, tableType); - assert(ip != NULL); - if (tableType == byU16) assert(inputSize= 1); - - lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? dictSize : 0); - - /* Update context state */ - if (dictDirective == usingDictCtx) { - /* Subsequent linked blocks can't use the dictionary. */ - /* Instead, they use the block we just compressed. 
*/ - cctx->dictCtx = NULL; - cctx->dictSize = (U32)inputSize; - } else { - cctx->dictSize += (U32)inputSize; - } - cctx->currentOffset += (U32)inputSize; - cctx->tableType = (U32)tableType; - - if (inputSizehashTable, byPtr); - } else { - LZ4_putIndexOnHash(startIndex, h, cctx->hashTable, tableType); - } } - ip++; forwardH = LZ4_hashPosition(ip, tableType); - - /* Main Loop */ - for ( ; ; ) { - const BYTE* match; - BYTE* token; - const BYTE* filledIp; - - /* Find a match */ - if (tableType == byPtr) { - const BYTE* forwardIp = ip; - int step = 1; - int searchMatchNb = acceleration << LZ4_skipTrigger; - do { - U32 const h = forwardH; - ip = forwardIp; - forwardIp += step; - step = (searchMatchNb++ >> LZ4_skipTrigger); - - if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals; - assert(ip < mflimitPlusOne); - - match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType); - forwardH = LZ4_hashPosition(forwardIp, tableType); - LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType); - - } while ( (match+LZ4_DISTANCE_MAX < ip) - || (LZ4_read32(match) != LZ4_read32(ip)) ); - - } else { /* byU32, byU16 */ - - const BYTE* forwardIp = ip; - int step = 1; - int searchMatchNb = acceleration << LZ4_skipTrigger; - do { - U32 const h = forwardH; - U32 const current = (U32)(forwardIp - base); - U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType); - assert(matchIndex <= current); - assert(forwardIp - base < (ptrdiff_t)(2 GB - 1)); - ip = forwardIp; - forwardIp += step; - step = (searchMatchNb++ >> LZ4_skipTrigger); - - if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals; - assert(ip < mflimitPlusOne); - - if (dictDirective == usingDictCtx) { - if (matchIndex < startIndex) { - /* there was no match, try the dictionary */ - assert(tableType == byU32); - matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32); - match = dictBase + matchIndex; - matchIndex += dictDelta; /* make dictCtx index comparable with current context */ - lowLimit = dictionary; - } else { - match = base + matchIndex; - lowLimit = (const BYTE*)source; - } - } else if (dictDirective == usingExtDict) { - if (matchIndex < startIndex) { - DEBUGLOG(7, "extDict candidate: matchIndex=%5u < startIndex=%5u", matchIndex, startIndex); - assert(startIndex - matchIndex >= MINMATCH); - assert(dictBase); - match = dictBase + matchIndex; - lowLimit = dictionary; - } else { - match = base + matchIndex; - lowLimit = (const BYTE*)source; - } - } else { /* single continuous memory segment */ - match = base + matchIndex; - } - forwardH = LZ4_hashPosition(forwardIp, tableType); - LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType); - - DEBUGLOG(7, "candidate at pos=%u (offset=%u \n", matchIndex, current - matchIndex); - if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) { continue; } /* match outside of valid area */ - assert(matchIndex < current); - if ( ((tableType != byU16) || (LZ4_DISTANCE_MAX < LZ4_DISTANCE_ABSOLUTE_MAX)) - && (matchIndex+LZ4_DISTANCE_MAX < current)) { - continue; - } /* too far */ - assert((current - matchIndex) <= LZ4_DISTANCE_MAX); /* match now expected within distance */ - - if (LZ4_read32(match) == LZ4_read32(ip)) { - if (maybe_extMem) offset = current - matchIndex; - break; /* match found */ - } - - } while(1); - } - - /* Catch up */ - filledIp = ip; - while (((ip>anchor) & (match > lowLimit)) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; } - - /* Encode Literals */ - { unsigned const litLength = (unsigned)(ip - anchor); - token = op++; - if ((outputDirective == 
limitedOutput) && /* Check output buffer overflow */ - (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)) ) { - return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */ - } - if ((outputDirective == fillOutput) && - (unlikely(op + (litLength+240)/255 /* litlen */ + litLength /* literals */ + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit))) { - op--; - goto _last_literals; - } - if (litLength >= RUN_MASK) { - int len = (int)(litLength - RUN_MASK); - *token = (RUN_MASK<= 255 ; len-=255) *op++ = 255; - *op++ = (BYTE)len; - } - else *token = (BYTE)(litLength< olimit)) { - /* the match was too close to the end, rewind and go to last literals */ - op = token; - goto _last_literals; - } - - /* Encode Offset */ - if (maybe_extMem) { /* static test */ - DEBUGLOG(6, " with offset=%u (ext if > %i)", offset, (int)(ip - (const BYTE*)source)); - assert(offset <= LZ4_DISTANCE_MAX && offset > 0); - LZ4_writeLE16(op, (U16)offset); op+=2; - } else { - DEBUGLOG(6, " with offset=%u (same segment)", (U32)(ip - match)); - assert(ip-match <= LZ4_DISTANCE_MAX); - LZ4_writeLE16(op, (U16)(ip - match)); op+=2; - } - - /* Encode MatchLength */ - { unsigned matchCode; - - if ( (dictDirective==usingExtDict || dictDirective==usingDictCtx) - && (lowLimit==dictionary) /* match within extDict */ ) { - const BYTE* limit = ip + (dictEnd-match); - assert(dictEnd > match); - if (limit > matchlimit) limit = matchlimit; - matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit); - ip += (size_t)matchCode + MINMATCH; - if (ip==limit) { - unsigned const more = LZ4_count(limit, (const BYTE*)source, matchlimit); - matchCode += more; - ip += more; - } - DEBUGLOG(6, " with matchLength=%u starting in extDict", matchCode+MINMATCH); - } else { - matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit); - ip += (size_t)matchCode + MINMATCH; - DEBUGLOG(6, " with matchLength=%u", matchCode+MINMATCH); - } - - if ((outputDirective) && /* Check output buffer overflow */ - (unlikely(op + (1 + LASTLITERALS) + (matchCode+240)/255 > olimit)) ) { - if (outputDirective == fillOutput) { - /* Match description too long : reduce it */ - U32 newMatchCode = 15 /* in token */ - 1 /* to avoid needing a zero byte */ + ((U32)(olimit - op) - 1 - LASTLITERALS) * 255; - ip -= matchCode - newMatchCode; - assert(newMatchCode < matchCode); - matchCode = newMatchCode; - if (unlikely(ip <= filledIp)) { - /* We have already filled up to filledIp so if ip ends up less than filledIp - * we have positions in the hash table beyond the current position. This is - * a problem if we reuse the hash table. So we have to remove these positions - * from the hash table. - */ - const BYTE* ptr; - DEBUGLOG(5, "Clearing %u positions", (U32)(filledIp - ip)); - for (ptr = ip; ptr <= filledIp; ++ptr) { - U32 const h = LZ4_hashPosition(ptr, tableType); - LZ4_clearHash(h, cctx->hashTable, tableType); - } - } - } else { - assert(outputDirective == limitedOutput); - return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */ - } - } - if (matchCode >= ML_MASK) { - *token += ML_MASK; - matchCode -= ML_MASK; - LZ4_write32(op, 0xFFFFFFFF); - while (matchCode >= 4*255) { - op+=4; - LZ4_write32(op, 0xFFFFFFFF); - matchCode -= 4*255; - } - op += matchCode / 255; - *op++ = (BYTE)(matchCode % 255); - } else - *token += (BYTE)(matchCode); - } - /* Ensure we have enough space for the last literals. 
*/ - assert(!(outputDirective == fillOutput && op + 1 + LASTLITERALS > olimit)); - - anchor = ip; - - /* Test end of chunk */ - if (ip >= mflimitPlusOne) break; - - /* Fill table */ - { U32 const h = LZ4_hashPosition(ip-2, tableType); - if (tableType == byPtr) { - LZ4_putPositionOnHash(ip-2, h, cctx->hashTable, byPtr); - } else { - U32 const idx = (U32)((ip-2) - base); - LZ4_putIndexOnHash(idx, h, cctx->hashTable, tableType); - } } - - /* Test next position */ - if (tableType == byPtr) { - - match = LZ4_getPosition(ip, cctx->hashTable, tableType); - LZ4_putPosition(ip, cctx->hashTable, tableType); - if ( (match+LZ4_DISTANCE_MAX >= ip) - && (LZ4_read32(match) == LZ4_read32(ip)) ) - { token=op++; *token=0; goto _next_match; } - - } else { /* byU32, byU16 */ - - U32 const h = LZ4_hashPosition(ip, tableType); - U32 const current = (U32)(ip-base); - U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType); - assert(matchIndex < current); - if (dictDirective == usingDictCtx) { - if (matchIndex < startIndex) { - /* there was no match, try the dictionary */ - assert(tableType == byU32); - matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32); - match = dictBase + matchIndex; - lowLimit = dictionary; /* required for match length counter */ - matchIndex += dictDelta; - } else { - match = base + matchIndex; - lowLimit = (const BYTE*)source; /* required for match length counter */ - } - } else if (dictDirective==usingExtDict) { - if (matchIndex < startIndex) { - assert(dictBase); - match = dictBase + matchIndex; - lowLimit = dictionary; /* required for match length counter */ - } else { - match = base + matchIndex; - lowLimit = (const BYTE*)source; /* required for match length counter */ - } - } else { /* single memory segment */ - match = base + matchIndex; - } - LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType); - assert(matchIndex < current); - if ( ((dictIssue==dictSmall) ? (matchIndex >= prefixIdxLimit) : 1) - && (((tableType==byU16) && (LZ4_DISTANCE_MAX == LZ4_DISTANCE_ABSOLUTE_MAX)) ? 1 : (matchIndex+LZ4_DISTANCE_MAX >= current)) - && (LZ4_read32(match) == LZ4_read32(ip)) ) { - token=op++; - *token=0; - if (maybe_extMem) offset = current - matchIndex; - DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i", - (int)(anchor-(const BYTE*)source), 0, (int)(ip-(const BYTE*)source)); - goto _next_match; - } - } - - /* Prepare next loop */ - forwardH = LZ4_hashPosition(++ip, tableType); - - } - -_last_literals: - /* Encode Last Literals */ - { size_t lastRun = (size_t)(iend - anchor); - if ( (outputDirective) && /* Check output buffer overflow */ - (op + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > olimit)) { - if (outputDirective == fillOutput) { - /* adapt lastRun to fill 'dst' */ - assert(olimit >= op); - lastRun = (size_t)(olimit-op) - 1/*token*/; - lastRun -= (lastRun + 256 - RUN_MASK) / 256; /*additional length tokens*/ - } else { - assert(outputDirective == limitedOutput); - return 0; /* cannot compress within `dst` budget. 
Stored indexes in hash table are nonetheless fine */ - } - } - DEBUGLOG(6, "Final literal run : %i literals", (int)lastRun); - if (lastRun >= RUN_MASK) { - size_t accumulator = lastRun - RUN_MASK; - *op++ = RUN_MASK << ML_BITS; - for(; accumulator >= 255 ; accumulator-=255) *op++ = 255; - *op++ = (BYTE) accumulator; - } else { - *op++ = (BYTE)(lastRun< 0); - DEBUGLOG(5, "LZ4_compress_generic: compressed %i bytes into %i bytes", inputSize, result); - return result; -} - -/** LZ4_compress_generic() : - * inlined, to ensure branches are decided at compilation time; - * takes care of src == (NULL, 0) - * and forward the rest to LZ4_compress_generic_validated */ -LZ4_FORCE_INLINE int LZ4_compress_generic( - LZ4_stream_t_internal* const cctx, - const char* const src, - char* const dst, - const int srcSize, - int *inputConsumed, /* only written when outputDirective == fillOutput */ - const int dstCapacity, - const limitedOutput_directive outputDirective, - const tableType_t tableType, - const dict_directive dictDirective, - const dictIssue_directive dictIssue, - const int acceleration) -{ - DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, dstCapacity=%i", - srcSize, dstCapacity); - - if ((U32)srcSize > (U32)LZ4_MAX_INPUT_SIZE) { return 0; } /* Unsupported srcSize, too large (or negative) */ - if (srcSize == 0) { /* src == NULL supported if srcSize == 0 */ - if (outputDirective != notLimited && dstCapacity <= 0) return 0; /* no output, can't write anything */ - DEBUGLOG(5, "Generating an empty block"); - assert(outputDirective == notLimited || dstCapacity >= 1); - assert(dst != NULL); - dst[0] = 0; - if (outputDirective == fillOutput) { - assert (inputConsumed != NULL); - *inputConsumed = 0; - } - return 1; - } - assert(src != NULL); - - return LZ4_compress_generic_validated(cctx, src, dst, srcSize, - inputConsumed, /* only written into if outputDirective == fillOutput */ - dstCapacity, outputDirective, - tableType, dictDirective, dictIssue, acceleration); -} - - -int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration) -{ - LZ4_stream_t_internal* const ctx = & LZ4_initStream(state, sizeof(LZ4_stream_t)) -> internal_donotuse; - assert(ctx != NULL); - if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT; - if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX; - if (maxOutputSize >= LZ4_compressBound(inputSize)) { - if (inputSize < LZ4_64Klimit) { - return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, byU16, noDict, noDictIssue, acceleration); - } else { - const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32; - return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration); - } - } else { - if (inputSize < LZ4_64Klimit) { - return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration); - } else { - const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32; - return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, noDict, noDictIssue, acceleration); - } - } -} - -/** - * LZ4_compress_fast_extState_fastReset() : - * A variant of LZ4_compress_fast_extState(). - * - * Using this variant avoids an expensive initialization step. 
It is only safe - * to call if the state buffer is known to be correctly initialized already - * (see comment in lz4.h on LZ4_resetStream_fast() for a definition of - * "correctly initialized"). - */ -int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration) -{ - LZ4_stream_t_internal* const ctx = &((LZ4_stream_t*)state)->internal_donotuse; - if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT; - if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX; - assert(ctx != NULL); - - if (dstCapacity >= LZ4_compressBound(srcSize)) { - if (srcSize < LZ4_64Klimit) { - const tableType_t tableType = byU16; - LZ4_prepareTable(ctx, srcSize, tableType); - if (ctx->currentOffset) { - return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, dictSmall, acceleration); - } else { - return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration); - } - } else { - const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32; - LZ4_prepareTable(ctx, srcSize, tableType); - return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration); - } - } else { - if (srcSize < LZ4_64Klimit) { - const tableType_t tableType = byU16; - LZ4_prepareTable(ctx, srcSize, tableType); - if (ctx->currentOffset) { - return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, dictSmall, acceleration); - } else { - return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration); - } - } else { - const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32; - LZ4_prepareTable(ctx, srcSize, tableType); - return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration); - } - } -} - - -int LZ4_compress_fast(const char* src, char* dest, int srcSize, int dstCapacity, int acceleration) -{ - int result; -#if (LZ4_HEAPMODE) - LZ4_stream_t* const ctxPtr = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */ - if (ctxPtr == NULL) return 0; -#else - LZ4_stream_t ctx; - LZ4_stream_t* const ctxPtr = &ctx; -#endif - result = LZ4_compress_fast_extState(ctxPtr, src, dest, srcSize, dstCapacity, acceleration); - -#if (LZ4_HEAPMODE) - FREEMEM(ctxPtr); -#endif - return result; -} - - -int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity) -{ - return LZ4_compress_fast(src, dst, srcSize, dstCapacity, 1); -} - - -/* Note!: This function leaves the stream in an unclean/broken state! - * It is not safe to subsequently use the same state with a _fastReset() or - * _continue() call without resetting it. 
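For orientation, a minimal round-trip with the one-shot entry points defined in this file (LZ4_compress_default / LZ4_decompress_safe). This is an illustrative sketch, not part of the removed source; the sample string and buffer sizes are made up.

    #include <stdio.h>
    #include <string.h>
    #include "lz4.h"

    int roundtrip_example(void)
    {
        const char src[] = "LZ4 one-shot round-trip example, repeated text text text.";
        int const srcSize = (int)sizeof(src);
        char compressed[LZ4_COMPRESSBOUND(sizeof(src))];   /* worst-case output size from lz4.h */
        char restored[sizeof(src)];

        /* LZ4_compress_default is LZ4_compress_fast with acceleration == 1 (see above) */
        int const cSize = LZ4_compress_default(src, compressed, srcSize, (int)sizeof(compressed));
        if (cSize <= 0) return 1;                          /* 0 means the data did not fit */

        /* the _safe decoder validates both input and output bounds */
        int const dSize = LZ4_decompress_safe(compressed, restored, cSize, (int)sizeof(restored));
        if (dSize != srcSize || memcmp(src, restored, (size_t)srcSize) != 0) return 1;

        printf("%d -> %d bytes\n", srcSize, cSize);
        return 0;
    }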
*/ -static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize) -{ - void* const s = LZ4_initStream(state, sizeof (*state)); - assert(s != NULL); (void)s; - - if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) { /* compression success is guaranteed */ - return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1); - } else { - if (*srcSizePtr < LZ4_64Klimit) { - return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, 1); - } else { - tableType_t const addrMode = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32; - return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, 1); - } } -} - - -int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize) -{ -#if (LZ4_HEAPMODE) - LZ4_stream_t* const ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */ - if (ctx == NULL) return 0; -#else - LZ4_stream_t ctxBody; - LZ4_stream_t* const ctx = &ctxBody; -#endif - - int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize); - -#if (LZ4_HEAPMODE) - FREEMEM(ctx); -#endif - return result; -} - - - -/*-****************************** -* Streaming functions -********************************/ - -#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) -LZ4_stream_t* LZ4_createStream(void) -{ - LZ4_stream_t* const lz4s = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); - LZ4_STATIC_ASSERT(sizeof(LZ4_stream_t) >= sizeof(LZ4_stream_t_internal)); - DEBUGLOG(4, "LZ4_createStream %p", lz4s); - if (lz4s == NULL) return NULL; - LZ4_initStream(lz4s, sizeof(*lz4s)); - return lz4s; -} -#endif - -static size_t LZ4_stream_t_alignment(void) -{ -#if LZ4_ALIGN_TEST - typedef struct { char c; LZ4_stream_t t; } t_a; - return sizeof(t_a) - sizeof(LZ4_stream_t); -#else - return 1; /* effectively disabled */ -#endif -} - -LZ4_stream_t* LZ4_initStream (void* buffer, size_t size) -{ - DEBUGLOG(5, "LZ4_initStream"); - if (buffer == NULL) { return NULL; } - if (size < sizeof(LZ4_stream_t)) { return NULL; } - if (!LZ4_isAligned(buffer, LZ4_stream_t_alignment())) return NULL; - MEM_INIT(buffer, 0, sizeof(LZ4_stream_t_internal)); - return (LZ4_stream_t*)buffer; -} - -/* resetStream is now deprecated, - * prefer initStream() which is more general */ -void LZ4_resetStream (LZ4_stream_t* LZ4_stream) -{ - DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream); - MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t_internal)); -} - -void LZ4_resetStream_fast(LZ4_stream_t* ctx) { - LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32); -} - -#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) -int LZ4_freeStream (LZ4_stream_t* LZ4_stream) -{ - if (!LZ4_stream) return 0; /* support free on NULL */ - DEBUGLOG(5, "LZ4_freeStream %p", LZ4_stream); - FREEMEM(LZ4_stream); - return (0); -} -#endif - - -#define HASH_UNIT sizeof(reg_t) -int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize) -{ - LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse; - const tableType_t tableType = byU32; - const BYTE* p = (const BYTE*)dictionary; - const BYTE* const dictEnd = p + dictSize; - U32 idx32; - - DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary, LZ4_dict); - - /* It's necessary to reset the context, - * and not 
just continue it with prepareTable() - * to avoid any risk of generating overflowing matchIndex - * when compressing using this dictionary */ - LZ4_resetStream(LZ4_dict); - - /* We always increment the offset by 64 KB, since, if the dict is longer, - * we truncate it to the last 64k, and if it's shorter, we still want to - * advance by a whole window length so we can provide the guarantee that - * there are only valid offsets in the window, which allows an optimization - * in LZ4_compress_fast_continue() where it uses noDictIssue even when the - * dictionary isn't a full 64k. */ - dict->currentOffset += 64 KB; - - if (dictSize < (int)HASH_UNIT) { - return 0; - } - - if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB; - dict->dictionary = p; - dict->dictSize = (U32)(dictEnd - p); - dict->tableType = (U32)tableType; - idx32 = dict->currentOffset - dict->dictSize; - - while (p <= dictEnd-HASH_UNIT) { - U32 const h = LZ4_hashPosition(p, tableType); - LZ4_putIndexOnHash(idx32, h, dict->hashTable, tableType); - p+=3; idx32+=3; - } - - return (int)dict->dictSize; -} - -void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream) -{ - const LZ4_stream_t_internal* dictCtx = (dictionaryStream == NULL) ? NULL : - &(dictionaryStream->internal_donotuse); - - DEBUGLOG(4, "LZ4_attach_dictionary (%p, %p, size %u)", - workingStream, dictionaryStream, - dictCtx != NULL ? dictCtx->dictSize : 0); - - if (dictCtx != NULL) { - /* If the current offset is zero, we will never look in the - * external dictionary context, since there is no value a table - * entry can take that indicate a miss. In that case, we need - * to bump the offset to something non-zero. - */ - if (workingStream->internal_donotuse.currentOffset == 0) { - workingStream->internal_donotuse.currentOffset = 64 KB; - } - - /* Don't actually attach an empty dictionary. - */ - if (dictCtx->dictSize == 0) { - dictCtx = NULL; - } - } - workingStream->internal_donotuse.dictCtx = dictCtx; -} - - -static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize) -{ - assert(nextSize >= 0); - if (LZ4_dict->currentOffset + (unsigned)nextSize > 0x80000000) { /* potential ptrdiff_t overflow (32-bits mode) */ - /* rescale hash table */ - U32 const delta = LZ4_dict->currentOffset - 64 KB; - const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize; - int i; - DEBUGLOG(4, "LZ4_renormDictT"); - for (i=0; ihashTable[i] < delta) LZ4_dict->hashTable[i]=0; - else LZ4_dict->hashTable[i] -= delta; - } - LZ4_dict->currentOffset = 64 KB; - if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB; - LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize; - } -} - - -int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, - const char* source, char* dest, - int inputSize, int maxOutputSize, - int acceleration) -{ - const tableType_t tableType = byU32; - LZ4_stream_t_internal* const streamPtr = &LZ4_stream->internal_donotuse; - const char* dictEnd = streamPtr->dictSize ? 
(const char*)streamPtr->dictionary + streamPtr->dictSize : NULL; - - DEBUGLOG(5, "LZ4_compress_fast_continue (inputSize=%i, dictSize=%u)", inputSize, streamPtr->dictSize); - - LZ4_renormDictT(streamPtr, inputSize); /* fix index overflow */ - if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT; - if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX; - - /* invalidate tiny dictionaries */ - if ( (streamPtr->dictSize < 4) /* tiny dictionary : not enough for a hash */ - && (dictEnd != source) /* prefix mode */ - && (inputSize > 0) /* tolerance : don't lose history, in case next invocation would use prefix mode */ - && (streamPtr->dictCtx == NULL) /* usingDictCtx */ - ) { - DEBUGLOG(5, "LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small", streamPtr->dictSize, streamPtr->dictionary); - /* remove dictionary existence from history, to employ faster prefix mode */ - streamPtr->dictSize = 0; - streamPtr->dictionary = (const BYTE*)source; - dictEnd = source; - } - - /* Check overlapping input/dictionary space */ - { const char* const sourceEnd = source + inputSize; - if ((sourceEnd > (const char*)streamPtr->dictionary) && (sourceEnd < dictEnd)) { - streamPtr->dictSize = (U32)(dictEnd - sourceEnd); - if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB; - if (streamPtr->dictSize < 4) streamPtr->dictSize = 0; - streamPtr->dictionary = (const BYTE*)dictEnd - streamPtr->dictSize; - } - } - - /* prefix mode : source data follows dictionary */ - if (dictEnd == source) { - if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) - return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, dictSmall, acceleration); - else - return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, noDictIssue, acceleration); - } - - /* external dictionary mode */ - { int result; - if (streamPtr->dictCtx) { - /* We depend here on the fact that dictCtx'es (produced by - * LZ4_loadDict) guarantee that their tables contain no references - * to offsets between dictCtx->currentOffset - 64 KB and - * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe - * to use noDictIssue even when the dict isn't a full 64 KB. - */ - if (inputSize > 4 KB) { - /* For compressing large blobs, it is faster to pay the setup - * cost to copy the dictionary's tables into the active context, - * so that the compression loop is only looking into one table. 
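As a usage-level illustration of the streaming path handled here: compressing consecutive chunks of one contiguous buffer as dependent blocks, so already-compressed data remains in place and acts as the prefix window for the next call. The chunk size and helper name are invented for the sketch; a real consumer must also frame the per-block compressed sizes.

    #include "lz4.h"

    #define CHUNK (16 * 1024)

    /* Compress `total` bytes from one contiguous buffer in CHUNK-sized dependent blocks. */
    int compress_chunks_example(const char* buf, int total, char* dst, int dstCapacity)
    {
        LZ4_stream_t* const s = LZ4_createStream();
        int inPos = 0, outPos = 0;
        if (s == NULL) return -1;
        while (inPos < total) {
            int const n = (total - inPos < CHUNK) ? (total - inPos) : CHUNK;
            int const written = LZ4_compress_fast_continue(s, buf + inPos, dst + outPos,
                                                           n, dstCapacity - outPos, 1);
            if (written <= 0) { LZ4_freeStream(s); return -1; }  /* dst too small, or error */
            inPos  += n;
            outPos += written;
        }
        LZ4_freeStream(s);
        return outPos;  /* total compressed size; block boundaries must be stored separately */
    }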
- */ - LZ4_memcpy(streamPtr, streamPtr->dictCtx, sizeof(*streamPtr)); - result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration); - } else { - result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingDictCtx, noDictIssue, acceleration); - } - } else { /* small data <= 4 KB */ - if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) { - result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, dictSmall, acceleration); - } else { - result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration); - } - } - streamPtr->dictionary = (const BYTE*)source; - streamPtr->dictSize = (U32)inputSize; - return result; - } -} - - -/* Hidden debug function, to force-test external dictionary mode */ -int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize) -{ - LZ4_stream_t_internal* const streamPtr = &LZ4_dict->internal_donotuse; - int result; - - LZ4_renormDictT(streamPtr, srcSize); - - if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) { - result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, dictSmall, 1); - } else { - result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, noDictIssue, 1); - } - - streamPtr->dictionary = (const BYTE*)source; - streamPtr->dictSize = (U32)srcSize; - - return result; -} - - -/*! LZ4_saveDict() : - * If previously compressed data block is not guaranteed to remain available at its memory location, - * save it into a safer place (char* safeBuffer). - * Note : no need to call LZ4_loadDict() afterwards, dictionary is immediately usable, - * one can therefore call LZ4_compress_fast_continue() right after. - * @return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error. - */ -int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize) -{ - LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse; - - DEBUGLOG(5, "LZ4_saveDict : dictSize=%i, safeBuffer=%p", dictSize, safeBuffer); - - if ((U32)dictSize > 64 KB) { dictSize = 64 KB; } /* useless to define a dictionary > 64 KB */ - if ((U32)dictSize > dict->dictSize) { dictSize = (int)dict->dictSize; } - - if (safeBuffer == NULL) assert(dictSize == 0); - if (dictSize > 0) { - const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize; - assert(dict->dictionary); - LZ4_memmove(safeBuffer, previousDictEnd - dictSize, (size_t)dictSize); - } - - dict->dictionary = (const BYTE*)safeBuffer; - dict->dictSize = (U32)dictSize; - - return dictSize; -} - - - -/*-******************************* - * Decompression functions - ********************************/ - -typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive; - -#undef MIN -#define MIN(a,b) ( (a) < (b) ? 
(a) : (b) ) - - -/* variant for decompress_unsafe() - * does not know end of input - * presumes input is well formed - * note : will consume at least one byte */ -static size_t read_long_length_no_check(const BYTE** pp) -{ - size_t b, l = 0; - do { b = **pp; (*pp)++; l += b; } while (b==255); - DEBUGLOG(6, "read_long_length_no_check: +length=%zu using %zu input bytes", l, l/255 + 1) - return l; -} - -/* core decoder variant for LZ4_decompress_fast*() - * for legacy support only : these entry points are deprecated. - * - Presumes input is correctly formed (no defense vs malformed inputs) - * - Does not know input size (presume input buffer is "large enough") - * - Decompress a full block (only) - * @return : nb of bytes read from input. - * Note : this variant is not optimized for speed, just for maintenance. - * the goal is to remove support of decompress_fast*() variants by v2.0 -**/ -LZ4_FORCE_INLINE int -LZ4_decompress_unsafe_generic( - const BYTE* const istart, - BYTE* const ostart, - int decompressedSize, - - size_t prefixSize, - const BYTE* const dictStart, /* only if dict==usingExtDict */ - const size_t dictSize /* note: =0 if dictStart==NULL */ - ) -{ - const BYTE* ip = istart; - BYTE* op = (BYTE*)ostart; - BYTE* const oend = ostart + decompressedSize; - const BYTE* const prefixStart = ostart - prefixSize; - - DEBUGLOG(5, "LZ4_decompress_unsafe_generic"); - if (dictStart == NULL) assert(dictSize == 0); - - while (1) { - /* start new sequence */ - unsigned token = *ip++; - - /* literals */ - { size_t ll = token >> ML_BITS; - if (ll==15) { - /* long literal length */ - ll += read_long_length_no_check(&ip); - } - if ((size_t)(oend-op) < ll) return -1; /* output buffer overflow */ - LZ4_memmove(op, ip, ll); /* support in-place decompression */ - op += ll; - ip += ll; - if ((size_t)(oend-op) < MFLIMIT) { - if (op==oend) break; /* end of block */ - DEBUGLOG(5, "invalid: literals end at distance %zi from end of block", oend-op); - /* incorrect end of block : - * last match must start at least MFLIMIT==12 bytes before end of output block */ - return -1; - } } - - /* match */ - { size_t ml = token & 15; - size_t const offset = LZ4_readLE16(ip); - ip+=2; - - if (ml==15) { - /* long literal length */ - ml += read_long_length_no_check(&ip); - } - ml += MINMATCH; - - if ((size_t)(oend-op) < ml) return -1; /* output buffer overflow */ - - { const BYTE* match = op - offset; - - /* out of range */ - if (offset > (size_t)(op - prefixStart) + dictSize) { - DEBUGLOG(6, "offset out of range"); - return -1; - } - - /* check special case : extDict */ - if (offset > (size_t)(op - prefixStart)) { - /* extDict scenario */ - const BYTE* const dictEnd = dictStart + dictSize; - const BYTE* extMatch = dictEnd - (offset - (size_t)(op-prefixStart)); - size_t const extml = (size_t)(dictEnd - extMatch); - if (extml > ml) { - /* match entirely within extDict */ - LZ4_memmove(op, extMatch, ml); - op += ml; - ml = 0; - } else { - /* match split between extDict & prefix */ - LZ4_memmove(op, extMatch, extml); - op += extml; - ml -= extml; - } - match = prefixStart; - } - - /* match copy - slow variant, supporting overlap copy */ - { size_t u; - for (u=0; u= ipmax before start of loop. Returns initial_error if so. - * @error (output) - error code. Must be set to 0 before call. 
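For a concrete instance of this byte-run length encoding: a literal run of 300 bytes is stored as the token nibble 15 (RUN_MASK) followed by the extra bytes 255 and 30, since 15 + 255 + 30 = 300; the reader keeps accumulating bytes until it sees a value below 255, which terminates the run.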
-**/ -typedef size_t Rvl_t; -static const Rvl_t rvl_error = (Rvl_t)(-1); -LZ4_FORCE_INLINE Rvl_t -read_variable_length(const BYTE** ip, const BYTE* ilimit, - int initial_check) -{ - Rvl_t s, length = 0; - assert(ip != NULL); - assert(*ip != NULL); - assert(ilimit != NULL); - if (initial_check && unlikely((*ip) >= ilimit)) { /* read limit reached */ - return rvl_error; - } - do { - s = **ip; - (*ip)++; - length += s; - if (unlikely((*ip) > ilimit)) { /* read limit reached */ - return rvl_error; - } - /* accumulator overflow detection (32-bit mode only) */ - if ((sizeof(length)<8) && unlikely(length > ((Rvl_t)(-1)/2)) ) { - return rvl_error; - } - } while (s==255); - - return length; -} - -/*! LZ4_decompress_generic() : - * This generic decompression function covers all use cases. - * It shall be instantiated several times, using different sets of directives. - * Note that it is important for performance that this function really get inlined, - * in order to remove useless branches during compilation optimization. - */ -LZ4_FORCE_INLINE int -LZ4_decompress_generic( - const char* const src, - char* const dst, - int srcSize, - int outputSize, /* If endOnInput==endOnInputSize, this value is `dstCapacity` */ - - earlyEnd_directive partialDecoding, /* full, partial */ - dict_directive dict, /* noDict, withPrefix64k, usingExtDict */ - const BYTE* const lowPrefix, /* always <= dst, == dst when no prefix */ - const BYTE* const dictStart, /* only if dict==usingExtDict */ - const size_t dictSize /* note : = 0 if noDict */ - ) -{ - if ((src == NULL) || (outputSize < 0)) { return -1; } - - { const BYTE* ip = (const BYTE*) src; - const BYTE* const iend = ip + srcSize; - - BYTE* op = (BYTE*) dst; - BYTE* const oend = op + outputSize; - BYTE* cpy; - - const BYTE* const dictEnd = (dictStart == NULL) ? NULL : dictStart + dictSize; - - const int checkOffset = (dictSize < (int)(64 KB)); - - - /* Set up the "end" pointers for the shortcut. */ - const BYTE* const shortiend = iend - 14 /*maxLL*/ - 2 /*offset*/; - const BYTE* const shortoend = oend - 14 /*maxLL*/ - 18 /*maxML*/; - - const BYTE* match; - size_t offset; - unsigned token; - size_t length; - - - DEBUGLOG(5, "LZ4_decompress_generic (srcSize:%i, dstSize:%i)", srcSize, outputSize); - - /* Special cases */ - assert(lowPrefix <= op); - if (unlikely(outputSize==0)) { - /* Empty output buffer */ - if (partialDecoding) return 0; - return ((srcSize==1) && (*ip==0)) ? 0 : -1; - } - if (unlikely(srcSize==0)) { return -1; } - - /* LZ4_FAST_DEC_LOOP: - * designed for modern OoO performance cpus, - * where copying reliably 32-bytes is preferable to an unpredictable branch. - * note : fast loop may show a regression for some client arm chips. 
*/ -#if LZ4_FAST_DEC_LOOP - if ((oend - op) < FASTLOOP_SAFE_DISTANCE) { - DEBUGLOG(6, "skip fast decode loop"); - goto safe_decode; - } - - /* Fast loop : decode sequences as long as output < oend-FASTLOOP_SAFE_DISTANCE */ - DEBUGLOG(6, "using fast decode loop"); - while (1) { - /* Main fastloop assertion: We can always wildcopy FASTLOOP_SAFE_DISTANCE */ - assert(oend - op >= FASTLOOP_SAFE_DISTANCE); - assert(ip < iend); - token = *ip++; - length = token >> ML_BITS; /* literal length */ - - /* decode literal length */ - if (length == RUN_MASK) { - size_t const addl = read_variable_length(&ip, iend-RUN_MASK, 1); - if (addl == rvl_error) { - DEBUGLOG(6, "error reading long literal length"); - goto _output_error; - } - length += addl; - if (unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */ - if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */ - - /* copy literals */ - cpy = op+length; - LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH); - if ((cpy>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; } - LZ4_wildCopy32(op, ip, cpy); - ip += length; op = cpy; - } else { - cpy = op+length; - DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe", (unsigned)length); - /* We don't need to check oend, since we check it once for each loop below */ - if (ip > iend-(16 + 1/*max lit + offset + nextToken*/)) { goto safe_literal_copy; } - /* Literals can only be <= 14, but hope compilers optimize better when copy by a register size */ - LZ4_memcpy(op, ip, 16); - ip += length; op = cpy; - } - - /* get offset */ - offset = LZ4_readLE16(ip); ip+=2; - DEBUGLOG(6, " offset = %zu", offset); - match = op - offset; - assert(match <= op); /* overflow check */ - - /* get matchlength */ - length = token & ML_MASK; - - if (length == ML_MASK) { - size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0); - if (addl == rvl_error) { - DEBUGLOG(6, "error reading long match length"); - goto _output_error; - } - length += addl; - length += MINMATCH; - if (unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */ - if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { - DEBUGLOG(6, "Error : offset outside buffers"); - goto _output_error; - } - if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) { - goto safe_match_copy; - } - } else { - length += MINMATCH; - if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) { - goto safe_match_copy; - } - - /* Fastpath check: skip LZ4_wildCopy32 when true */ - if ((dict == withPrefix64k) || (match >= lowPrefix)) { - if (offset >= 8) { - assert(match >= lowPrefix); - assert(match <= op); - assert(op + 18 <= oend); - - LZ4_memcpy(op, match, 8); - LZ4_memcpy(op+8, match+8, 8); - LZ4_memcpy(op+16, match+16, 2); - op += length; - continue; - } } } - - if ( checkOffset && (unlikely(match + dictSize < lowPrefix)) ) { - DEBUGLOG(6, "Error : pos=%zi, offset=%zi => outside buffers", op-lowPrefix, op-match); - goto _output_error; - } - /* match starting within external dictionary */ - if ((dict==usingExtDict) && (match < lowPrefix)) { - assert(dictEnd != NULL); - if (unlikely(op+length > oend-LASTLITERALS)) { - if (partialDecoding) { - DEBUGLOG(7, "partialDecoding: dictionary match, close to dstEnd"); - length = MIN(length, (size_t)(oend-op)); - } else { - DEBUGLOG(6, "end-of-block condition violated") - goto _output_error; - } } - - if (length <= (size_t)(lowPrefix-match)) { - /* match fits entirely within external dictionary : just copy */ - 
LZ4_memmove(op, dictEnd - (lowPrefix-match), length); - op += length; - } else { - /* match stretches into both external dictionary and current block */ - size_t const copySize = (size_t)(lowPrefix - match); - size_t const restSize = length - copySize; - LZ4_memcpy(op, dictEnd - copySize, copySize); - op += copySize; - if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */ - BYTE* const endOfMatch = op + restSize; - const BYTE* copyFrom = lowPrefix; - while (op < endOfMatch) { *op++ = *copyFrom++; } - } else { - LZ4_memcpy(op, lowPrefix, restSize); - op += restSize; - } } - continue; - } - - /* copy match within block */ - cpy = op + length; - - assert((op <= oend) && (oend-op >= 32)); - if (unlikely(offset<16)) { - LZ4_memcpy_using_offset(op, match, cpy, offset); - } else { - LZ4_wildCopy32(op, match, cpy); - } - - op = cpy; /* wildcopy correction */ - } - safe_decode: -#endif - - /* Main Loop : decode remaining sequences where output < FASTLOOP_SAFE_DISTANCE */ - DEBUGLOG(6, "using safe decode loop"); - while (1) { - assert(ip < iend); - token = *ip++; - length = token >> ML_BITS; /* literal length */ - - /* A two-stage shortcut for the most common case: - * 1) If the literal length is 0..14, and there is enough space, - * enter the shortcut and copy 16 bytes on behalf of the literals - * (in the fast mode, only 8 bytes can be safely copied this way). - * 2) Further if the match length is 4..18, copy 18 bytes in a similar - * manner; but we ensure that there's enough space in the output for - * those 18 bytes earlier, upon entering the shortcut (in other words, - * there is a combined check for both stages). - */ - if ( (length != RUN_MASK) - /* strictly "less than" on input, to re-enter the loop with at least one byte */ - && likely((ip < shortiend) & (op <= shortoend)) ) { - /* Copy the literals */ - LZ4_memcpy(op, ip, 16); - op += length; ip += length; - - /* The second stage: prepare for match copying, decode full info. - * If it doesn't work out, the info won't be wasted. */ - length = token & ML_MASK; /* match length */ - offset = LZ4_readLE16(ip); ip += 2; - match = op - offset; - assert(match <= op); /* check overflow */ - - /* Do not deal with overlapping matches. */ - if ( (length != ML_MASK) - && (offset >= 8) - && (dict==withPrefix64k || match >= lowPrefix) ) { - /* Copy the match. */ - LZ4_memcpy(op + 0, match + 0, 8); - LZ4_memcpy(op + 8, match + 8, 8); - LZ4_memcpy(op +16, match +16, 2); - op += length + MINMATCH; - /* Both stages worked, load the next token. */ - continue; - } - - /* The second stage didn't work out, but the info is ready. - * Propel it right to the point of match copying. */ - goto _copy_match; - } - - /* decode literal length */ - if (length == RUN_MASK) { - size_t const addl = read_variable_length(&ip, iend-RUN_MASK, 1); - if (addl == rvl_error) { goto _output_error; } - length += addl; - if (unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */ - if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */ - } - - /* copy literals */ - cpy = op+length; -#if LZ4_FAST_DEC_LOOP - safe_literal_copy: -#endif - LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH); - if ((cpy>oend-MFLIMIT) || (ip+length>iend-(2+1+LASTLITERALS))) { - /* We've either hit the input parsing restriction or the output parsing restriction. - * In the normal scenario, decoding a full block, it must be the last sequence, - * otherwise it's an error (invalid input or dimensions). 
- * In partialDecoding scenario, it's necessary to ensure there is no buffer overflow. - */ - if (partialDecoding) { - /* Since we are partial decoding we may be in this block because of the output parsing - * restriction, which is not valid since the output buffer is allowed to be undersized. - */ - DEBUGLOG(7, "partialDecoding: copying literals, close to input or output end") - DEBUGLOG(7, "partialDecoding: literal length = %u", (unsigned)length); - DEBUGLOG(7, "partialDecoding: remaining space in dstBuffer : %i", (int)(oend - op)); - DEBUGLOG(7, "partialDecoding: remaining space in srcBuffer : %i", (int)(iend - ip)); - /* Finishing in the middle of a literals segment, - * due to lack of input. - */ - if (ip+length > iend) { - length = (size_t)(iend-ip); - cpy = op + length; - } - /* Finishing in the middle of a literals segment, - * due to lack of output space. - */ - if (cpy > oend) { - cpy = oend; - assert(op<=oend); - length = (size_t)(oend-op); - } - } else { - /* We must be on the last sequence (or invalid) because of the parsing limitations - * so check that we exactly consume the input and don't overrun the output buffer. - */ - if ((ip+length != iend) || (cpy > oend)) { - DEBUGLOG(6, "should have been last run of literals") - DEBUGLOG(6, "ip(%p) + length(%i) = %p != iend (%p)", ip, (int)length, ip+length, iend); - DEBUGLOG(6, "or cpy(%p) > oend(%p)", cpy, oend); - goto _output_error; - } - } - LZ4_memmove(op, ip, length); /* supports overlapping memory regions, for in-place decompression scenarios */ - ip += length; - op += length; - /* Necessarily EOF when !partialDecoding. - * When partialDecoding, it is EOF if we've either - * filled the output buffer or - * can't proceed with reading an offset for following match. - */ - if (!partialDecoding || (cpy == oend) || (ip >= (iend-2))) { - break; - } - } else { - LZ4_wildCopy8(op, ip, cpy); /* can overwrite up to 8 bytes beyond cpy */ - ip += length; op = cpy; - } - - /* get offset */ - offset = LZ4_readLE16(ip); ip+=2; - match = op - offset; - - /* get matchlength */ - length = token & ML_MASK; - - _copy_match: - if (length == ML_MASK) { - size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0); - if (addl == rvl_error) { goto _output_error; } - length += addl; - if (unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error; /* overflow detection */ - } - length += MINMATCH; - -#if LZ4_FAST_DEC_LOOP - safe_match_copy: -#endif - if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error; /* Error : offset outside buffers */ - /* match starting within external dictionary */ - if ((dict==usingExtDict) && (match < lowPrefix)) { - assert(dictEnd != NULL); - if (unlikely(op+length > oend-LASTLITERALS)) { - if (partialDecoding) length = MIN(length, (size_t)(oend-op)); - else goto _output_error; /* doesn't respect parsing restriction */ - } - - if (length <= (size_t)(lowPrefix-match)) { - /* match fits entirely within external dictionary : just copy */ - LZ4_memmove(op, dictEnd - (lowPrefix-match), length); - op += length; - } else { - /* match stretches into both external dictionary and current block */ - size_t const copySize = (size_t)(lowPrefix - match); - size_t const restSize = length - copySize; - LZ4_memcpy(op, dictEnd - copySize, copySize); - op += copySize; - if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */ - BYTE* const endOfMatch = op + restSize; - const BYTE* copyFrom = lowPrefix; - while (op < endOfMatch) *op++ = *copyFrom++; - } else { - LZ4_memcpy(op, lowPrefix, 
restSize); - op += restSize; - } } - continue; - } - assert(match >= lowPrefix); - - /* copy match within block */ - cpy = op + length; - - /* partialDecoding : may end anywhere within the block */ - assert(op<=oend); - if (partialDecoding && (cpy > oend-MATCH_SAFEGUARD_DISTANCE)) { - size_t const mlen = MIN(length, (size_t)(oend-op)); - const BYTE* const matchEnd = match + mlen; - BYTE* const copyEnd = op + mlen; - if (matchEnd > op) { /* overlap copy */ - while (op < copyEnd) { *op++ = *match++; } - } else { - LZ4_memcpy(op, match, mlen); - } - op = copyEnd; - if (op == oend) { break; } - continue; - } - - if (unlikely(offset<8)) { - LZ4_write32(op, 0); /* silence msan warning when offset==0 */ - op[0] = match[0]; - op[1] = match[1]; - op[2] = match[2]; - op[3] = match[3]; - match += inc32table[offset]; - LZ4_memcpy(op+4, match, 4); - match -= dec64table[offset]; - } else { - LZ4_memcpy(op, match, 8); - match += 8; - } - op += 8; - - if (unlikely(cpy > oend-MATCH_SAFEGUARD_DISTANCE)) { - BYTE* const oCopyLimit = oend - (WILDCOPYLENGTH-1); - if (cpy > oend-LASTLITERALS) { goto _output_error; } /* Error : last LASTLITERALS bytes must be literals (uncompressed) */ - if (op < oCopyLimit) { - LZ4_wildCopy8(op, match, oCopyLimit); - match += oCopyLimit - op; - op = oCopyLimit; - } - while (op < cpy) { *op++ = *match++; } - } else { - LZ4_memcpy(op, match, 8); - if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); } - } - op = cpy; /* wildcopy correction */ - } - - /* end of decoding */ - DEBUGLOG(5, "decoded %i bytes", (int) (((char*)op)-dst)); - return (int) (((char*)op)-dst); /* Nb of output bytes decoded */ - - /* Overflow error detected */ - _output_error: - return (int) (-(((const char*)ip)-src))-1; - } -} - - -/*===== Instantiate the API decoding functions. =====*/ - -LZ4_FORCE_O2 -int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize) -{ - return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, - decode_full_block, noDict, - (BYTE*)dest, NULL, 0); -} - -LZ4_FORCE_O2 -int LZ4_decompress_safe_partial(const char* src, char* dst, int compressedSize, int targetOutputSize, int dstCapacity) -{ - dstCapacity = MIN(targetOutputSize, dstCapacity); - return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity, - partial_decode, - noDict, (BYTE*)dst, NULL, 0); -} - -LZ4_FORCE_O2 -int LZ4_decompress_fast(const char* source, char* dest, int originalSize) -{ - DEBUGLOG(5, "LZ4_decompress_fast"); - return LZ4_decompress_unsafe_generic( - (const BYTE*)source, (BYTE*)dest, originalSize, - 0, NULL, 0); -} - -/*===== Instantiate a few more decoding cases, used more than once. =====*/ - -LZ4_FORCE_O2 /* Exported, an obsolete API function. */ -int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize) -{ - return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, - decode_full_block, withPrefix64k, - (BYTE*)dest - 64 KB, NULL, 0); -} - -LZ4_FORCE_O2 -static int LZ4_decompress_safe_partial_withPrefix64k(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity) -{ - dstCapacity = MIN(targetOutputSize, dstCapacity); - return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity, - partial_decode, withPrefix64k, - (BYTE*)dest - 64 KB, NULL, 0); -} - -/* Another obsolete API function, paired with the previous one. 
*/ -int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize) -{ - return LZ4_decompress_unsafe_generic( - (const BYTE*)source, (BYTE*)dest, originalSize, - 64 KB, NULL, 0); -} - -LZ4_FORCE_O2 -static int LZ4_decompress_safe_withSmallPrefix(const char* source, char* dest, int compressedSize, int maxOutputSize, - size_t prefixSize) -{ - return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, - decode_full_block, noDict, - (BYTE*)dest-prefixSize, NULL, 0); -} - -LZ4_FORCE_O2 -static int LZ4_decompress_safe_partial_withSmallPrefix(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity, - size_t prefixSize) -{ - dstCapacity = MIN(targetOutputSize, dstCapacity); - return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity, - partial_decode, noDict, - (BYTE*)dest-prefixSize, NULL, 0); -} - -LZ4_FORCE_O2 -int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, - int compressedSize, int maxOutputSize, - const void* dictStart, size_t dictSize) -{ - DEBUGLOG(5, "LZ4_decompress_safe_forceExtDict"); - return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, - decode_full_block, usingExtDict, - (BYTE*)dest, (const BYTE*)dictStart, dictSize); -} - -LZ4_FORCE_O2 -int LZ4_decompress_safe_partial_forceExtDict(const char* source, char* dest, - int compressedSize, int targetOutputSize, int dstCapacity, - const void* dictStart, size_t dictSize) -{ - dstCapacity = MIN(targetOutputSize, dstCapacity); - return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity, - partial_decode, usingExtDict, - (BYTE*)dest, (const BYTE*)dictStart, dictSize); -} - -LZ4_FORCE_O2 -static int LZ4_decompress_fast_extDict(const char* source, char* dest, int originalSize, - const void* dictStart, size_t dictSize) -{ - return LZ4_decompress_unsafe_generic( - (const BYTE*)source, (BYTE*)dest, originalSize, - 0, (const BYTE*)dictStart, dictSize); -} - -/* The "double dictionary" mode, for use with e.g. ring buffers: the first part - * of the dictionary is passed as prefix, and the second via dictStart + dictSize. - * These routines are used only once, in LZ4_decompress_*_continue(). - */ -LZ4_FORCE_INLINE -int LZ4_decompress_safe_doubleDict(const char* source, char* dest, int compressedSize, int maxOutputSize, - size_t prefixSize, const void* dictStart, size_t dictSize) -{ - return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, - decode_full_block, usingExtDict, - (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize); -} - -/*===== streaming decompression functions =====*/ - -#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) -LZ4_streamDecode_t* LZ4_createStreamDecode(void) -{ - LZ4_STATIC_ASSERT(sizeof(LZ4_streamDecode_t) >= sizeof(LZ4_streamDecode_t_internal)); - return (LZ4_streamDecode_t*) ALLOC_AND_ZERO(sizeof(LZ4_streamDecode_t)); -} - -int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream) -{ - if (LZ4_stream == NULL) { return 0; } /* support free on NULL */ - FREEMEM(LZ4_stream); - return 0; -} -#endif - -/*! LZ4_setStreamDecode() : - * Use this function to instruct where to find the dictionary. - * This function is not necessary if previous data is still available where it was decoded. - * Loading a size of 0 is allowed (same effect as no dictionary). 
- * @return : 1 if OK, 0 if error - */ -int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize) -{ - LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse; - lz4sd->prefixSize = (size_t)dictSize; - if (dictSize) { - assert(dictionary != NULL); - lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize; - } else { - lz4sd->prefixEnd = (const BYTE*) dictionary; - } - lz4sd->externalDict = NULL; - lz4sd->extDictSize = 0; - return 1; -} - -/*! LZ4_decoderRingBufferSize() : - * when setting a ring buffer for streaming decompression (optional scenario), - * provides the minimum size of this ring buffer - * to be compatible with any source respecting maxBlockSize condition. - * Note : in a ring buffer scenario, - * blocks are presumed decompressed next to each other. - * When not enough space remains for next block (remainingSize < maxBlockSize), - * decoding resumes from beginning of ring buffer. - * @return : minimum ring buffer size, - * or 0 if there is an error (invalid maxBlockSize). - */ -int LZ4_decoderRingBufferSize(int maxBlockSize) -{ - if (maxBlockSize < 0) return 0; - if (maxBlockSize > LZ4_MAX_INPUT_SIZE) return 0; - if (maxBlockSize < 16) maxBlockSize = 16; - return LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize); -} - -/* -*_continue() : - These decoding functions allow decompression of multiple blocks in "streaming" mode. - Previously decoded blocks must still be available at the memory position where they were decoded. - If it's not possible, save the relevant part of decoded data into a safe buffer, - and indicate where it stands using LZ4_setStreamDecode() -*/ -LZ4_FORCE_O2 -int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize) -{ - LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse; - int result; - - if (lz4sd->prefixSize == 0) { - /* The first call, no dictionary yet. */ - assert(lz4sd->extDictSize == 0); - result = LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize); - if (result <= 0) return result; - lz4sd->prefixSize = (size_t)result; - lz4sd->prefixEnd = (BYTE*)dest + result; - } else if (lz4sd->prefixEnd == (BYTE*)dest) { - /* They're rolling the current segment. */ - if (lz4sd->prefixSize >= 64 KB - 1) - result = LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize); - else if (lz4sd->extDictSize == 0) - result = LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, - lz4sd->prefixSize); - else - result = LZ4_decompress_safe_doubleDict(source, dest, compressedSize, maxOutputSize, - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize); - if (result <= 0) return result; - lz4sd->prefixSize += (size_t)result; - lz4sd->prefixEnd += result; - } else { - /* The buffer wraps around, or they're switching to another buffer. 
*/ - lz4sd->extDictSize = lz4sd->prefixSize; - lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize; - result = LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, - lz4sd->externalDict, lz4sd->extDictSize); - if (result <= 0) return result; - lz4sd->prefixSize = (size_t)result; - lz4sd->prefixEnd = (BYTE*)dest + result; - } - - return result; -} - -LZ4_FORCE_O2 int -LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, - const char* source, char* dest, int originalSize) -{ - LZ4_streamDecode_t_internal* const lz4sd = - (assert(LZ4_streamDecode!=NULL), &LZ4_streamDecode->internal_donotuse); - int result; - - DEBUGLOG(5, "LZ4_decompress_fast_continue (toDecodeSize=%i)", originalSize); - assert(originalSize >= 0); - - if (lz4sd->prefixSize == 0) { - DEBUGLOG(5, "first invocation : no prefix nor extDict"); - assert(lz4sd->extDictSize == 0); - result = LZ4_decompress_fast(source, dest, originalSize); - if (result <= 0) return result; - lz4sd->prefixSize = (size_t)originalSize; - lz4sd->prefixEnd = (BYTE*)dest + originalSize; - } else if (lz4sd->prefixEnd == (BYTE*)dest) { - DEBUGLOG(5, "continue using existing prefix"); - result = LZ4_decompress_unsafe_generic( - (const BYTE*)source, (BYTE*)dest, originalSize, - lz4sd->prefixSize, - lz4sd->externalDict, lz4sd->extDictSize); - if (result <= 0) return result; - lz4sd->prefixSize += (size_t)originalSize; - lz4sd->prefixEnd += originalSize; - } else { - DEBUGLOG(5, "prefix becomes extDict"); - lz4sd->extDictSize = lz4sd->prefixSize; - lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize; - result = LZ4_decompress_fast_extDict(source, dest, originalSize, - lz4sd->externalDict, lz4sd->extDictSize); - if (result <= 0) return result; - lz4sd->prefixSize = (size_t)originalSize; - lz4sd->prefixEnd = (BYTE*)dest + originalSize; - } - - return result; -} - - -/* -Advanced decoding functions : -*_usingDict() : - These decoding functions work the same as "_continue" ones, - the dictionary must be explicitly provided within parameters -*/ - -int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize) -{ - if (dictSize==0) - return LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize); - if (dictStart+dictSize == dest) { - if (dictSize >= 64 KB - 1) { - return LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize); - } - assert(dictSize >= 0); - return LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, (size_t)dictSize); - } - assert(dictSize >= 0); - return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, dictStart, (size_t)dictSize); -} - -int LZ4_decompress_safe_partial_usingDict(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity, const char* dictStart, int dictSize) -{ - if (dictSize==0) - return LZ4_decompress_safe_partial(source, dest, compressedSize, targetOutputSize, dstCapacity); - if (dictStart+dictSize == dest) { - if (dictSize >= 64 KB - 1) { - return LZ4_decompress_safe_partial_withPrefix64k(source, dest, compressedSize, targetOutputSize, dstCapacity); - } - assert(dictSize >= 0); - return LZ4_decompress_safe_partial_withSmallPrefix(source, dest, compressedSize, targetOutputSize, dstCapacity, (size_t)dictSize); - } - assert(dictSize >= 0); - return LZ4_decompress_safe_partial_forceExtDict(source, dest, compressedSize, targetOutputSize, dstCapacity, dictStart, (size_t)dictSize); -} - 
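/* [Editor's note] The sketch below is illustrative only and is not part of the
 * original lz4.c. It shows one plausible way to drive the streaming decoder
 * implemented above (LZ4_setStreamDecode() + LZ4_decompress_safe_continue()).
 * The 4-byte length prefix per block, the helper name decode_stream(), and the
 * buffer sizes are assumptions made for this example, not part of the LZ4 block format. */
#include <stdio.h>
#include "lz4.h"

static int decode_stream(FILE* fp, char* dst, int dstCapacity)
{
    LZ4_streamDecode_t lz4sd;
    char block[64 * 1024];
    int decodedTotal = 0;

    LZ4_setStreamDecode(&lz4sd, NULL, 0);            /* fresh stream, no dictionary */
    for (;;) {
        int blockSize;
        if (fread(&blockSize, sizeof blockSize, 1, fp) != 1) break;        /* assumed framing */
        if (blockSize <= 0 || blockSize > (int)sizeof block) return -1;
        if (fread(block, 1, (size_t)blockSize, fp) != (size_t)blockSize) return -1;

        /* previously decoded data stays in place in dst, so new blocks may reference it */
        {   int const decoded = LZ4_decompress_safe_continue(&lz4sd, block,
                                    dst + decodedTotal, blockSize, dstCapacity - decodedTotal);
            if (decoded < 0) return -1;              /* malformed block */
            decodedTotal += decoded;
        }
    }
    return decodedTotal;
}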
-int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize) -{ - if (dictSize==0 || dictStart+dictSize == dest) - return LZ4_decompress_unsafe_generic( - (const BYTE*)source, (BYTE*)dest, originalSize, - (size_t)dictSize, NULL, 0); - assert(dictSize >= 0); - return LZ4_decompress_fast_extDict(source, dest, originalSize, dictStart, (size_t)dictSize); -} - - -/*=************************************************* -* Obsolete Functions -***************************************************/ -/* obsolete compression functions */ -int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize) -{ - return LZ4_compress_default(source, dest, inputSize, maxOutputSize); -} -int LZ4_compress(const char* src, char* dest, int srcSize) -{ - return LZ4_compress_default(src, dest, srcSize, LZ4_compressBound(srcSize)); -} -int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize) -{ - return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1); -} -int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize) -{ - return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1); -} -int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int dstCapacity) -{ - return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, dstCapacity, 1); -} -int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize) -{ - return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1); -} - -/* -These decompression functions are deprecated and should no longer be used. -They are only provided here for compatibility with older user programs. -- LZ4_uncompress is totally equivalent to LZ4_decompress_fast -- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe -*/ -int LZ4_uncompress (const char* source, char* dest, int outputSize) -{ - return LZ4_decompress_fast(source, dest, outputSize); -} -int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize) -{ - return LZ4_decompress_safe(source, dest, isize, maxOutputSize); -} - -/* Obsolete Streaming functions */ - -int LZ4_sizeofStreamState(void) { return sizeof(LZ4_stream_t); } - -int LZ4_resetStreamState(void* state, char* inputBuffer) -{ - (void)inputBuffer; - LZ4_resetStream((LZ4_stream_t*)state); - return 0; -} - -#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) -void* LZ4_create (char* inputBuffer) -{ - (void)inputBuffer; - return LZ4_createStream(); -} -#endif - -char* LZ4_slideInputBuffer (void* state) -{ - /* avoid const char * -> char * conversion warning */ - return (char *)(uptrval)((LZ4_stream_t*)state)->internal_donotuse.dictionary; -} - -#endif /* LZ4_COMMONDEFS_ONLY */ diff --git a/CompressSave/LZ4WrapC/lz4/lz4.h b/CompressSave/LZ4WrapC/lz4/lz4.h deleted file mode 100644 index f85b038..0000000 --- a/CompressSave/LZ4WrapC/lz4/lz4.h +++ /dev/null @@ -1,862 +0,0 @@ -/* - * LZ4 - Fast LZ compression algorithm - * Header File - * Copyright (C) 2011-2020, Yann Collet. 
- - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - LZ4 homepage : http://www.lz4.org - - LZ4 source repository : https://github.com/lz4/lz4 -*/ -#if defined (__cplusplus) -extern "C" { -#endif - -#ifndef LZ4_H_2983827168210 -#define LZ4_H_2983827168210 - -/* --- Dependency --- */ -#include <stddef.h> /* size_t */ - - -/** - Introduction - - LZ4 is lossless compression algorithm, providing compression speed >500 MB/s per core, - scalable with multi-cores CPU. It features an extremely fast decoder, with speed in - multiple GB/s per core, typically reaching RAM speed limits on multi-core systems. - - The LZ4 compression library provides in-memory compression and decompression functions. - It gives full buffer control to user. - Compression can be done in: - - a single step (described as Simple Functions) - - a single step, reusing a context (described in Advanced Functions) - - unbounded multiple steps (described as Streaming compression) - - lz4.h generates and decodes LZ4-compressed blocks (doc/lz4_Block_format.md). - Decompressing such a compressed block requires additional metadata. - Exact metadata depends on exact decompression function. - For the typical case of LZ4_decompress_safe(), - metadata includes block's compressed size, and maximum bound of decompressed size. - Each application is free to encode and pass such metadata in whichever way it wants. - - lz4.h only handle blocks, it can not generate Frames. - - Blocks are different from Frames (doc/lz4_Frame_format.md). - Frames bundle both blocks and metadata in a specified manner. - Embedding metadata is required for compressed data to be self-contained and portable. - Frame format is delivered through a companion API, declared in lz4frame.h. - The `lz4` CLI can only manage frames. -*/ - -/*^*************************************************************** -* Export parameters -*****************************************************************/ -/* -* LZ4_DLL_EXPORT : -* Enable exporting of functions when building a Windows DLL -* LZ4LIB_VISIBILITY : -* Control library symbols visibility.
-*/ -#ifndef LZ4LIB_VISIBILITY -# if defined(__GNUC__) && (__GNUC__ >= 4) -# define LZ4LIB_VISIBILITY __attribute__ ((visibility ("default"))) -# else -# define LZ4LIB_VISIBILITY -# endif -#endif -#if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1) -# define LZ4LIB_API __declspec(dllexport) LZ4LIB_VISIBILITY -#elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1) -# define LZ4LIB_API __declspec(dllimport) LZ4LIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ -#else -# define LZ4LIB_API LZ4LIB_VISIBILITY -#endif - -/*! LZ4_FREESTANDING : - * When this macro is set to 1, it enables "freestanding mode" that is - * suitable for typical freestanding environment which doesn't support - * standard C library. - * - * - LZ4_FREESTANDING is a compile-time switch. - * - It requires the following macros to be defined: - * LZ4_memcpy, LZ4_memmove, LZ4_memset. - * - It only enables LZ4/HC functions which don't use heap. - * All LZ4F_* functions are not supported. - * - See tests/freestanding.c to check its basic setup. - */ -#if defined(LZ4_FREESTANDING) && (LZ4_FREESTANDING == 1) -# define LZ4_HEAPMODE 0 -# define LZ4HC_HEAPMODE 0 -# define LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION 1 -# if !defined(LZ4_memcpy) -# error "LZ4_FREESTANDING requires macro 'LZ4_memcpy'." -# endif -# if !defined(LZ4_memset) -# error "LZ4_FREESTANDING requires macro 'LZ4_memset'." -# endif -# if !defined(LZ4_memmove) -# error "LZ4_FREESTANDING requires macro 'LZ4_memmove'." -# endif -#elif ! defined(LZ4_FREESTANDING) -# define LZ4_FREESTANDING 0 -#endif - - -/*------ Version ------*/ -#define LZ4_VERSION_MAJOR 1 /* for breaking interface changes */ -#define LZ4_VERSION_MINOR 9 /* for new (non-breaking) interface capabilities */ -#define LZ4_VERSION_RELEASE 4 /* for tweaks, bug-fixes, or development */ - -#define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE) - -#define LZ4_LIB_VERSION LZ4_VERSION_MAJOR.LZ4_VERSION_MINOR.LZ4_VERSION_RELEASE -#define LZ4_QUOTE(str) #str -#define LZ4_EXPAND_AND_QUOTE(str) LZ4_QUOTE(str) -#define LZ4_VERSION_STRING LZ4_EXPAND_AND_QUOTE(LZ4_LIB_VERSION) /* requires v1.7.3+ */ - -LZ4LIB_API int LZ4_versionNumber (void); /**< library version number; useful to check dll version; requires v1.3.0+ */ -LZ4LIB_API const char* LZ4_versionString (void); /**< library version string; useful to check dll version; requires v1.7.5+ */ - - -/*-************************************ -* Tuning parameter -**************************************/ -#define LZ4_MEMORY_USAGE_MIN 10 -#define LZ4_MEMORY_USAGE_DEFAULT 14 -#define LZ4_MEMORY_USAGE_MAX 20 - -/*! - * LZ4_MEMORY_USAGE : - * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; ) - * Increasing memory usage improves compression ratio, at the cost of speed. - * Reduced memory usage may improve speed at the cost of ratio, thanks to better cache locality. - * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache - */ -#ifndef LZ4_MEMORY_USAGE -# define LZ4_MEMORY_USAGE LZ4_MEMORY_USAGE_DEFAULT -#endif - -#if (LZ4_MEMORY_USAGE < LZ4_MEMORY_USAGE_MIN) -# error "LZ4_MEMORY_USAGE is too small !" -#endif - -#if (LZ4_MEMORY_USAGE > LZ4_MEMORY_USAGE_MAX) -# error "LZ4_MEMORY_USAGE is too large !" -#endif - -/*-************************************ -* Simple Functions -**************************************/ -/*! 
LZ4_compress_default() : - * Compresses 'srcSize' bytes from buffer 'src' - * into already allocated 'dst' buffer of size 'dstCapacity'. - * Compression is guaranteed to succeed if 'dstCapacity' >= LZ4_compressBound(srcSize). - * It also runs faster, so it's a recommended setting. - * If the function cannot compress 'src' into a more limited 'dst' budget, - * compression stops *immediately*, and the function result is zero. - * In which case, 'dst' content is undefined (invalid). - * srcSize : max supported value is LZ4_MAX_INPUT_SIZE. - * dstCapacity : size of buffer 'dst' (which must be already allocated) - * @return : the number of bytes written into buffer 'dst' (necessarily <= dstCapacity) - * or 0 if compression fails - * Note : This function is protected against buffer overflow scenarios (never writes outside 'dst' buffer, nor read outside 'source' buffer). - */ -LZ4LIB_API int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity); - -/*! LZ4_decompress_safe() : - * @compressedSize : is the exact complete size of the compressed block. - * @dstCapacity : is the size of destination buffer (which must be already allocated), - * is an upper bound of decompressed size. - * @return : the number of bytes decompressed into destination buffer (necessarily <= dstCapacity) - * If destination buffer is not large enough, decoding will stop and output an error code (negative value). - * If the source stream is detected malformed, the function will stop decoding and return a negative result. - * Note 1 : This function is protected against malicious data packets : - * it will never writes outside 'dst' buffer, nor read outside 'source' buffer, - * even if the compressed block is maliciously modified to order the decoder to do these actions. - * In such case, the decoder stops immediately, and considers the compressed block malformed. - * Note 2 : compressedSize and dstCapacity must be provided to the function, the compressed block does not contain them. - * The implementation is free to send / store / derive this information in whichever way is most beneficial. - * If there is a need for a different format which bundles together both compressed data and its metadata, consider looking at lz4frame.h instead. - */ -LZ4LIB_API int LZ4_decompress_safe (const char* src, char* dst, int compressedSize, int dstCapacity); - - -/*-************************************ -* Advanced Functions -**************************************/ -#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */ -#define LZ4_COMPRESSBOUND(isize) ((unsigned)(isize) > (unsigned)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize)/255) + 16) - -/*! LZ4_compressBound() : - Provides the maximum size that LZ4 compression may output in a "worst case" scenario (input data not compressible) - This function is primarily useful for memory allocation purposes (destination buffer size). - Macro LZ4_COMPRESSBOUND() is also provided for compilation-time evaluation (stack memory allocation for example). - Note that LZ4_compress_default() compresses faster when dstCapacity is >= LZ4_compressBound(srcSize) - inputSize : max supported value is LZ4_MAX_INPUT_SIZE - return : maximum output size in a "worst case" scenario - or 0, if input size is incorrect (too large or negative) -*/ -LZ4LIB_API int LZ4_compressBound(int inputSize); - -/*! LZ4_compress_fast() : - Same as LZ4_compress_default(), but allows selection of "acceleration" factor. 
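 *
 * [Editor's illustrative aside - not part of the original header]
 * A minimal one-shot round trip with the simple functions documented above
 * (LZ4_compressBound / LZ4_compress_default / LZ4_decompress_safe); buffer names
 * are hypothetical and error handling is abbreviated:
 *
 *     int   srcSize = (int)strlen(text) + 1;          // `text` provided by the caller
 *     int   bound   = LZ4_compressBound(srcSize);
 *     char* cbuf    = (char*)malloc((size_t)bound);
 *     int   cSize   = LZ4_compress_default(text, cbuf, srcSize, bound);
 *     // cSize == 0  => compression failed (dst budget too small)
 *     char* rbuf    = (char*)malloc((size_t)srcSize);
 *     int   rSize   = LZ4_decompress_safe(cbuf, rbuf, cSize, srcSize);
 *     // rSize < 0   => malformed input; on success rSize == srcSize
 *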
- The larger the acceleration value, the faster the algorithm, but also the lesser the compression. - It's a trade-off. It can be fine tuned, with each successive value providing roughly +~3% to speed. - An acceleration value of "1" is the same as regular LZ4_compress_default() - Values <= 0 will be replaced by LZ4_ACCELERATION_DEFAULT (currently == 1, see lz4.c). - Values > LZ4_ACCELERATION_MAX will be replaced by LZ4_ACCELERATION_MAX (currently == 65537, see lz4.c). -*/ -LZ4LIB_API int LZ4_compress_fast (const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); - - -/*! LZ4_compress_fast_extState() : - * Same as LZ4_compress_fast(), using an externally allocated memory space for its state. - * Use LZ4_sizeofState() to know how much memory must be allocated, - * and allocate it on 8-bytes boundaries (using `malloc()` typically). - * Then, provide this buffer as `void* state` to compression function. - */ -LZ4LIB_API int LZ4_sizeofState(void); -LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); - - -/*! LZ4_compress_destSize() : - * Reverse the logic : compresses as much data as possible from 'src' buffer - * into already allocated buffer 'dst', of size >= 'targetDestSize'. - * This function either compresses the entire 'src' content into 'dst' if it's large enough, - * or fill 'dst' buffer completely with as much data as possible from 'src'. - * note: acceleration parameter is fixed to "default". - * - * *srcSizePtr : will be modified to indicate how many bytes where read from 'src' to fill 'dst'. - * New value is necessarily <= input value. - * @return : Nb bytes written into 'dst' (necessarily <= targetDestSize) - * or 0 if compression fails. - * - * Note : from v1.8.2 to v1.9.1, this function had a bug (fixed in v1.9.2+): - * the produced compressed content could, in specific circumstances, - * require to be decompressed into a destination buffer larger - * by at least 1 byte than the content to decompress. - * If an application uses `LZ4_compress_destSize()`, - * it's highly recommended to update liblz4 to v1.9.2 or better. - * If this can't be done or ensured, - * the receiving decompression function should provide - * a dstCapacity which is > decompressedSize, by at least 1 byte. - * See https://github.com/lz4/lz4/issues/859 for details - */ -LZ4LIB_API int LZ4_compress_destSize (const char* src, char* dst, int* srcSizePtr, int targetDstSize); - - -/*! LZ4_decompress_safe_partial() : - * Decompress an LZ4 compressed block, of size 'srcSize' at position 'src', - * into destination buffer 'dst' of size 'dstCapacity'. - * Up to 'targetOutputSize' bytes will be decoded. - * The function stops decoding on reaching this objective. - * This can be useful to boost performance - * whenever only the beginning of a block is required. - * - * @return : the number of bytes decoded in `dst` (necessarily <= targetOutputSize) - * If source stream is detected malformed, function returns a negative result. - * - * Note 1 : @return can be < targetOutputSize, if compressed block contains less data. - * - * Note 2 : targetOutputSize must be <= dstCapacity - * - * Note 3 : this function effectively stops decoding on reaching targetOutputSize, - * so dstCapacity is kind of redundant. - * This is because in older versions of this function, - * decoding operation would still write complete sequences. 
- * Therefore, there was no guarantee that it would stop writing at exactly targetOutputSize, - * it could write more bytes, though only up to dstCapacity. - * Some "margin" used to be required for this operation to work properly. - * Thankfully, this is no longer necessary. - * The function nonetheless keeps the same signature, in an effort to preserve API compatibility. - * - * Note 4 : If srcSize is the exact size of the block, - * then targetOutputSize can be any value, - * including larger than the block's decompressed size. - * The function will, at most, generate block's decompressed size. - * - * Note 5 : If srcSize is _larger_ than block's compressed size, - * then targetOutputSize **MUST** be <= block's decompressed size. - * Otherwise, *silent corruption will occur*. - */ -LZ4LIB_API int LZ4_decompress_safe_partial (const char* src, char* dst, int srcSize, int targetOutputSize, int dstCapacity); - - -/*-********************************************* -* Streaming Compression Functions -***********************************************/ -typedef union LZ4_stream_u LZ4_stream_t; /* incomplete type (defined later) */ - -/** - Note about RC_INVOKED - - - RC_INVOKED is predefined symbol of rc.exe (the resource compiler which is part of MSVC/Visual Studio). - https://docs.microsoft.com/en-us/windows/win32/menurc/predefined-macros - - - Since rc.exe is a legacy compiler, it truncates long symbol (> 30 chars) - and reports warning "RC4011: identifier truncated". - - - To eliminate the warning, we surround long preprocessor symbol with - "#if !defined(RC_INVOKED) ... #endif" block that means - "skip this block when rc.exe is trying to read it". -*/ -#if !defined(RC_INVOKED) /* https://docs.microsoft.com/en-us/windows/win32/menurc/predefined-macros */ -#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) -LZ4LIB_API LZ4_stream_t* LZ4_createStream(void); -LZ4LIB_API int LZ4_freeStream (LZ4_stream_t* streamPtr); -#endif /* !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) */ -#endif - -/*! LZ4_resetStream_fast() : v1.9.0+ - * Use this to prepare an LZ4_stream_t for a new chain of dependent blocks - * (e.g., LZ4_compress_fast_continue()). - * - * An LZ4_stream_t must be initialized once before usage. - * This is automatically done when created by LZ4_createStream(). - * However, should the LZ4_stream_t be simply declared on stack (for example), - * it's necessary to initialize it first, using LZ4_initStream(). - * - * After init, start any new stream with LZ4_resetStream_fast(). - * A same LZ4_stream_t can be re-used multiple times consecutively - * and compress multiple streams, - * provided that it starts each new stream with LZ4_resetStream_fast(). - * - * LZ4_resetStream_fast() is much faster than LZ4_initStream(), - * but is not compatible with memory regions containing garbage data. - * - * Note: it's only useful to call LZ4_resetStream_fast() - * in the context of streaming compression. - * The *extState* functions perform their own resets. - * Invoking LZ4_resetStream_fast() before is redundant, and even counterproductive. - */ -LZ4LIB_API void LZ4_resetStream_fast (LZ4_stream_t* streamPtr); - -/*! LZ4_loadDict() : - * Use this function to reference a static dictionary into LZ4_stream_t. - * The dictionary must remain available during compression. - * LZ4_loadDict() triggers a reset, so any previous data will be forgotten. - * The same dictionary will have to be loaded on decompression side for successful decoding. 
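 *
 * [Editor's illustrative aside - not part of the original header]
 * Sketch of compressing one block against a static dictionary with the functions
 * described in this section; the identifiers (dictBuf, src, dst, ...) are hypothetical:
 *
 *     LZ4_stream_t ls;
 *     LZ4_initStream(&ls, sizeof ls);
 *     LZ4_loadDict(&ls, dictBuf, dictSize);            // dictBuf must stay valid while compressing
 *     int cSize = LZ4_compress_fast_continue(&ls, src, dst, srcSize, dstCapacity, 1);
 *     // the decoder must use the same dictionary, e.g.
 *     // LZ4_decompress_safe_usingDict(dst, out, cSize, outCapacity, dictBuf, dictSize);
 *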
- * Dictionary are useful for better compression of small data (KB range). - * While LZ4 accept any input as dictionary, - * results are generally better when using Zstandard's Dictionary Builder. - * Loading a size of 0 is allowed, and is the same as reset. - * @return : loaded dictionary size, in bytes (necessarily <= 64 KB) - */ -LZ4LIB_API int LZ4_loadDict (LZ4_stream_t* streamPtr, const char* dictionary, int dictSize); - -/*! LZ4_compress_fast_continue() : - * Compress 'src' content using data from previously compressed blocks, for better compression ratio. - * 'dst' buffer must be already allocated. - * If dstCapacity >= LZ4_compressBound(srcSize), compression is guaranteed to succeed, and runs faster. - * - * @return : size of compressed block - * or 0 if there is an error (typically, cannot fit into 'dst'). - * - * Note 1 : Each invocation to LZ4_compress_fast_continue() generates a new block. - * Each block has precise boundaries. - * Each block must be decompressed separately, calling LZ4_decompress_*() with relevant metadata. - * It's not possible to append blocks together and expect a single invocation of LZ4_decompress_*() to decompress them together. - * - * Note 2 : The previous 64KB of source data is __assumed__ to remain present, unmodified, at same address in memory ! - * - * Note 3 : When input is structured as a double-buffer, each buffer can have any size, including < 64 KB. - * Make sure that buffers are separated, by at least one byte. - * This construction ensures that each block only depends on previous block. - * - * Note 4 : If input buffer is a ring-buffer, it can have any size, including < 64 KB. - * - * Note 5 : After an error, the stream status is undefined (invalid), it can only be reset or freed. - */ -LZ4LIB_API int LZ4_compress_fast_continue (LZ4_stream_t* streamPtr, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); - -/*! LZ4_saveDict() : - * If last 64KB data cannot be guaranteed to remain available at its current memory location, - * save it into a safer place (char* safeBuffer). - * This is schematically equivalent to a memcpy() followed by LZ4_loadDict(), - * but is much faster, because LZ4_saveDict() doesn't need to rebuild tables. - * @return : saved dictionary size in bytes (necessarily <= maxDictSize), or 0 if error. - */ -LZ4LIB_API int LZ4_saveDict (LZ4_stream_t* streamPtr, char* safeBuffer, int maxDictSize); - - -/*-********************************************** -* Streaming Decompression Functions -* Bufferless synchronous API -************************************************/ -typedef union LZ4_streamDecode_u LZ4_streamDecode_t; /* tracking context */ - -/*! LZ4_createStreamDecode() and LZ4_freeStreamDecode() : - * creation / destruction of streaming decompression tracking context. - * A tracking context can be re-used multiple times. - */ -#if !defined(RC_INVOKED) /* https://docs.microsoft.com/en-us/windows/win32/menurc/predefined-macros */ -#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) -LZ4LIB_API LZ4_streamDecode_t* LZ4_createStreamDecode(void); -LZ4LIB_API int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream); -#endif /* !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) */ -#endif - -/*! LZ4_setStreamDecode() : - * An LZ4_streamDecode_t context can be allocated once and re-used multiple times. - * Use this function to start decompression of a new stream of blocks. - * A dictionary can optionally be set. Use NULL or size 0 for a reset order. 
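 *
 * [Editor's illustrative aside - not part of the original header]
 * For reference, the compression-side loop documented just above
 * (LZ4_compress_fast_continue() + LZ4_saveDict()) typically looks like this;
 * read_chunk(), CHUNK_SIZE and the buffer names are hypothetical, error handling omitted:
 *
 *     LZ4_stream_t ls;
 *     char history[64 * 1024];                         // keeps the last 64 KB between blocks
 *     char chunk[CHUNK_SIZE], dst[LZ4_COMPRESSBOUND(CHUNK_SIZE)];
 *     int  chunkSize;
 *     LZ4_initStream(&ls, sizeof ls);
 *     while ((chunkSize = read_chunk(chunk, sizeof chunk)) > 0) {
 *         int cSize = LZ4_compress_fast_continue(&ls, chunk, dst, chunkSize, (int)sizeof dst, 1);
 *         // ... emit cSize and dst[0..cSize) ...
 *         LZ4_saveDict(&ls, history, (int)sizeof history);   // chunk buffer may now be reused
 *     }
 *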
- * Dictionary is presumed stable : it must remain accessible and unmodified during next decompression. - * @return : 1 if OK, 0 if error - */ -LZ4LIB_API int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize); - -/*! LZ4_decoderRingBufferSize() : v1.8.2+ - * Note : in a ring buffer scenario (optional), - * blocks are presumed decompressed next to each other - * up to the moment there is not enough remaining space for next block (remainingSize < maxBlockSize), - * at which stage it resumes from beginning of ring buffer. - * When setting such a ring buffer for streaming decompression, - * provides the minimum size of this ring buffer - * to be compatible with any source respecting maxBlockSize condition. - * @return : minimum ring buffer size, - * or 0 if there is an error (invalid maxBlockSize). - */ -LZ4LIB_API int LZ4_decoderRingBufferSize(int maxBlockSize); -#define LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize) (65536 + 14 + (maxBlockSize)) /* for static allocation; maxBlockSize presumed valid */ - -/*! LZ4_decompress_safe_continue() : - * This decoding function allows decompression of consecutive blocks in "streaming" mode. - * The difference with the usual independent blocks is that - * new blocks are allowed to find references into former blocks. - * A block is an unsplittable entity, and must be presented entirely to the decompression function. - * LZ4_decompress_safe_continue() only accepts one block at a time. - * It's modeled after `LZ4_decompress_safe()` and behaves similarly. - * - * @LZ4_streamDecode : decompression state, tracking the position in memory of past data - * @compressedSize : exact complete size of one compressed block. - * @dstCapacity : size of destination buffer (which must be already allocated), - * must be an upper bound of decompressed size. - * @return : number of bytes decompressed into destination buffer (necessarily <= dstCapacity) - * If destination buffer is not large enough, decoding will stop and output an error code (negative value). - * If the source stream is detected malformed, the function will stop decoding and return a negative result. - * - * The last 64KB of previously decoded data *must* remain available and unmodified - * at the memory position where they were previously decoded. - * If less than 64KB of data has been decoded, all the data must be present. - * - * Special : if decompression side sets a ring buffer, it must respect one of the following conditions : - * - Decompression buffer size is _at least_ LZ4_decoderRingBufferSize(maxBlockSize). - * maxBlockSize is the maximum size of any single block. It can have any value > 16 bytes. - * In which case, encoding and decoding buffers do not need to be synchronized. - * Actually, data can be produced by any source compliant with LZ4 format specification, and respecting maxBlockSize. - * - Synchronized mode : - * Decompression buffer size is _exactly_ the same as compression buffer size, - * and follows exactly same update rule (block boundaries at same positions), - * and decoding function is provided with exact decompressed size of each block (exception for last block of the stream), - * _then_ decoding & encoding ring buffer can have any size, including small ones ( < 64 KB). - * - Decompression buffer is larger than encoding buffer, by a minimum of maxBlockSize more bytes. - * In which case, encoding and decoding buffers do not need to be synchronized, - * and encoding ring buffer can have any size, including small ones ( < 64 KB). 
- * - * Whenever these conditions are not possible, - * save the last 64KB of decoded data into a safe buffer where it can't be modified during decompression, - * then indicate where this data is saved using LZ4_setStreamDecode(), before decompressing next block. -*/ -LZ4LIB_API int -LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, - const char* src, char* dst, - int srcSize, int dstCapacity); - - -/*! LZ4_decompress_safe_usingDict() : - * Works the same as - * a combination of LZ4_setStreamDecode() followed by LZ4_decompress_safe_continue() - * However, it's stateless: it doesn't need any LZ4_streamDecode_t state. - * Dictionary is presumed stable : it must remain accessible and unmodified during decompression. - * Performance tip : Decompression speed can be substantially increased - * when dst == dictStart + dictSize. - */ -LZ4LIB_API int -LZ4_decompress_safe_usingDict(const char* src, char* dst, - int srcSize, int dstCapacity, - const char* dictStart, int dictSize); - -/*! LZ4_decompress_safe_partial_usingDict() : - * Behaves the same as LZ4_decompress_safe_partial() - * with the added ability to specify a memory segment for past data. - * Performance tip : Decompression speed can be substantially increased - * when dst == dictStart + dictSize. - */ -LZ4LIB_API int -LZ4_decompress_safe_partial_usingDict(const char* src, char* dst, - int compressedSize, - int targetOutputSize, int maxOutputSize, - const char* dictStart, int dictSize); - -#endif /* LZ4_H_2983827168210 */ - - -/*^************************************* - * !!!!!! STATIC LINKING ONLY !!!!!! - ***************************************/ - -/*-**************************************************************************** - * Experimental section - * - * Symbols declared in this section must be considered unstable. Their - * signatures or semantics may change, or they may be removed altogether in the - * future. They are therefore only safe to depend on when the caller is - * statically linked against the library. - * - * To protect against unsafe usage, not only are the declarations guarded, - * the definitions are hidden by default - * when building LZ4 as a shared/dynamic library. - * - * In order to access these declarations, - * define LZ4_STATIC_LINKING_ONLY in your application - * before including LZ4's headers. - * - * In order to make their implementations accessible dynamically, you must - * define LZ4_PUBLISH_STATIC_FUNCTIONS when building the LZ4 library. - ******************************************************************************/ - -#ifdef LZ4_STATIC_LINKING_ONLY - -#ifndef LZ4_STATIC_3504398509 -#define LZ4_STATIC_3504398509 - -#ifdef LZ4_PUBLISH_STATIC_FUNCTIONS -#define LZ4LIB_STATIC_API LZ4LIB_API -#else -#define LZ4LIB_STATIC_API -#endif - - -/*! LZ4_compress_fast_extState_fastReset() : - * A variant of LZ4_compress_fast_extState(). - * - * Using this variant avoids an expensive initialization step. - * It is only safe to call if the state buffer is known to be correctly initialized already - * (see above comment on LZ4_resetStream_fast() for a definition of "correctly initialized"). - * From a high level, the difference is that - * this function initializes the provided state with a call to something like LZ4_resetStream_fast() - * while LZ4_compress_fast_extState() starts with a call to LZ4_resetStream(). - */ -LZ4LIB_STATIC_API int LZ4_compress_fast_extState_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); - -/*! 
LZ4_attach_dictionary() : - * This is an experimental API that allows - * efficient use of a static dictionary many times. - * - * Rather than re-loading the dictionary buffer into a working context before - * each compression, or copying a pre-loaded dictionary's LZ4_stream_t into a - * working LZ4_stream_t, this function introduces a no-copy setup mechanism, - * in which the working stream references the dictionary stream in-place. - * - * Several assumptions are made about the state of the dictionary stream. - * Currently, only streams which have been prepared by LZ4_loadDict() should - * be expected to work. - * - * Alternatively, the provided dictionaryStream may be NULL, - * in which case any existing dictionary stream is unset. - * - * If a dictionary is provided, it replaces any pre-existing stream history. - * The dictionary contents are the only history that can be referenced and - * logically immediately precede the data compressed in the first subsequent - * compression call. - * - * The dictionary will only remain attached to the working stream through the - * first compression call, at the end of which it is cleared. The dictionary - * stream (and source buffer) must remain in-place / accessible / unchanged - * through the completion of the first compression call on the stream. - */ -LZ4LIB_STATIC_API void -LZ4_attach_dictionary(LZ4_stream_t* workingStream, - const LZ4_stream_t* dictionaryStream); - - -/*! In-place compression and decompression - * - * It's possible to have input and output sharing the same buffer, - * for highly constrained memory environments. - * In both cases, it requires input to lay at the end of the buffer, - * and decompression to start at beginning of the buffer. - * Buffer size must feature some margin, hence be larger than final size. - * - * |<------------------------buffer--------------------------------->| - * |<-----------compressed data--------->| - * |<-----------decompressed size------------------>| - * |<----margin---->| - * - * This technique is more useful for decompression, - * since decompressed size is typically larger, - * and margin is short. - * - * In-place decompression will work inside any buffer - * which size is >= LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize). - * This presumes that decompressedSize > compressedSize. - * Otherwise, it means compression actually expanded data, - * and it would be more efficient to store such data with a flag indicating it's not compressed. - * This can happen when data is not compressible (already compressed, or encrypted). - * - * For in-place compression, margin is larger, as it must be able to cope with both - * history preservation, requiring input data to remain unmodified up to LZ4_DISTANCE_MAX, - * and data expansion, which can happen when input is not compressible. - * As a consequence, buffer size requirements are much higher, - * and memory savings offered by in-place compression are more limited. - * - * There are ways to limit this cost for compression : - * - Reduce history size, by modifying LZ4_DISTANCE_MAX. - * Note that it is a compile-time constant, so all compressions will apply this limit. - * Lower values will reduce compression ratio, except when input_size < LZ4_DISTANCE_MAX, - * so it's a reasonable trick when inputs are known to be small. - * - Require the compressor to deliver a "maximum compressed size". - * This is the `dstCapacity` parameter in `LZ4_compress*()`. 
- * When this size is < LZ4_COMPRESSBOUND(inputSize), then compression can fail, - * in which case, the return code will be 0 (zero). - * The caller must be ready for these cases to happen, - * and typically design a backup scheme to send data uncompressed. - * The combination of both techniques can significantly reduce - * the amount of margin required for in-place compression. - * - * In-place compression can work in any buffer - * which size is >= (maxCompressedSize) - * with maxCompressedSize == LZ4_COMPRESSBOUND(srcSize) for guaranteed compression success. - * LZ4_COMPRESS_INPLACE_BUFFER_SIZE() depends on both maxCompressedSize and LZ4_DISTANCE_MAX, - * so it's possible to reduce memory requirements by playing with them. - */ - -#define LZ4_DECOMPRESS_INPLACE_MARGIN(compressedSize) (((compressedSize) >> 8) + 32) -#define LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize) ((decompressedSize) + LZ4_DECOMPRESS_INPLACE_MARGIN(decompressedSize)) /**< note: presumes that compressedSize < decompressedSize. note2: margin is overestimated a bit, since it could use compressedSize instead */ - -#ifndef LZ4_DISTANCE_MAX /* history window size; can be user-defined at compile time */ -# define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */ -#endif - -#define LZ4_COMPRESS_INPLACE_MARGIN (LZ4_DISTANCE_MAX + 32) /* LZ4_DISTANCE_MAX can be safely replaced by srcSize when it's smaller */ -#define LZ4_COMPRESS_INPLACE_BUFFER_SIZE(maxCompressedSize) ((maxCompressedSize) + LZ4_COMPRESS_INPLACE_MARGIN) /**< maxCompressedSize is generally LZ4_COMPRESSBOUND(inputSize), but can be set to any lower value, with the risk that compression can fail (return code 0(zero)) */ - -#endif /* LZ4_STATIC_3504398509 */ -#endif /* LZ4_STATIC_LINKING_ONLY */ - - - -#ifndef LZ4_H_98237428734687 -#define LZ4_H_98237428734687 - -/*-************************************************************ - * Private Definitions - ************************************************************** - * Do not use these definitions directly. - * They are only exposed to allow static allocation of `LZ4_stream_t` and `LZ4_streamDecode_t`. - * Accessing members will expose user code to API and/or ABI break in future versions of the library. - **************************************************************/ -#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2) -#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE) -#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG) /* required as macro for static allocation */ - -#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -# include <stdint.h> - typedef int8_t LZ4_i8; - typedef uint8_t LZ4_byte; - typedef uint16_t LZ4_u16; - typedef uint32_t LZ4_u32; -#else - typedef signed char LZ4_i8; - typedef unsigned char LZ4_byte; - typedef unsigned short LZ4_u16; - typedef unsigned int LZ4_u32; -#endif - -/*! LZ4_stream_t : - * Never ever use below internal definitions directly ! - * These definitions are not API/ABI safe, and may change in future versions. - * If you need static allocation, declare or allocate an LZ4_stream_t object.
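 *
 * [Editor's illustrative aside - not part of the original header]
 * Static / stack allocation as mentioned above is simply:
 *
 *     LZ4_stream_t ctx;                                // union above guarantees size & alignment
 *     if (LZ4_initStream(&ctx, sizeof ctx) != NULL) {  // LZ4_initStream() is declared a few lines below
 *         // ctx is now ready for LZ4_compress_fast_continue(), LZ4_loadDict(), ...
 *     }
 *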
-**/ - -typedef struct LZ4_stream_t_internal LZ4_stream_t_internal; -struct LZ4_stream_t_internal { - LZ4_u32 hashTable[LZ4_HASH_SIZE_U32]; - const LZ4_byte* dictionary; - const LZ4_stream_t_internal* dictCtx; - LZ4_u32 currentOffset; - LZ4_u32 tableType; - LZ4_u32 dictSize; - /* Implicit padding to ensure structure is aligned */ -}; - -#define LZ4_STREAM_MINSIZE ((1UL << LZ4_MEMORY_USAGE) + 32) /* static size, for inter-version compatibility */ -union LZ4_stream_u { - char minStateSize[LZ4_STREAM_MINSIZE]; - LZ4_stream_t_internal internal_donotuse; -}; /* previously typedef'd to LZ4_stream_t */ - - -/*! LZ4_initStream() : v1.9.0+ - * An LZ4_stream_t structure must be initialized at least once. - * This is automatically done when invoking LZ4_createStream(), - * but it's not when the structure is simply declared on stack (for example). - * - * Use LZ4_initStream() to properly initialize a newly declared LZ4_stream_t. - * It can also initialize any arbitrary buffer of sufficient size, - * and will @return a pointer of proper type upon initialization. - * - * Note : initialization fails if size and alignment conditions are not respected. - * In which case, the function will @return NULL. - * Note2: An LZ4_stream_t structure guarantees correct alignment and size. - * Note3: Before v1.9.0, use LZ4_resetStream() instead -**/ -LZ4LIB_API LZ4_stream_t* LZ4_initStream (void* buffer, size_t size); - - -/*! LZ4_streamDecode_t : - * Never ever use below internal definitions directly ! - * These definitions are not API/ABI safe, and may change in future versions. - * If you need static allocation, declare or allocate an LZ4_streamDecode_t object. -**/ -typedef struct { - const LZ4_byte* externalDict; - const LZ4_byte* prefixEnd; - size_t extDictSize; - size_t prefixSize; -} LZ4_streamDecode_t_internal; - -#define LZ4_STREAMDECODE_MINSIZE 32 -union LZ4_streamDecode_u { - char minStateSize[LZ4_STREAMDECODE_MINSIZE]; - LZ4_streamDecode_t_internal internal_donotuse; -} ; /* previously typedef'd to LZ4_streamDecode_t */ - - - -/*-************************************ -* Obsolete Functions -**************************************/ - -/*! Deprecation warnings - * - * Deprecated functions make the compiler generate a warning when invoked. - * This is meant to invite users to update their source code. - * Should deprecation warnings be a problem, it is generally possible to disable them, - * typically with -Wno-deprecated-declarations for gcc - * or _CRT_SECURE_NO_WARNINGS in Visual. - * - * Another method is to define LZ4_DISABLE_DEPRECATE_WARNINGS - * before including the header file. - */ -#ifdef LZ4_DISABLE_DEPRECATE_WARNINGS -# define LZ4_DEPRECATED(message) /* disable deprecation warnings */ -#else -# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ -# define LZ4_DEPRECATED(message) [[deprecated(message)]] -# elif defined(_MSC_VER) -# define LZ4_DEPRECATED(message) __declspec(deprecated(message)) -# elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 45)) -# define LZ4_DEPRECATED(message) __attribute__((deprecated(message))) -# elif defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 31) -# define LZ4_DEPRECATED(message) __attribute__((deprecated)) -# else -# pragma message("WARNING: LZ4_DEPRECATED needs custom implementation for this compiler") -# define LZ4_DEPRECATED(message) /* disabled */ -# endif -#endif /* LZ4_DISABLE_DEPRECATE_WARNINGS */ - -/*! 
Obsolete compression functions (since v1.7.3) */ -LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress (const char* src, char* dest, int srcSize); -LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress_limitedOutput (const char* src, char* dest, int srcSize, int maxOutputSize); -LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize); -LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize); -LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize); -LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize, int maxOutputSize); - -/*! Obsolete decompression functions (since v1.8.0) */ -LZ4_DEPRECATED("use LZ4_decompress_fast() instead") LZ4LIB_API int LZ4_uncompress (const char* source, char* dest, int outputSize); -LZ4_DEPRECATED("use LZ4_decompress_safe() instead") LZ4LIB_API int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize); - -/* Obsolete streaming functions (since v1.7.0) - * degraded functionality; do not use! - * - * In order to perform streaming compression, these functions depended on data - * that is no longer tracked in the state. They have been preserved as well as - * possible: using them will still produce a correct output. However, they don't - * actually retain any history between compression calls. The compression ratio - * achieved will therefore be no better than compressing each chunk - * independently. - */ -LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API void* LZ4_create (char* inputBuffer); -LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API int LZ4_sizeofStreamState(void); -LZ4_DEPRECATED("Use LZ4_resetStream() instead") LZ4LIB_API int LZ4_resetStreamState(void* state, char* inputBuffer); -LZ4_DEPRECATED("Use LZ4_saveDict() instead") LZ4LIB_API char* LZ4_slideInputBuffer (void* state); - -/*! Obsolete streaming decoding functions (since v1.7.0) */ -LZ4_DEPRECATED("use LZ4_decompress_safe_usingDict() instead") LZ4LIB_API int LZ4_decompress_safe_withPrefix64k (const char* src, char* dst, int compressedSize, int maxDstSize); -LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") LZ4LIB_API int LZ4_decompress_fast_withPrefix64k (const char* src, char* dst, int originalSize); - -/*! Obsolete LZ4_decompress_fast variants (since v1.9.0) : - * These functions used to be faster than LZ4_decompress_safe(), - * but this is no longer the case. They are now slower. - * This is because LZ4_decompress_fast() doesn't know the input size, - * and therefore must progress more cautiously into the input buffer to not read beyond the end of block. - * On top of that `LZ4_decompress_fast()` is not protected vs malformed or malicious inputs, making it a security liability. - * As a consequence, LZ4_decompress_fast() is strongly discouraged, and deprecated. - * - * The last remaining LZ4_decompress_fast() specificity is that - * it can decompress a block without knowing its compressed size. 
- * Such functionality can be achieved in a more secure manner - * by employing LZ4_decompress_safe_partial(). - * - * Parameters: - * originalSize : is the uncompressed size to regenerate. - * `dst` must be already allocated, its size must be >= 'originalSize' bytes. - * @return : number of bytes read from source buffer (== compressed size). - * The function expects to finish at block's end exactly. - * If the source stream is detected malformed, the function stops decoding and returns a negative result. - * note : LZ4_decompress_fast*() requires originalSize. Thanks to this information, it never writes past the output buffer. - * However, since it doesn't know its 'src' size, it may read an unknown amount of input, past input buffer bounds. - * Also, since match offsets are not validated, match reads from 'src' may underflow too. - * These issues never happen if input (compressed) data is correct. - * But they may happen if input data is invalid (error or intentional tampering). - * As a consequence, use these functions in trusted environments with trusted data **only**. - */ -LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe() instead") -LZ4LIB_API int LZ4_decompress_fast (const char* src, char* dst, int originalSize); -LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_continue() instead") -LZ4LIB_API int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* src, char* dst, int originalSize); -LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_usingDict() instead") -LZ4LIB_API int LZ4_decompress_fast_usingDict (const char* src, char* dst, int originalSize, const char* dictStart, int dictSize); - -/*! LZ4_resetStream() : - * An LZ4_stream_t structure must be initialized at least once. - * This is done with LZ4_initStream(), or LZ4_resetStream(). - * Consider switching to LZ4_initStream(), - * invoking LZ4_resetStream() will trigger deprecation warnings in the future. - */ -LZ4LIB_API void LZ4_resetStream (LZ4_stream_t* streamPtr); - - -#endif /* LZ4_H_98237428734687 */ - - -#if defined (__cplusplus) -} -#endif diff --git a/CompressSave/LZ4WrapC/lz4/lz4frame.c b/CompressSave/LZ4WrapC/lz4/lz4frame.c deleted file mode 100644 index 77857a5..0000000 --- a/CompressSave/LZ4WrapC/lz4/lz4frame.c +++ /dev/null @@ -1,2076 +0,0 @@ -/* - * LZ4 auto-framing library - * Copyright (C) 2011-2016, Yann Collet. - * - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * You can contact the author at : - * - LZ4 homepage : http://www.lz4.org - * - LZ4 source repository : https://github.com/lz4/lz4 - */ - -/* LZ4F is a stand-alone API to create LZ4-compressed Frames - * in full conformance with specification v1.6.1 . - * This library rely upon memory management capabilities (malloc, free) - * provided either by , - * or redirected towards another library of user's choice - * (see Memory Routines below). - */ - - -/*-************************************ -* Compiler Options -**************************************/ -#ifdef _MSC_VER /* Visual Studio */ -# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ -#endif - - -/*-************************************ -* Tuning parameters -**************************************/ -/* - * LZ4F_HEAPMODE : - * Control how LZ4F_compressFrame allocates the Compression State, - * either on stack (0:default, fastest), or in memory heap (1:requires malloc()). - */ -#ifndef LZ4F_HEAPMODE -# define LZ4F_HEAPMODE 0 -#endif - - -/*-************************************ -* Library declarations -**************************************/ -#define LZ4F_STATIC_LINKING_ONLY -#include "lz4frame.h" -#define LZ4_STATIC_LINKING_ONLY -#include "lz4.h" -#define LZ4_HC_STATIC_LINKING_ONLY -#include "lz4hc.h" -#define XXH_STATIC_LINKING_ONLY -#include "xxhash.h" - - -/*-************************************ -* Memory routines -**************************************/ -/* - * User may redirect invocations of - * malloc(), calloc() and free() - * towards another library or solution of their choice - * by modifying below section. 
-**/
-
-#include <string.h> /* memset, memcpy, memmove */
-#ifndef LZ4_SRC_INCLUDED /* avoid redefinition when sources are coalesced */
-# define MEM_INIT(p,v,s) memset((p),(v),(s))
-#endif
-
-#ifndef LZ4_SRC_INCLUDED /* avoid redefinition when sources are coalesced */
-# include <stdlib.h> /* malloc, calloc, free */
-# define ALLOC(s) malloc(s)
-# define ALLOC_AND_ZERO(s) calloc(1,(s))
-# define FREEMEM(p) free(p)
-#endif
-
-static void* LZ4F_calloc(size_t s, LZ4F_CustomMem cmem)
-{
-    /* custom calloc defined : use it */
-    if (cmem.customCalloc != NULL) {
-        return cmem.customCalloc(cmem.opaqueState, s);
-    }
-    /* nothing defined : use default <stdlib.h>'s calloc() */
-    if (cmem.customAlloc == NULL) {
-        return ALLOC_AND_ZERO(s);
-    }
-    /* only custom alloc defined : use it, and combine it with memset() */
-    {   void* const p = cmem.customAlloc(cmem.opaqueState, s);
-        if (p != NULL) MEM_INIT(p, 0, s);
-        return p;
-}   }
-
-static void* LZ4F_malloc(size_t s, LZ4F_CustomMem cmem)
-{
-    /* custom malloc defined : use it */
-    if (cmem.customAlloc != NULL) {
-        return cmem.customAlloc(cmem.opaqueState, s);
-    }
-    /* nothing defined : use default <stdlib.h>'s malloc() */
-    return ALLOC(s);
-}
-
-static void LZ4F_free(void* p, LZ4F_CustomMem cmem)
-{
-    /* custom free defined : use it */
-    if (cmem.customFree != NULL) {
-        cmem.customFree(cmem.opaqueState, p);
-        return;
-    }
-    /* nothing defined : use default <stdlib.h>'s free() */
-    FREEMEM(p);
-}
-
-
-/*-************************************
-* Debug
-**************************************/
-#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1)
-# include <assert.h>
-#else
-# ifndef assert
-#  define assert(condition) ((void)0)
-# endif
-#endif
-
-#define LZ4F_STATIC_ASSERT(c) { enum { LZ4F_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
-
-#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2) && !defined(DEBUGLOG)
-# include <stdio.h>
-static int g_debuglog_enable = 1;
-# define DEBUGLOG(l, ...) { \
-        if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \
-            fprintf(stderr, __FILE__ ": "); \
-            fprintf(stderr, __VA_ARGS__); \
-            fprintf(stderr, " \n"); \
-        } }
-#else
-# define DEBUGLOG(l, ...) {} /* disabled */
-#endif
-
-
-/*-************************************
-* Basic Types
-**************************************/
-#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
-# include <stdint.h>
-  typedef uint8_t  BYTE;
-  typedef uint16_t U16;
-  typedef uint32_t U32;
-  typedef int32_t  S32;
-  typedef uint64_t U64;
-#else
-  typedef unsigned char      BYTE;
-  typedef unsigned short     U16;
-  typedef unsigned int       U32;
-  typedef signed int         S32;
-  typedef unsigned long long U64;
-#endif
-
-
-/* unoptimized version; solves endianness & alignment issues */
-static U32 LZ4F_readLE32 (const void* src)
-{
-    const BYTE* const srcPtr = (const BYTE*)src;
-    U32 value32 = srcPtr[0];
-    value32 += ((U32)srcPtr[1])<< 8;
-    value32 += ((U32)srcPtr[2])<<16;
-    value32 += ((U32)srcPtr[3])<<24;
-    return value32;
-}
-
-static void LZ4F_writeLE32 (void* dst, U32 value32)
-{
-    BYTE* const dstPtr = (BYTE*)dst;
-    dstPtr[0] = (BYTE)value32;
-    dstPtr[1] = (BYTE)(value32 >> 8);
-    dstPtr[2] = (BYTE)(value32 >> 16);
-    dstPtr[3] = (BYTE)(value32 >> 24);
-}
-
-static U64 LZ4F_readLE64 (const void* src)
-{
-    const BYTE* const srcPtr = (const BYTE*)src;
-    U64 value64 = srcPtr[0];
-    value64 += ((U64)srcPtr[1]<<8);
-    value64 += ((U64)srcPtr[2]<<16);
-    value64 += ((U64)srcPtr[3]<<24);
-    value64 += ((U64)srcPtr[4]<<32);
-    value64 += ((U64)srcPtr[5]<<40);
-    value64 += ((U64)srcPtr[6]<<48);
-    value64 += ((U64)srcPtr[7]<<56);
-    return value64;
-}
-
-static void LZ4F_writeLE64 (void* dst, U64 value64)
-{
-    BYTE* const dstPtr = (BYTE*)dst;
-    dstPtr[0] = (BYTE)value64;
-    dstPtr[1] = (BYTE)(value64 >> 8);
-    dstPtr[2] = (BYTE)(value64 >> 16);
-    dstPtr[3] = (BYTE)(value64 >> 24);
-    dstPtr[4] = (BYTE)(value64 >> 32);
-    dstPtr[5] = (BYTE)(value64 >> 40);
-    dstPtr[6] = (BYTE)(value64 >> 48);
-    dstPtr[7] = (BYTE)(value64 >> 56);
-}
-
-
-/*-************************************
-* Constants
-**************************************/
-#ifndef LZ4_SRC_INCLUDED /* avoid double definition */
-# define KB *(1<<10)
-# define MB *(1<<20)
-# define GB *(1<<30)
-#endif
-
-#define _1BIT  0x01
-#define _2BITS 0x03
-#define _3BITS 0x07
-#define _4BITS 0x0F
-#define _8BITS 0xFF
-
-#define LZ4F_BLOCKUNCOMPRESSED_FLAG 0x80000000U
-#define LZ4F_BLOCKSIZEID_DEFAULT LZ4F_max64KB
-
-static const size_t minFHSize = LZ4F_HEADER_SIZE_MIN; /* 7 */
-static const size_t maxFHSize = LZ4F_HEADER_SIZE_MAX; /* 19 */
-static const size_t BHSize = LZ4F_BLOCK_HEADER_SIZE; /* block header : size, and compress flag */
-static const size_t BFSize = LZ4F_BLOCK_CHECKSUM_SIZE; /* block footer : checksum (optional) */
-
-
-/*-************************************
-* Structures and local types
-**************************************/
-
-typedef enum { LZ4B_COMPRESSED, LZ4B_UNCOMPRESSED} LZ4F_blockCompression_t;
-
-typedef struct LZ4F_cctx_s
-{
-    LZ4F_CustomMem cmem;
-    LZ4F_preferences_t prefs;
-    U32    version;
-    U32    cStage;        /* 0 : compression uninitialized ; 1 : initialized, can compress */
-    const LZ4F_CDict* cdict;
-    size_t maxBlockSize;
-    size_t maxBufferSize;
-    BYTE*  tmpBuff;       /* internal buffer, for streaming */
-    BYTE*  tmpIn;         /* starting position of data compress within internal buffer (>= tmpBuff) */
-    size_t tmpInSize;     /* amount of data to compress after tmpIn */
-    U64    totalInSize;
-    XXH32_state_t xxh;
-    void*  lz4CtxPtr;
-    U16    lz4CtxAlloc;   /* sized for: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */
-    U16    lz4CtxState;   /* in use as: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */
-    LZ4F_blockCompression_t blockCompression;
-} LZ4F_cctx_t;
-
-
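[Editor's note, not part of the deleted file] The LZ4F_calloc/LZ4F_malloc/LZ4F_free helpers above route every allocation in this translation unit through an optional LZ4F_CustomMem descriptor. A minimal usage sketch follows, assuming only declarations from lz4frame.h that also appear later in this diff (LZ4F_CustomMem with its customAlloc/customCalloc/customFree/opaqueState members, LZ4F_createCompressionContext_advanced, LZ4F_freeCompressionContext); the _advanced constructors may require LZ4F_STATIC_LINKING_ONLY, and the helper names here are illustrative only.

#define LZ4F_STATIC_LINKING_ONLY
#include <stdlib.h>
#include "lz4frame.h"

/* trivial pass-through hooks; opaqueState is unused in this sketch */
static void* my_alloc (void* opaque, size_t size) { (void)opaque; return malloc(size); }
static void* my_calloc(void* opaque, size_t size) { (void)opaque; return calloc(1, size); }
static void  my_free  (void* opaque, void* addr)  { (void)opaque; free(addr); }

static LZ4F_cctx* create_cctx_with_custom_allocator(void)
{
    LZ4F_CustomMem const cmem = {
        .customAlloc  = my_alloc,   /* used by LZ4F_malloc() above */
        .customCalloc = my_calloc,  /* optional: LZ4F_calloc() falls back to alloc + memset */
        .customFree   = my_free,    /* used by LZ4F_free() above */
        .opaqueState  = NULL
    };
    /* returns NULL on allocation failure; release with LZ4F_freeCompressionContext() */
    return LZ4F_createCompressionContext_advanced(cmem, LZ4F_VERSION);
}

The same descriptor is accepted by the other _advanced constructors shown further down (LZ4F_createCDict_advanced, LZ4F_createDecompressionContext_advanced), so a host application can keep all LZ4 frame allocations inside one arena or tracking allocator.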
-/*-************************************ -* Error management -**************************************/ -#define LZ4F_GENERATE_STRING(STRING) #STRING, -static const char* LZ4F_errorStrings[] = { LZ4F_LIST_ERRORS(LZ4F_GENERATE_STRING) }; - - -unsigned LZ4F_isError(LZ4F_errorCode_t code) -{ - return (code > (LZ4F_errorCode_t)(-LZ4F_ERROR_maxCode)); -} - -const char* LZ4F_getErrorName(LZ4F_errorCode_t code) -{ - static const char* codeError = "Unspecified error code"; - if (LZ4F_isError(code)) return LZ4F_errorStrings[-(int)(code)]; - return codeError; -} - -LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult) -{ - if (!LZ4F_isError(functionResult)) return LZ4F_OK_NoError; - return (LZ4F_errorCodes)(-(ptrdiff_t)functionResult); -} - -static LZ4F_errorCode_t LZ4F_returnErrorCode(LZ4F_errorCodes code) -{ - /* A compilation error here means sizeof(ptrdiff_t) is not large enough */ - LZ4F_STATIC_ASSERT(sizeof(ptrdiff_t) >= sizeof(size_t)); - return (LZ4F_errorCode_t)-(ptrdiff_t)code; -} - -#define RETURN_ERROR(e) return LZ4F_returnErrorCode(LZ4F_ERROR_ ## e) - -#define RETURN_ERROR_IF(c,e) do { if (c) RETURN_ERROR(e); } while (0) - -#define FORWARD_IF_ERROR(r) do { if (LZ4F_isError(r)) return (r); } while (0) - -unsigned LZ4F_getVersion(void) { return LZ4F_VERSION; } - -int LZ4F_compressionLevel_max(void) { return LZ4HC_CLEVEL_MAX; } - -size_t LZ4F_getBlockSize(LZ4F_blockSizeID_t blockSizeID) -{ - static const size_t blockSizes[4] = { 64 KB, 256 KB, 1 MB, 4 MB }; - - if (blockSizeID == 0) blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT; - if (blockSizeID < LZ4F_max64KB || blockSizeID > LZ4F_max4MB) - RETURN_ERROR(maxBlockSize_invalid); - { int const blockSizeIdx = (int)blockSizeID - (int)LZ4F_max64KB; - return blockSizes[blockSizeIdx]; -} } - -/*-************************************ -* Private functions -**************************************/ -#define MIN(a,b) ( (a) < (b) ? (a) : (b) ) - -static BYTE LZ4F_headerChecksum (const void* header, size_t length) -{ - U32 const xxh = XXH32(header, length, 0); - return (BYTE)(xxh >> 8); -} - - -/*-************************************ -* Simple-pass compression functions -**************************************/ -static LZ4F_blockSizeID_t LZ4F_optimalBSID(const LZ4F_blockSizeID_t requestedBSID, - const size_t srcSize) -{ - LZ4F_blockSizeID_t proposedBSID = LZ4F_max64KB; - size_t maxBlockSize = 64 KB; - while (requestedBSID > proposedBSID) { - if (srcSize <= maxBlockSize) - return proposedBSID; - proposedBSID = (LZ4F_blockSizeID_t)((int)proposedBSID + 1); - maxBlockSize <<= 2; - } - return requestedBSID; -} - -/*! LZ4F_compressBound_internal() : - * Provides dstCapacity given a srcSize to guarantee operation success in worst case situations. - * prefsPtr is optional : if NULL is provided, preferences will be set to cover worst case scenario. - * @return is always the same for a srcSize and prefsPtr, so it can be relied upon to size reusable buffers. - * When srcSize==0, LZ4F_compressBound() provides an upper bound for LZ4F_flush() and LZ4F_compressEnd() operations. - */ -static size_t LZ4F_compressBound_internal(size_t srcSize, - const LZ4F_preferences_t* preferencesPtr, - size_t alreadyBuffered) -{ - LZ4F_preferences_t prefsNull = LZ4F_INIT_PREFERENCES; - prefsNull.frameInfo.contentChecksumFlag = LZ4F_contentChecksumEnabled; /* worst case */ - prefsNull.frameInfo.blockChecksumFlag = LZ4F_blockChecksumEnabled; /* worst case */ - { const LZ4F_preferences_t* const prefsPtr = (preferencesPtr==NULL) ? 
&prefsNull : preferencesPtr; - U32 const flush = prefsPtr->autoFlush | (srcSize==0); - LZ4F_blockSizeID_t const blockID = prefsPtr->frameInfo.blockSizeID; - size_t const blockSize = LZ4F_getBlockSize(blockID); - size_t const maxBuffered = blockSize - 1; - size_t const bufferedSize = MIN(alreadyBuffered, maxBuffered); - size_t const maxSrcSize = srcSize + bufferedSize; - unsigned const nbFullBlocks = (unsigned)(maxSrcSize / blockSize); - size_t const partialBlockSize = maxSrcSize & (blockSize-1); - size_t const lastBlockSize = flush ? partialBlockSize : 0; - unsigned const nbBlocks = nbFullBlocks + (lastBlockSize>0); - - size_t const blockCRCSize = BFSize * prefsPtr->frameInfo.blockChecksumFlag; - size_t const frameEnd = BHSize + (prefsPtr->frameInfo.contentChecksumFlag*BFSize); - - return ((BHSize + blockCRCSize) * nbBlocks) + - (blockSize * nbFullBlocks) + lastBlockSize + frameEnd; - } -} - -size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr) -{ - LZ4F_preferences_t prefs; - size_t const headerSize = maxFHSize; /* max header size, including optional fields */ - - if (preferencesPtr!=NULL) prefs = *preferencesPtr; - else MEM_INIT(&prefs, 0, sizeof(prefs)); - prefs.autoFlush = 1; - - return headerSize + LZ4F_compressBound_internal(srcSize, &prefs, 0);; -} - - -/*! LZ4F_compressFrame_usingCDict() : - * Compress srcBuffer using a dictionary, in a single step. - * cdict can be NULL, in which case, no dictionary is used. - * dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr). - * The LZ4F_preferences_t structure is optional : you may provide NULL as argument, - * however, it's the only way to provide a dictID, so it's not recommended. - * @return : number of bytes written into dstBuffer, - * or an error code if it fails (can be tested using LZ4F_isError()) - */ -size_t LZ4F_compressFrame_usingCDict(LZ4F_cctx* cctx, - void* dstBuffer, size_t dstCapacity, - const void* srcBuffer, size_t srcSize, - const LZ4F_CDict* cdict, - const LZ4F_preferences_t* preferencesPtr) -{ - LZ4F_preferences_t prefs; - LZ4F_compressOptions_t options; - BYTE* const dstStart = (BYTE*) dstBuffer; - BYTE* dstPtr = dstStart; - BYTE* const dstEnd = dstStart + dstCapacity; - - if (preferencesPtr!=NULL) - prefs = *preferencesPtr; - else - MEM_INIT(&prefs, 0, sizeof(prefs)); - if (prefs.frameInfo.contentSize != 0) - prefs.frameInfo.contentSize = (U64)srcSize; /* auto-correct content size if selected (!=0) */ - - prefs.frameInfo.blockSizeID = LZ4F_optimalBSID(prefs.frameInfo.blockSizeID, srcSize); - prefs.autoFlush = 1; - if (srcSize <= LZ4F_getBlockSize(prefs.frameInfo.blockSizeID)) - prefs.frameInfo.blockMode = LZ4F_blockIndependent; /* only one block => no need for inter-block link */ - - MEM_INIT(&options, 0, sizeof(options)); - options.stableSrc = 1; - - RETURN_ERROR_IF(dstCapacity < LZ4F_compressFrameBound(srcSize, &prefs), dstMaxSize_tooSmall); - - { size_t const headerSize = LZ4F_compressBegin_usingCDict(cctx, dstBuffer, dstCapacity, cdict, &prefs); /* write header */ - FORWARD_IF_ERROR(headerSize); - dstPtr += headerSize; /* header size */ } - - assert(dstEnd >= dstPtr); - { size_t const cSize = LZ4F_compressUpdate(cctx, dstPtr, (size_t)(dstEnd-dstPtr), srcBuffer, srcSize, &options); - FORWARD_IF_ERROR(cSize); - dstPtr += cSize; } - - assert(dstEnd >= dstPtr); - { size_t const tailSize = LZ4F_compressEnd(cctx, dstPtr, (size_t)(dstEnd-dstPtr), &options); /* flush last block, and generate suffix */ - FORWARD_IF_ERROR(tailSize); - dstPtr += tailSize; } - - 
assert(dstEnd >= dstStart); - return (size_t)(dstPtr - dstStart); -} - - -/*! LZ4F_compressFrame() : - * Compress an entire srcBuffer into a valid LZ4 frame, in a single step. - * dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr). - * The LZ4F_preferences_t structure is optional : you can provide NULL as argument. All preferences will be set to default. - * @return : number of bytes written into dstBuffer. - * or an error code if it fails (can be tested using LZ4F_isError()) - */ -size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity, - const void* srcBuffer, size_t srcSize, - const LZ4F_preferences_t* preferencesPtr) -{ - size_t result; -#if (LZ4F_HEAPMODE) - LZ4F_cctx_t* cctxPtr; - result = LZ4F_createCompressionContext(&cctxPtr, LZ4F_VERSION); - FORWARD_IF_ERROR(result); -#else - LZ4F_cctx_t cctx; - LZ4_stream_t lz4ctx; - LZ4F_cctx_t* const cctxPtr = &cctx; - - MEM_INIT(&cctx, 0, sizeof(cctx)); - cctx.version = LZ4F_VERSION; - cctx.maxBufferSize = 5 MB; /* mess with real buffer size to prevent dynamic allocation; works only because autoflush==1 & stableSrc==1 */ - if ( preferencesPtr == NULL - || preferencesPtr->compressionLevel < LZ4HC_CLEVEL_MIN ) { - LZ4_initStream(&lz4ctx, sizeof(lz4ctx)); - cctxPtr->lz4CtxPtr = &lz4ctx; - cctxPtr->lz4CtxAlloc = 1; - cctxPtr->lz4CtxState = 1; - } -#endif - DEBUGLOG(4, "LZ4F_compressFrame"); - - result = LZ4F_compressFrame_usingCDict(cctxPtr, dstBuffer, dstCapacity, - srcBuffer, srcSize, - NULL, preferencesPtr); - -#if (LZ4F_HEAPMODE) - LZ4F_freeCompressionContext(cctxPtr); -#else - if ( preferencesPtr != NULL - && preferencesPtr->compressionLevel >= LZ4HC_CLEVEL_MIN ) { - LZ4F_free(cctxPtr->lz4CtxPtr, cctxPtr->cmem); - } -#endif - return result; -} - - -/*-*************************************************** -* Dictionary compression -*****************************************************/ - -struct LZ4F_CDict_s { - LZ4F_CustomMem cmem; - void* dictContent; - LZ4_stream_t* fastCtx; - LZ4_streamHC_t* HCCtx; -}; /* typedef'd to LZ4F_CDict within lz4frame_static.h */ - -LZ4F_CDict* -LZ4F_createCDict_advanced(LZ4F_CustomMem cmem, const void* dictBuffer, size_t dictSize) -{ - const char* dictStart = (const char*)dictBuffer; - LZ4F_CDict* const cdict = (LZ4F_CDict*)LZ4F_malloc(sizeof(*cdict), cmem); - DEBUGLOG(4, "LZ4F_createCDict_advanced"); - if (!cdict) return NULL; - cdict->cmem = cmem; - if (dictSize > 64 KB) { - dictStart += dictSize - 64 KB; - dictSize = 64 KB; - } - cdict->dictContent = LZ4F_malloc(dictSize, cmem); - cdict->fastCtx = (LZ4_stream_t*)LZ4F_malloc(sizeof(LZ4_stream_t), cmem); - if (cdict->fastCtx) - LZ4_initStream(cdict->fastCtx, sizeof(LZ4_stream_t)); - cdict->HCCtx = (LZ4_streamHC_t*)LZ4F_malloc(sizeof(LZ4_streamHC_t), cmem); - if (cdict->HCCtx) - LZ4_initStream(cdict->HCCtx, sizeof(LZ4_streamHC_t)); - if (!cdict->dictContent || !cdict->fastCtx || !cdict->HCCtx) { - LZ4F_freeCDict(cdict); - return NULL; - } - memcpy(cdict->dictContent, dictStart, dictSize); - LZ4_loadDict (cdict->fastCtx, (const char*)cdict->dictContent, (int)dictSize); - LZ4_setCompressionLevel(cdict->HCCtx, LZ4HC_CLEVEL_DEFAULT); - LZ4_loadDictHC(cdict->HCCtx, (const char*)cdict->dictContent, (int)dictSize); - return cdict; -} - -/*! LZ4F_createCDict() : - * When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once. - * LZ4F_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay. 
- * LZ4F_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only. - * @dictBuffer can be released after LZ4F_CDict creation, since its content is copied within CDict - * @return : digested dictionary for compression, or NULL if failed */ -LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize) -{ - DEBUGLOG(4, "LZ4F_createCDict"); - return LZ4F_createCDict_advanced(LZ4F_defaultCMem, dictBuffer, dictSize); -} - -void LZ4F_freeCDict(LZ4F_CDict* cdict) -{ - if (cdict==NULL) return; /* support free on NULL */ - LZ4F_free(cdict->dictContent, cdict->cmem); - LZ4F_free(cdict->fastCtx, cdict->cmem); - LZ4F_free(cdict->HCCtx, cdict->cmem); - LZ4F_free(cdict, cdict->cmem); -} - - -/*-********************************* -* Advanced compression functions -***********************************/ - -LZ4F_cctx* -LZ4F_createCompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version) -{ - LZ4F_cctx* const cctxPtr = - (LZ4F_cctx*)LZ4F_calloc(sizeof(LZ4F_cctx), customMem); - if (cctxPtr==NULL) return NULL; - - cctxPtr->cmem = customMem; - cctxPtr->version = version; - cctxPtr->cStage = 0; /* Uninitialized. Next stage : init cctx */ - - return cctxPtr; -} - -/*! LZ4F_createCompressionContext() : - * The first thing to do is to create a compressionContext object, which will be used in all compression operations. - * This is achieved using LZ4F_createCompressionContext(), which takes as argument a version and an LZ4F_preferences_t structure. - * The version provided MUST be LZ4F_VERSION. It is intended to track potential incompatible differences between different binaries. - * The function will provide a pointer to an allocated LZ4F_compressionContext_t object. - * If the result LZ4F_errorCode_t is not OK_NoError, there was an error during context creation. - * Object can release its memory using LZ4F_freeCompressionContext(); -**/ -LZ4F_errorCode_t -LZ4F_createCompressionContext(LZ4F_cctx** LZ4F_compressionContextPtr, unsigned version) -{ - assert(LZ4F_compressionContextPtr != NULL); /* considered a violation of narrow contract */ - /* in case it nonetheless happen in production */ - RETURN_ERROR_IF(LZ4F_compressionContextPtr == NULL, parameter_null); - - *LZ4F_compressionContextPtr = LZ4F_createCompressionContext_advanced(LZ4F_defaultCMem, version); - RETURN_ERROR_IF(*LZ4F_compressionContextPtr==NULL, allocation_failed); - return LZ4F_OK_NoError; -} - - -LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctxPtr) -{ - if (cctxPtr != NULL) { /* support free on NULL */ - LZ4F_free(cctxPtr->lz4CtxPtr, cctxPtr->cmem); /* note: LZ4_streamHC_t and LZ4_stream_t are simple POD types */ - LZ4F_free(cctxPtr->tmpBuff, cctxPtr->cmem); - LZ4F_free(cctxPtr, cctxPtr->cmem); - } - return LZ4F_OK_NoError; -} - - -/** - * This function prepares the internal LZ4(HC) stream for a new compression, - * resetting the context and attaching the dictionary, if there is one. - * - * It needs to be called at the beginning of each independent compression - * stream (i.e., at the beginning of a frame in blockLinked mode, or at the - * beginning of each block in blockIndependent mode). - */ -static void LZ4F_initStream(void* ctx, - const LZ4F_CDict* cdict, - int level, - LZ4F_blockMode_t blockMode) { - if (level < LZ4HC_CLEVEL_MIN) { - if (cdict != NULL || blockMode == LZ4F_blockLinked) { - /* In these cases, we will call LZ4_compress_fast_continue(), - * which needs an already reset context. Otherwise, we'll call a - * one-shot API. 
The non-continued APIs internally perform their own - * resets at the beginning of their calls, where they know what - * tableType they need the context to be in. So in that case this - * would be misguided / wasted work. */ - LZ4_resetStream_fast((LZ4_stream_t*)ctx); - } - LZ4_attach_dictionary((LZ4_stream_t *)ctx, cdict ? cdict->fastCtx : NULL); - } else { - LZ4_resetStreamHC_fast((LZ4_streamHC_t*)ctx, level); - LZ4_attach_HC_dictionary((LZ4_streamHC_t *)ctx, cdict ? cdict->HCCtx : NULL); - } -} - -static int ctxTypeID_to_size(int ctxTypeID) { - switch(ctxTypeID) { - case 1: - return LZ4_sizeofState(); - case 2: - return LZ4_sizeofStateHC(); - default: - return 0; - } -} - -/*! LZ4F_compressBegin_usingCDict() : - * init streaming compression AND writes frame header into @dstBuffer. - * @dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes. - * @return : number of bytes written into @dstBuffer for the header - * or an error code (can be tested using LZ4F_isError()) - */ -size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctxPtr, - void* dstBuffer, size_t dstCapacity, - const LZ4F_CDict* cdict, - const LZ4F_preferences_t* preferencesPtr) -{ - LZ4F_preferences_t const prefNull = LZ4F_INIT_PREFERENCES; - BYTE* const dstStart = (BYTE*)dstBuffer; - BYTE* dstPtr = dstStart; - - RETURN_ERROR_IF(dstCapacity < maxFHSize, dstMaxSize_tooSmall); - if (preferencesPtr == NULL) preferencesPtr = &prefNull; - cctxPtr->prefs = *preferencesPtr; - - /* cctx Management */ - { U16 const ctxTypeID = (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) ? 1 : 2; - int requiredSize = ctxTypeID_to_size(ctxTypeID); - int allocatedSize = ctxTypeID_to_size(cctxPtr->lz4CtxAlloc); - if (allocatedSize < requiredSize) { - /* not enough space allocated */ - LZ4F_free(cctxPtr->lz4CtxPtr, cctxPtr->cmem); - if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) { - /* must take ownership of memory allocation, - * in order to respect custom allocator contract */ - cctxPtr->lz4CtxPtr = LZ4F_malloc(sizeof(LZ4_stream_t), cctxPtr->cmem); - if (cctxPtr->lz4CtxPtr) - LZ4_initStream(cctxPtr->lz4CtxPtr, sizeof(LZ4_stream_t)); - } else { - cctxPtr->lz4CtxPtr = LZ4F_malloc(sizeof(LZ4_streamHC_t), cctxPtr->cmem); - if (cctxPtr->lz4CtxPtr) - LZ4_initStreamHC(cctxPtr->lz4CtxPtr, sizeof(LZ4_streamHC_t)); - } - RETURN_ERROR_IF(cctxPtr->lz4CtxPtr == NULL, allocation_failed); - cctxPtr->lz4CtxAlloc = ctxTypeID; - cctxPtr->lz4CtxState = ctxTypeID; - } else if (cctxPtr->lz4CtxState != ctxTypeID) { - /* otherwise, a sufficient buffer is already allocated, - * but we need to reset it to the correct context type */ - if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) { - LZ4_initStream((LZ4_stream_t*)cctxPtr->lz4CtxPtr, sizeof(LZ4_stream_t)); - } else { - LZ4_initStreamHC((LZ4_streamHC_t*)cctxPtr->lz4CtxPtr, sizeof(LZ4_streamHC_t)); - LZ4_setCompressionLevel((LZ4_streamHC_t*)cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel); - } - cctxPtr->lz4CtxState = ctxTypeID; - } } - - /* Buffer Management */ - if (cctxPtr->prefs.frameInfo.blockSizeID == 0) - cctxPtr->prefs.frameInfo.blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT; - cctxPtr->maxBlockSize = LZ4F_getBlockSize(cctxPtr->prefs.frameInfo.blockSizeID); - - { size_t const requiredBuffSize = preferencesPtr->autoFlush ? - ((cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 64 KB : 0) : /* only needs past data up to window size */ - cctxPtr->maxBlockSize + ((cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 
128 KB : 0); - - if (cctxPtr->maxBufferSize < requiredBuffSize) { - cctxPtr->maxBufferSize = 0; - LZ4F_free(cctxPtr->tmpBuff, cctxPtr->cmem); - cctxPtr->tmpBuff = (BYTE*)LZ4F_malloc(requiredBuffSize, cctxPtr->cmem); - RETURN_ERROR_IF(cctxPtr->tmpBuff == NULL, allocation_failed); - cctxPtr->maxBufferSize = requiredBuffSize; - } } - cctxPtr->tmpIn = cctxPtr->tmpBuff; - cctxPtr->tmpInSize = 0; - (void)XXH32_reset(&(cctxPtr->xxh), 0); - - /* context init */ - cctxPtr->cdict = cdict; - if (cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) { - /* frame init only for blockLinked : blockIndependent will be init at each block */ - LZ4F_initStream(cctxPtr->lz4CtxPtr, cdict, cctxPtr->prefs.compressionLevel, LZ4F_blockLinked); - } - if (preferencesPtr->compressionLevel >= LZ4HC_CLEVEL_MIN) { - LZ4_favorDecompressionSpeed((LZ4_streamHC_t*)cctxPtr->lz4CtxPtr, (int)preferencesPtr->favorDecSpeed); - } - - /* Magic Number */ - LZ4F_writeLE32(dstPtr, LZ4F_MAGICNUMBER); - dstPtr += 4; - { BYTE* const headerStart = dstPtr; - - /* FLG Byte */ - *dstPtr++ = (BYTE)(((1 & _2BITS) << 6) /* Version('01') */ - + ((cctxPtr->prefs.frameInfo.blockMode & _1BIT ) << 5) - + ((cctxPtr->prefs.frameInfo.blockChecksumFlag & _1BIT ) << 4) - + ((unsigned)(cctxPtr->prefs.frameInfo.contentSize > 0) << 3) - + ((cctxPtr->prefs.frameInfo.contentChecksumFlag & _1BIT ) << 2) - + (cctxPtr->prefs.frameInfo.dictID > 0) ); - /* BD Byte */ - *dstPtr++ = (BYTE)((cctxPtr->prefs.frameInfo.blockSizeID & _3BITS) << 4); - /* Optional Frame content size field */ - if (cctxPtr->prefs.frameInfo.contentSize) { - LZ4F_writeLE64(dstPtr, cctxPtr->prefs.frameInfo.contentSize); - dstPtr += 8; - cctxPtr->totalInSize = 0; - } - /* Optional dictionary ID field */ - if (cctxPtr->prefs.frameInfo.dictID) { - LZ4F_writeLE32(dstPtr, cctxPtr->prefs.frameInfo.dictID); - dstPtr += 4; - } - /* Header CRC Byte */ - *dstPtr = LZ4F_headerChecksum(headerStart, (size_t)(dstPtr - headerStart)); - dstPtr++; - } - - cctxPtr->cStage = 1; /* header written, now request input data block */ - return (size_t)(dstPtr - dstStart); -} - - -/*! LZ4F_compressBegin() : - * init streaming compression AND writes frame header into @dstBuffer. - * @dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes. - * @preferencesPtr can be NULL, in which case default parameters are selected. - * @return : number of bytes written into dstBuffer for the header - * or an error code (can be tested using LZ4F_isError()) - */ -size_t LZ4F_compressBegin(LZ4F_cctx* cctxPtr, - void* dstBuffer, size_t dstCapacity, - const LZ4F_preferences_t* preferencesPtr) -{ - return LZ4F_compressBegin_usingCDict(cctxPtr, dstBuffer, dstCapacity, - NULL, preferencesPtr); -} - - -/* LZ4F_compressBound() : - * @return minimum capacity of dstBuffer for a given srcSize to handle worst case scenario. - * LZ4F_preferences_t structure is optional : if NULL, preferences will be set to cover worst case scenario. - * This function cannot fail. - */ -size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr) -{ - if (preferencesPtr && preferencesPtr->autoFlush) { - return LZ4F_compressBound_internal(srcSize, preferencesPtr, 0); - } - return LZ4F_compressBound_internal(srcSize, preferencesPtr, (size_t)-1); -} - - -typedef int (*compressFunc_t)(void* ctx, const char* src, char* dst, int srcSize, int dstSize, int level, const LZ4F_CDict* cdict); - - -/*! LZ4F_makeBlock(): - * compress a single block, add header and optional checksum. 
- * assumption : dst buffer capacity is >= BHSize + srcSize + crcSize - */ -static size_t LZ4F_makeBlock(void* dst, - const void* src, size_t srcSize, - compressFunc_t compress, void* lz4ctx, int level, - const LZ4F_CDict* cdict, - LZ4F_blockChecksum_t crcFlag) -{ - BYTE* const cSizePtr = (BYTE*)dst; - U32 cSize; - assert(compress != NULL); - cSize = (U32)compress(lz4ctx, (const char*)src, (char*)(cSizePtr+BHSize), - (int)(srcSize), (int)(srcSize-1), - level, cdict); - - if (cSize == 0 || cSize >= srcSize) { - cSize = (U32)srcSize; - LZ4F_writeLE32(cSizePtr, cSize | LZ4F_BLOCKUNCOMPRESSED_FLAG); - memcpy(cSizePtr+BHSize, src, srcSize); - } else { - LZ4F_writeLE32(cSizePtr, cSize); - } - if (crcFlag) { - U32 const crc32 = XXH32(cSizePtr+BHSize, cSize, 0); /* checksum of compressed data */ - LZ4F_writeLE32(cSizePtr+BHSize+cSize, crc32); - } - return BHSize + cSize + ((U32)crcFlag)*BFSize; -} - - -static int LZ4F_compressBlock(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict) -{ - int const acceleration = (level < 0) ? -level + 1 : 1; - DEBUGLOG(5, "LZ4F_compressBlock (srcSize=%i)", srcSize); - LZ4F_initStream(ctx, cdict, level, LZ4F_blockIndependent); - if (cdict) { - return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, acceleration); - } else { - return LZ4_compress_fast_extState_fastReset(ctx, src, dst, srcSize, dstCapacity, acceleration); - } -} - -static int LZ4F_compressBlock_continue(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict) -{ - int const acceleration = (level < 0) ? -level + 1 : 1; - (void)cdict; /* init once at beginning of frame */ - DEBUGLOG(5, "LZ4F_compressBlock_continue (srcSize=%i)", srcSize); - return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, acceleration); -} - -static int LZ4F_compressBlockHC(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict) -{ - LZ4F_initStream(ctx, cdict, level, LZ4F_blockIndependent); - if (cdict) { - return LZ4_compress_HC_continue((LZ4_streamHC_t*)ctx, src, dst, srcSize, dstCapacity); - } - return LZ4_compress_HC_extStateHC_fastReset(ctx, src, dst, srcSize, dstCapacity, level); -} - -static int LZ4F_compressBlockHC_continue(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict) -{ - (void)level; (void)cdict; /* init once at beginning of frame */ - return LZ4_compress_HC_continue((LZ4_streamHC_t*)ctx, src, dst, srcSize, dstCapacity); -} - -static int LZ4F_doNotCompressBlock(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict) -{ - (void)ctx; (void)src; (void)dst; (void)srcSize; (void)dstCapacity; (void)level; (void)cdict; - return 0; -} - -static compressFunc_t LZ4F_selectCompression(LZ4F_blockMode_t blockMode, int level, LZ4F_blockCompression_t compressMode) -{ - if (compressMode == LZ4B_UNCOMPRESSED) return LZ4F_doNotCompressBlock; - if (level < LZ4HC_CLEVEL_MIN) { - if (blockMode == LZ4F_blockIndependent) return LZ4F_compressBlock; - return LZ4F_compressBlock_continue; - } - if (blockMode == LZ4F_blockIndependent) return LZ4F_compressBlockHC; - return LZ4F_compressBlockHC_continue; -} - -/* Save history (up to 64KB) into @tmpBuff */ -static int LZ4F_localSaveDict(LZ4F_cctx_t* cctxPtr) -{ - if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) - return LZ4_saveDict ((LZ4_stream_t*)(cctxPtr->lz4CtxPtr), 
(char*)(cctxPtr->tmpBuff), 64 KB); - return LZ4_saveDictHC ((LZ4_streamHC_t*)(cctxPtr->lz4CtxPtr), (char*)(cctxPtr->tmpBuff), 64 KB); -} - -typedef enum { notDone, fromTmpBuffer, fromSrcBuffer } LZ4F_lastBlockStatus; - -static const LZ4F_compressOptions_t k_cOptionsNull = { 0, { 0, 0, 0 } }; - - - /*! LZ4F_compressUpdateImpl() : - * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary. - * When successful, the function always entirely consumes @srcBuffer. - * src data is either buffered or compressed into @dstBuffer. - * If the block compression does not match the compression of the previous block, the old data is flushed - * and operations continue with the new compression mode. - * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr) when block compression is turned on. - * @compressOptionsPtr is optional : provide NULL to mean "default". - * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered. - * or an error code if it fails (which can be tested using LZ4F_isError()) - * After an error, the state is left in a UB state, and must be re-initialized. - */ -static size_t LZ4F_compressUpdateImpl(LZ4F_cctx* cctxPtr, - void* dstBuffer, size_t dstCapacity, - const void* srcBuffer, size_t srcSize, - const LZ4F_compressOptions_t* compressOptionsPtr, - LZ4F_blockCompression_t blockCompression) - { - size_t const blockSize = cctxPtr->maxBlockSize; - const BYTE* srcPtr = (const BYTE*)srcBuffer; - const BYTE* const srcEnd = srcPtr + srcSize; - BYTE* const dstStart = (BYTE*)dstBuffer; - BYTE* dstPtr = dstStart; - LZ4F_lastBlockStatus lastBlockCompressed = notDone; - compressFunc_t const compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel, blockCompression); - size_t bytesWritten; - DEBUGLOG(4, "LZ4F_compressUpdate (srcSize=%zu)", srcSize); - - RETURN_ERROR_IF(cctxPtr->cStage != 1, compressionState_uninitialized); /* state must be initialized and waiting for next block */ - if (dstCapacity < LZ4F_compressBound_internal(srcSize, &(cctxPtr->prefs), cctxPtr->tmpInSize)) - RETURN_ERROR(dstMaxSize_tooSmall); - - if (blockCompression == LZ4B_UNCOMPRESSED && dstCapacity < srcSize) - RETURN_ERROR(dstMaxSize_tooSmall); - - /* flush currently written block, to continue with new block compression */ - if (cctxPtr->blockCompression != blockCompression) { - bytesWritten = LZ4F_flush(cctxPtr, dstBuffer, dstCapacity, compressOptionsPtr); - dstPtr += bytesWritten; - cctxPtr->blockCompression = blockCompression; - } - - if (compressOptionsPtr == NULL) compressOptionsPtr = &k_cOptionsNull; - - /* complete tmp buffer */ - if (cctxPtr->tmpInSize > 0) { /* some data already within tmp buffer */ - size_t const sizeToCopy = blockSize - cctxPtr->tmpInSize; - assert(blockSize > cctxPtr->tmpInSize); - if (sizeToCopy > srcSize) { - /* add src to tmpIn buffer */ - memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, srcSize); - srcPtr = srcEnd; - cctxPtr->tmpInSize += srcSize; - /* still needs some CRC */ - } else { - /* complete tmpIn block and then compress it */ - lastBlockCompressed = fromTmpBuffer; - memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, sizeToCopy); - srcPtr += sizeToCopy; - - dstPtr += LZ4F_makeBlock(dstPtr, - cctxPtr->tmpIn, blockSize, - compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel, - cctxPtr->cdict, - cctxPtr->prefs.frameInfo.blockChecksumFlag); - if (cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) cctxPtr->tmpIn += blockSize; - 
cctxPtr->tmpInSize = 0; - } } - - while ((size_t)(srcEnd - srcPtr) >= blockSize) { - /* compress full blocks */ - lastBlockCompressed = fromSrcBuffer; - dstPtr += LZ4F_makeBlock(dstPtr, - srcPtr, blockSize, - compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel, - cctxPtr->cdict, - cctxPtr->prefs.frameInfo.blockChecksumFlag); - srcPtr += blockSize; - } - - if ((cctxPtr->prefs.autoFlush) && (srcPtr < srcEnd)) { - /* autoFlush : remaining input (< blockSize) is compressed */ - lastBlockCompressed = fromSrcBuffer; - dstPtr += LZ4F_makeBlock(dstPtr, - srcPtr, (size_t)(srcEnd - srcPtr), - compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel, - cctxPtr->cdict, - cctxPtr->prefs.frameInfo.blockChecksumFlag); - srcPtr = srcEnd; - } - - /* preserve dictionary within @tmpBuff whenever necessary */ - if ((cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) && (lastBlockCompressed==fromSrcBuffer)) { - /* linked blocks are only supported in compressed mode, see LZ4F_uncompressedUpdate */ - assert(blockCompression == LZ4B_COMPRESSED); - if (compressOptionsPtr->stableSrc) { - cctxPtr->tmpIn = cctxPtr->tmpBuff; /* src is stable : dictionary remains in src across invocations */ - } else { - int const realDictSize = LZ4F_localSaveDict(cctxPtr); - assert(0 <= realDictSize && realDictSize <= 64 KB); - cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize; - } - } - - /* keep tmpIn within limits */ - if (!(cctxPtr->prefs.autoFlush) /* no autoflush : there may be some data left within internal buffer */ - && (cctxPtr->tmpIn + blockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize) ) /* not enough room to store next block */ - { - /* only preserve 64KB within internal buffer. Ensures there is enough room for next block. - * note: this situation necessarily implies lastBlockCompressed==fromTmpBuffer */ - int const realDictSize = LZ4F_localSaveDict(cctxPtr); - cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize; - assert((cctxPtr->tmpIn + blockSize) <= (cctxPtr->tmpBuff + cctxPtr->maxBufferSize)); - } - - /* some input data left, necessarily < blockSize */ - if (srcPtr < srcEnd) { - /* fill tmp buffer */ - size_t const sizeToCopy = (size_t)(srcEnd - srcPtr); - memcpy(cctxPtr->tmpIn, srcPtr, sizeToCopy); - cctxPtr->tmpInSize = sizeToCopy; - } - - if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled) - (void)XXH32_update(&(cctxPtr->xxh), srcBuffer, srcSize); - - cctxPtr->totalInSize += srcSize; - return (size_t)(dstPtr - dstStart); -} - -/*! LZ4F_compressUpdate() : - * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary. - * When successful, the function always entirely consumes @srcBuffer. - * src data is either buffered or compressed into @dstBuffer. - * If previously an uncompressed block was written, buffered data is flushed - * before appending compressed data is continued. - * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr). - * @compressOptionsPtr is optional : provide NULL to mean "default". - * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered. - * or an error code if it fails (which can be tested using LZ4F_isError()) - * After an error, the state is left in a UB state, and must be re-initialized. 
- */ -size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr, - void* dstBuffer, size_t dstCapacity, - const void* srcBuffer, size_t srcSize, - const LZ4F_compressOptions_t* compressOptionsPtr) -{ - return LZ4F_compressUpdateImpl(cctxPtr, - dstBuffer, dstCapacity, - srcBuffer, srcSize, - compressOptionsPtr, LZ4B_COMPRESSED); -} - -/*! LZ4F_compressUpdate() : - * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary. - * When successful, the function always entirely consumes @srcBuffer. - * src data is either buffered or compressed into @dstBuffer. - * If previously an uncompressed block was written, buffered data is flushed - * before appending compressed data is continued. - * This is only supported when LZ4F_blockIndependent is used - * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr). - * @compressOptionsPtr is optional : provide NULL to mean "default". - * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered. - * or an error code if it fails (which can be tested using LZ4F_isError()) - * After an error, the state is left in a UB state, and must be re-initialized. - */ -size_t LZ4F_uncompressedUpdate(LZ4F_cctx* cctxPtr, - void* dstBuffer, size_t dstCapacity, - const void* srcBuffer, size_t srcSize, - const LZ4F_compressOptions_t* compressOptionsPtr) { - return LZ4F_compressUpdateImpl(cctxPtr, - dstBuffer, dstCapacity, - srcBuffer, srcSize, - compressOptionsPtr, LZ4B_UNCOMPRESSED); -} - - -/*! LZ4F_flush() : - * When compressed data must be sent immediately, without waiting for a block to be filled, - * invoke LZ4_flush(), which will immediately compress any remaining data stored within LZ4F_cctx. - * The result of the function is the number of bytes written into dstBuffer. - * It can be zero, this means there was no data left within LZ4F_cctx. - * The function outputs an error code if it fails (can be tested using LZ4F_isError()) - * LZ4F_compressOptions_t* is optional. NULL is a valid argument. - */ -size_t LZ4F_flush(LZ4F_cctx* cctxPtr, - void* dstBuffer, size_t dstCapacity, - const LZ4F_compressOptions_t* compressOptionsPtr) -{ - BYTE* const dstStart = (BYTE*)dstBuffer; - BYTE* dstPtr = dstStart; - compressFunc_t compress; - - if (cctxPtr->tmpInSize == 0) return 0; /* nothing to flush */ - RETURN_ERROR_IF(cctxPtr->cStage != 1, compressionState_uninitialized); - RETURN_ERROR_IF(dstCapacity < (cctxPtr->tmpInSize + BHSize + BFSize), dstMaxSize_tooSmall); - (void)compressOptionsPtr; /* not useful (yet) */ - - /* select compression function */ - compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel, cctxPtr->blockCompression); - - /* compress tmp buffer */ - dstPtr += LZ4F_makeBlock(dstPtr, - cctxPtr->tmpIn, cctxPtr->tmpInSize, - compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel, - cctxPtr->cdict, - cctxPtr->prefs.frameInfo.blockChecksumFlag); - assert(((void)"flush overflows dstBuffer!", (size_t)(dstPtr - dstStart) <= dstCapacity)); - - if (cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) - cctxPtr->tmpIn += cctxPtr->tmpInSize; - cctxPtr->tmpInSize = 0; - - /* keep tmpIn within limits */ - if ((cctxPtr->tmpIn + cctxPtr->maxBlockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize)) { /* necessarily LZ4F_blockLinked */ - int const realDictSize = LZ4F_localSaveDict(cctxPtr); - cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize; - } - - return (size_t)(dstPtr - dstStart); -} - - -/*! 
LZ4F_compressEnd() : - * When you want to properly finish the compressed frame, just call LZ4F_compressEnd(). - * It will flush whatever data remained within compressionContext (like LZ4_flush()) - * but also properly finalize the frame, with an endMark and an (optional) checksum. - * LZ4F_compressOptions_t structure is optional : you can provide NULL as argument. - * @return: the number of bytes written into dstBuffer (necessarily >= 4 (endMark size)) - * or an error code if it fails (can be tested using LZ4F_isError()) - * The context can then be used again to compress a new frame, starting with LZ4F_compressBegin(). - */ -size_t LZ4F_compressEnd(LZ4F_cctx* cctxPtr, - void* dstBuffer, size_t dstCapacity, - const LZ4F_compressOptions_t* compressOptionsPtr) -{ - BYTE* const dstStart = (BYTE*)dstBuffer; - BYTE* dstPtr = dstStart; - - size_t const flushSize = LZ4F_flush(cctxPtr, dstBuffer, dstCapacity, compressOptionsPtr); - DEBUGLOG(5,"LZ4F_compressEnd: dstCapacity=%u", (unsigned)dstCapacity); - FORWARD_IF_ERROR(flushSize); - dstPtr += flushSize; - - assert(flushSize <= dstCapacity); - dstCapacity -= flushSize; - - RETURN_ERROR_IF(dstCapacity < 4, dstMaxSize_tooSmall); - LZ4F_writeLE32(dstPtr, 0); - dstPtr += 4; /* endMark */ - - if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled) { - U32 const xxh = XXH32_digest(&(cctxPtr->xxh)); - RETURN_ERROR_IF(dstCapacity < 8, dstMaxSize_tooSmall); - DEBUGLOG(5,"Writing 32-bit content checksum"); - LZ4F_writeLE32(dstPtr, xxh); - dstPtr+=4; /* content Checksum */ - } - - cctxPtr->cStage = 0; /* state is now re-usable (with identical preferences) */ - - if (cctxPtr->prefs.frameInfo.contentSize) { - if (cctxPtr->prefs.frameInfo.contentSize != cctxPtr->totalInSize) - RETURN_ERROR(frameSize_wrong); - } - - return (size_t)(dstPtr - dstStart); -} - - -/*-*************************************************** -* Frame Decompression -*****************************************************/ - -typedef enum { - dstage_getFrameHeader=0, dstage_storeFrameHeader, - dstage_init, - dstage_getBlockHeader, dstage_storeBlockHeader, - dstage_copyDirect, dstage_getBlockChecksum, - dstage_getCBlock, dstage_storeCBlock, - dstage_flushOut, - dstage_getSuffix, dstage_storeSuffix, - dstage_getSFrameSize, dstage_storeSFrameSize, - dstage_skipSkippable -} dStage_t; - -struct LZ4F_dctx_s { - LZ4F_CustomMem cmem; - LZ4F_frameInfo_t frameInfo; - U32 version; - dStage_t dStage; - U64 frameRemainingSize; - size_t maxBlockSize; - size_t maxBufferSize; - BYTE* tmpIn; - size_t tmpInSize; - size_t tmpInTarget; - BYTE* tmpOutBuffer; - const BYTE* dict; - size_t dictSize; - BYTE* tmpOut; - size_t tmpOutSize; - size_t tmpOutStart; - XXH32_state_t xxh; - XXH32_state_t blockChecksum; - int skipChecksum; - BYTE header[LZ4F_HEADER_SIZE_MAX]; -}; /* typedef'd to LZ4F_dctx in lz4frame.h */ - - -LZ4F_dctx* LZ4F_createDecompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version) -{ - LZ4F_dctx* const dctx = (LZ4F_dctx*)LZ4F_calloc(sizeof(LZ4F_dctx), customMem); - if (dctx == NULL) return NULL; - - dctx->cmem = customMem; - dctx->version = version; - return dctx; -} - -/*! LZ4F_createDecompressionContext() : - * Create a decompressionContext object, which will track all decompression operations. - * Provides a pointer to a fully allocated and initialized LZ4F_decompressionContext object. - * Object can later be released using LZ4F_freeDecompressionContext(). - * @return : if != 0, there was an error during context creation. 
- */ -LZ4F_errorCode_t -LZ4F_createDecompressionContext(LZ4F_dctx** LZ4F_decompressionContextPtr, unsigned versionNumber) -{ - assert(LZ4F_decompressionContextPtr != NULL); /* violation of narrow contract */ - RETURN_ERROR_IF(LZ4F_decompressionContextPtr == NULL, parameter_null); /* in case it nonetheless happen in production */ - - *LZ4F_decompressionContextPtr = LZ4F_createDecompressionContext_advanced(LZ4F_defaultCMem, versionNumber); - if (*LZ4F_decompressionContextPtr == NULL) { /* failed allocation */ - RETURN_ERROR(allocation_failed); - } - return LZ4F_OK_NoError; -} - -LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx) -{ - LZ4F_errorCode_t result = LZ4F_OK_NoError; - if (dctx != NULL) { /* can accept NULL input, like free() */ - result = (LZ4F_errorCode_t)dctx->dStage; - LZ4F_free(dctx->tmpIn, dctx->cmem); - LZ4F_free(dctx->tmpOutBuffer, dctx->cmem); - LZ4F_free(dctx, dctx->cmem); - } - return result; -} - - -/*==--- Streaming Decompression operations ---==*/ - -void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx) -{ - dctx->dStage = dstage_getFrameHeader; - dctx->dict = NULL; - dctx->dictSize = 0; - dctx->skipChecksum = 0; -} - - -/*! LZ4F_decodeHeader() : - * input : `src` points at the **beginning of the frame** - * output : set internal values of dctx, such as - * dctx->frameInfo and dctx->dStage. - * Also allocates internal buffers. - * @return : nb Bytes read from src (necessarily <= srcSize) - * or an error code (testable with LZ4F_isError()) - */ -static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize) -{ - unsigned blockMode, blockChecksumFlag, contentSizeFlag, contentChecksumFlag, dictIDFlag, blockSizeID; - size_t frameHeaderSize; - const BYTE* srcPtr = (const BYTE*)src; - - DEBUGLOG(5, "LZ4F_decodeHeader"); - /* need to decode header to get frameInfo */ - RETURN_ERROR_IF(srcSize < minFHSize, frameHeader_incomplete); /* minimal frame header size */ - MEM_INIT(&(dctx->frameInfo), 0, sizeof(dctx->frameInfo)); - - /* special case : skippable frames */ - if ((LZ4F_readLE32(srcPtr) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START) { - dctx->frameInfo.frameType = LZ4F_skippableFrame; - if (src == (void*)(dctx->header)) { - dctx->tmpInSize = srcSize; - dctx->tmpInTarget = 8; - dctx->dStage = dstage_storeSFrameSize; - return srcSize; - } else { - dctx->dStage = dstage_getSFrameSize; - return 4; - } } - - /* control magic number */ -#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION - if (LZ4F_readLE32(srcPtr) != LZ4F_MAGICNUMBER) { - DEBUGLOG(4, "frame header error : unknown magic number"); - RETURN_ERROR(frameType_unknown); - } -#endif - dctx->frameInfo.frameType = LZ4F_frame; - - /* Flags */ - { U32 const FLG = srcPtr[4]; - U32 const version = (FLG>>6) & _2BITS; - blockChecksumFlag = (FLG>>4) & _1BIT; - blockMode = (FLG>>5) & _1BIT; - contentSizeFlag = (FLG>>3) & _1BIT; - contentChecksumFlag = (FLG>>2) & _1BIT; - dictIDFlag = FLG & _1BIT; - /* validate */ - if (((FLG>>1)&_1BIT) != 0) RETURN_ERROR(reservedFlag_set); /* Reserved bit */ - if (version != 1) RETURN_ERROR(headerVersion_wrong); /* Version Number, only supported value */ - } - - /* Frame Header Size */ - frameHeaderSize = minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0); - - if (srcSize < frameHeaderSize) { - /* not enough input to fully decode frame header */ - if (srcPtr != dctx->header) - memcpy(dctx->header, srcPtr, srcSize); - dctx->tmpInSize = srcSize; - dctx->tmpInTarget = frameHeaderSize; - dctx->dStage = dstage_storeFrameHeader; - return srcSize; - } - - { U32 const BD = 
srcPtr[5]; - blockSizeID = (BD>>4) & _3BITS; - /* validate */ - if (((BD>>7)&_1BIT) != 0) RETURN_ERROR(reservedFlag_set); /* Reserved bit */ - if (blockSizeID < 4) RETURN_ERROR(maxBlockSize_invalid); /* 4-7 only supported values for the time being */ - if (((BD>>0)&_4BITS) != 0) RETURN_ERROR(reservedFlag_set); /* Reserved bits */ - } - - /* check header */ - assert(frameHeaderSize > 5); -#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION - { BYTE const HC = LZ4F_headerChecksum(srcPtr+4, frameHeaderSize-5); - RETURN_ERROR_IF(HC != srcPtr[frameHeaderSize-1], headerChecksum_invalid); - } -#endif - - /* save */ - dctx->frameInfo.blockMode = (LZ4F_blockMode_t)blockMode; - dctx->frameInfo.blockChecksumFlag = (LZ4F_blockChecksum_t)blockChecksumFlag; - dctx->frameInfo.contentChecksumFlag = (LZ4F_contentChecksum_t)contentChecksumFlag; - dctx->frameInfo.blockSizeID = (LZ4F_blockSizeID_t)blockSizeID; - dctx->maxBlockSize = LZ4F_getBlockSize((LZ4F_blockSizeID_t)blockSizeID); - if (contentSizeFlag) - dctx->frameRemainingSize = dctx->frameInfo.contentSize = LZ4F_readLE64(srcPtr+6); - if (dictIDFlag) - dctx->frameInfo.dictID = LZ4F_readLE32(srcPtr + frameHeaderSize - 5); - - dctx->dStage = dstage_init; - - return frameHeaderSize; -} - - -/*! LZ4F_headerSize() : - * @return : size of frame header - * or an error code, which can be tested using LZ4F_isError() - */ -size_t LZ4F_headerSize(const void* src, size_t srcSize) -{ - RETURN_ERROR_IF(src == NULL, srcPtr_wrong); - - /* minimal srcSize to determine header size */ - if (srcSize < LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH) - RETURN_ERROR(frameHeader_incomplete); - - /* special case : skippable frames */ - if ((LZ4F_readLE32(src) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START) - return 8; - - /* control magic number */ -#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION - if (LZ4F_readLE32(src) != LZ4F_MAGICNUMBER) - RETURN_ERROR(frameType_unknown); -#endif - - /* Frame Header Size */ - { BYTE const FLG = ((const BYTE*)src)[4]; - U32 const contentSizeFlag = (FLG>>3) & _1BIT; - U32 const dictIDFlag = FLG & _1BIT; - return minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0); - } -} - -/*! LZ4F_getFrameInfo() : - * This function extracts frame parameters (max blockSize, frame checksum, etc.). - * Usage is optional. Objective is to provide relevant information for allocation purposes. - * This function works in 2 situations : - * - At the beginning of a new frame, in which case it will decode this information from `srcBuffer`, and start the decoding process. - * Amount of input data provided must be large enough to successfully decode the frame header. - * A header size is variable, but is guaranteed to be <= LZ4F_HEADER_SIZE_MAX bytes. It's possible to provide more input data than this minimum. - * - After decoding has been started. In which case, no input is read, frame parameters are extracted from dctx. - * The number of bytes consumed from srcBuffer will be updated within *srcSizePtr (necessarily <= original value). - * Decompression must resume from (srcBuffer + *srcSizePtr). - * @return : an hint about how many srcSize bytes LZ4F_decompress() expects for next call, - * or an error code which can be tested using LZ4F_isError() - * note 1 : in case of error, dctx is not modified. Decoding operations can resume from where they stopped. - * note 2 : frame parameters are *copied into* an already allocated LZ4F_frameInfo_t structure. 
- */ -LZ4F_errorCode_t LZ4F_getFrameInfo(LZ4F_dctx* dctx, - LZ4F_frameInfo_t* frameInfoPtr, - const void* srcBuffer, size_t* srcSizePtr) -{ - LZ4F_STATIC_ASSERT(dstage_getFrameHeader < dstage_storeFrameHeader); - if (dctx->dStage > dstage_storeFrameHeader) { - /* frameInfo already decoded */ - size_t o=0, i=0; - *srcSizePtr = 0; - *frameInfoPtr = dctx->frameInfo; - /* returns : recommended nb of bytes for LZ4F_decompress() */ - return LZ4F_decompress(dctx, NULL, &o, NULL, &i, NULL); - } else { - if (dctx->dStage == dstage_storeFrameHeader) { - /* frame decoding already started, in the middle of header => automatic fail */ - *srcSizePtr = 0; - RETURN_ERROR(frameDecoding_alreadyStarted); - } else { - size_t const hSize = LZ4F_headerSize(srcBuffer, *srcSizePtr); - if (LZ4F_isError(hSize)) { *srcSizePtr=0; return hSize; } - if (*srcSizePtr < hSize) { - *srcSizePtr=0; - RETURN_ERROR(frameHeader_incomplete); - } - - { size_t decodeResult = LZ4F_decodeHeader(dctx, srcBuffer, hSize); - if (LZ4F_isError(decodeResult)) { - *srcSizePtr = 0; - } else { - *srcSizePtr = decodeResult; - decodeResult = BHSize; /* block header size */ - } - *frameInfoPtr = dctx->frameInfo; - return decodeResult; - } } } -} - - -/* LZ4F_updateDict() : - * only used for LZ4F_blockLinked mode - * Condition : @dstPtr != NULL - */ -static void LZ4F_updateDict(LZ4F_dctx* dctx, - const BYTE* dstPtr, size_t dstSize, const BYTE* dstBufferStart, - unsigned withinTmp) -{ - assert(dstPtr != NULL); - if (dctx->dictSize==0) dctx->dict = (const BYTE*)dstPtr; /* will lead to prefix mode */ - assert(dctx->dict != NULL); - - if (dctx->dict + dctx->dictSize == dstPtr) { /* prefix mode, everything within dstBuffer */ - dctx->dictSize += dstSize; - return; - } - - assert(dstPtr >= dstBufferStart); - if ((size_t)(dstPtr - dstBufferStart) + dstSize >= 64 KB) { /* history in dstBuffer becomes large enough to become dictionary */ - dctx->dict = (const BYTE*)dstBufferStart; - dctx->dictSize = (size_t)(dstPtr - dstBufferStart) + dstSize; - return; - } - - assert(dstSize < 64 KB); /* if dstSize >= 64 KB, dictionary would be set into dstBuffer directly */ - - /* dstBuffer does not contain whole useful history (64 KB), so it must be saved within tmpOutBuffer */ - assert(dctx->tmpOutBuffer != NULL); - - if (withinTmp && (dctx->dict == dctx->tmpOutBuffer)) { /* continue history within tmpOutBuffer */ - /* withinTmp expectation : content of [dstPtr,dstSize] is same as [dict+dictSize,dstSize], so we just extend it */ - assert(dctx->dict + dctx->dictSize == dctx->tmpOut + dctx->tmpOutStart); - dctx->dictSize += dstSize; - return; - } - - if (withinTmp) { /* copy relevant dict portion in front of tmpOut within tmpOutBuffer */ - size_t const preserveSize = (size_t)(dctx->tmpOut - dctx->tmpOutBuffer); - size_t copySize = 64 KB - dctx->tmpOutSize; - const BYTE* const oldDictEnd = dctx->dict + dctx->dictSize - dctx->tmpOutStart; - if (dctx->tmpOutSize > 64 KB) copySize = 0; - if (copySize > preserveSize) copySize = preserveSize; - - memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize); - - dctx->dict = dctx->tmpOutBuffer; - dctx->dictSize = preserveSize + dctx->tmpOutStart + dstSize; - return; - } - - if (dctx->dict == dctx->tmpOutBuffer) { /* copy dst into tmp to complete dict */ - if (dctx->dictSize + dstSize > dctx->maxBufferSize) { /* tmp buffer not large enough */ - size_t const preserveSize = 64 KB - dstSize; - memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - preserveSize, preserveSize); - dctx->dictSize = 
preserveSize; - } - memcpy(dctx->tmpOutBuffer + dctx->dictSize, dstPtr, dstSize); - dctx->dictSize += dstSize; - return; - } - - /* join dict & dest into tmp */ - { size_t preserveSize = 64 KB - dstSize; - if (preserveSize > dctx->dictSize) preserveSize = dctx->dictSize; - memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - preserveSize, preserveSize); - memcpy(dctx->tmpOutBuffer + preserveSize, dstPtr, dstSize); - dctx->dict = dctx->tmpOutBuffer; - dctx->dictSize = preserveSize + dstSize; - } -} - - -/*! LZ4F_decompress() : - * Call this function repetitively to regenerate compressed data in srcBuffer. - * The function will attempt to decode up to *srcSizePtr bytes from srcBuffer - * into dstBuffer of capacity *dstSizePtr. - * - * The number of bytes regenerated into dstBuffer will be provided within *dstSizePtr (necessarily <= original value). - * - * The number of bytes effectively read from srcBuffer will be provided within *srcSizePtr (necessarily <= original value). - * If number of bytes read is < number of bytes provided, then decompression operation is not complete. - * Remaining data will have to be presented again in a subsequent invocation. - * - * The function result is an hint of the better srcSize to use for next call to LZ4F_decompress. - * Schematically, it's the size of the current (or remaining) compressed block + header of next block. - * Respecting the hint provides a small boost to performance, since it allows less buffer shuffling. - * Note that this is just a hint, and it's always possible to any srcSize value. - * When a frame is fully decoded, @return will be 0. - * If decompression failed, @return is an error code which can be tested using LZ4F_isError(). - */ -size_t LZ4F_decompress(LZ4F_dctx* dctx, - void* dstBuffer, size_t* dstSizePtr, - const void* srcBuffer, size_t* srcSizePtr, - const LZ4F_decompressOptions_t* decompressOptionsPtr) -{ - LZ4F_decompressOptions_t optionsNull; - const BYTE* const srcStart = (const BYTE*)srcBuffer; - const BYTE* const srcEnd = srcStart + *srcSizePtr; - const BYTE* srcPtr = srcStart; - BYTE* const dstStart = (BYTE*)dstBuffer; - BYTE* const dstEnd = dstStart ? 
dstStart + *dstSizePtr : NULL; - BYTE* dstPtr = dstStart; - const BYTE* selectedIn = NULL; - unsigned doAnotherStage = 1; - size_t nextSrcSizeHint = 1; - - - DEBUGLOG(5, "LZ4F_decompress : %p,%u => %p,%u", - srcBuffer, (unsigned)*srcSizePtr, dstBuffer, (unsigned)*dstSizePtr); - if (dstBuffer == NULL) assert(*dstSizePtr == 0); - MEM_INIT(&optionsNull, 0, sizeof(optionsNull)); - if (decompressOptionsPtr==NULL) decompressOptionsPtr = &optionsNull; - *srcSizePtr = 0; - *dstSizePtr = 0; - assert(dctx != NULL); - dctx->skipChecksum |= (decompressOptionsPtr->skipChecksums != 0); /* once set, disable for the remainder of the frame */ - - /* behaves as a state machine */ - - while (doAnotherStage) { - - switch(dctx->dStage) - { - - case dstage_getFrameHeader: - DEBUGLOG(6, "dstage_getFrameHeader"); - if ((size_t)(srcEnd-srcPtr) >= maxFHSize) { /* enough to decode - shortcut */ - size_t const hSize = LZ4F_decodeHeader(dctx, srcPtr, (size_t)(srcEnd-srcPtr)); /* will update dStage appropriately */ - FORWARD_IF_ERROR(hSize); - srcPtr += hSize; - break; - } - dctx->tmpInSize = 0; - if (srcEnd-srcPtr == 0) return minFHSize; /* 0-size input */ - dctx->tmpInTarget = minFHSize; /* minimum size to decode header */ - dctx->dStage = dstage_storeFrameHeader; - /* fall-through */ - - case dstage_storeFrameHeader: - DEBUGLOG(6, "dstage_storeFrameHeader"); - { size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize, (size_t)(srcEnd - srcPtr)); - memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy); - dctx->tmpInSize += sizeToCopy; - srcPtr += sizeToCopy; - } - if (dctx->tmpInSize < dctx->tmpInTarget) { - nextSrcSizeHint = (dctx->tmpInTarget - dctx->tmpInSize) + BHSize; /* rest of header + nextBlockHeader */ - doAnotherStage = 0; /* not enough src data, ask for some more */ - break; - } - FORWARD_IF_ERROR( LZ4F_decodeHeader(dctx, dctx->header, dctx->tmpInTarget) ); /* will update dStage appropriately */ - break; - - case dstage_init: - DEBUGLOG(6, "dstage_init"); - if (dctx->frameInfo.contentChecksumFlag) (void)XXH32_reset(&(dctx->xxh), 0); - /* internal buffers allocation */ - { size_t const bufferNeeded = dctx->maxBlockSize - + ((dctx->frameInfo.blockMode==LZ4F_blockLinked) ? 
128 KB : 0); - if (bufferNeeded > dctx->maxBufferSize) { /* tmp buffers too small */ - dctx->maxBufferSize = 0; /* ensure allocation will be re-attempted on next entry*/ - LZ4F_free(dctx->tmpIn, dctx->cmem); - dctx->tmpIn = (BYTE*)LZ4F_malloc(dctx->maxBlockSize + BFSize /* block checksum */, dctx->cmem); - RETURN_ERROR_IF(dctx->tmpIn == NULL, allocation_failed); - LZ4F_free(dctx->tmpOutBuffer, dctx->cmem); - dctx->tmpOutBuffer= (BYTE*)LZ4F_malloc(bufferNeeded, dctx->cmem); - RETURN_ERROR_IF(dctx->tmpOutBuffer== NULL, allocation_failed); - dctx->maxBufferSize = bufferNeeded; - } } - dctx->tmpInSize = 0; - dctx->tmpInTarget = 0; - dctx->tmpOut = dctx->tmpOutBuffer; - dctx->tmpOutStart = 0; - dctx->tmpOutSize = 0; - - dctx->dStage = dstage_getBlockHeader; - /* fall-through */ - - case dstage_getBlockHeader: - if ((size_t)(srcEnd - srcPtr) >= BHSize) { - selectedIn = srcPtr; - srcPtr += BHSize; - } else { - /* not enough input to read cBlockSize field */ - dctx->tmpInSize = 0; - dctx->dStage = dstage_storeBlockHeader; - } - - if (dctx->dStage == dstage_storeBlockHeader) /* can be skipped */ - case dstage_storeBlockHeader: - { size_t const remainingInput = (size_t)(srcEnd - srcPtr); - size_t const wantedData = BHSize - dctx->tmpInSize; - size_t const sizeToCopy = MIN(wantedData, remainingInput); - memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy); - srcPtr += sizeToCopy; - dctx->tmpInSize += sizeToCopy; - - if (dctx->tmpInSize < BHSize) { /* not enough input for cBlockSize */ - nextSrcSizeHint = BHSize - dctx->tmpInSize; - doAnotherStage = 0; - break; - } - selectedIn = dctx->tmpIn; - } /* if (dctx->dStage == dstage_storeBlockHeader) */ - - /* decode block header */ - { U32 const blockHeader = LZ4F_readLE32(selectedIn); - size_t const nextCBlockSize = blockHeader & 0x7FFFFFFFU; - size_t const crcSize = dctx->frameInfo.blockChecksumFlag * BFSize; - if (blockHeader==0) { /* frameEnd signal, no more block */ - DEBUGLOG(5, "end of frame"); - dctx->dStage = dstage_getSuffix; - break; - } - if (nextCBlockSize > dctx->maxBlockSize) { - RETURN_ERROR(maxBlockSize_invalid); - } - if (blockHeader & LZ4F_BLOCKUNCOMPRESSED_FLAG) { - /* next block is uncompressed */ - dctx->tmpInTarget = nextCBlockSize; - DEBUGLOG(5, "next block is uncompressed (size %u)", (U32)nextCBlockSize); - if (dctx->frameInfo.blockChecksumFlag) { - (void)XXH32_reset(&dctx->blockChecksum, 0); - } - dctx->dStage = dstage_copyDirect; - break; - } - /* next block is a compressed block */ - dctx->tmpInTarget = nextCBlockSize + crcSize; - dctx->dStage = dstage_getCBlock; - if (dstPtr==dstEnd || srcPtr==srcEnd) { - nextSrcSizeHint = BHSize + nextCBlockSize + crcSize; - doAnotherStage = 0; - } - break; - } - - case dstage_copyDirect: /* uncompressed block */ - DEBUGLOG(6, "dstage_copyDirect"); - { size_t sizeToCopy; - if (dstPtr == NULL) { - sizeToCopy = 0; - } else { - size_t const minBuffSize = MIN((size_t)(srcEnd-srcPtr), (size_t)(dstEnd-dstPtr)); - sizeToCopy = MIN(dctx->tmpInTarget, minBuffSize); - memcpy(dstPtr, srcPtr, sizeToCopy); - if (!dctx->skipChecksum) { - if (dctx->frameInfo.blockChecksumFlag) { - (void)XXH32_update(&dctx->blockChecksum, srcPtr, sizeToCopy); - } - if (dctx->frameInfo.contentChecksumFlag) - (void)XXH32_update(&dctx->xxh, srcPtr, sizeToCopy); - } - if (dctx->frameInfo.contentSize) - dctx->frameRemainingSize -= sizeToCopy; - - /* history management (linked blocks only)*/ - if (dctx->frameInfo.blockMode == LZ4F_blockLinked) { - LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 0); - } - srcPtr += 
sizeToCopy; - dstPtr += sizeToCopy; - } - if (sizeToCopy == dctx->tmpInTarget) { /* all done */ - if (dctx->frameInfo.blockChecksumFlag) { - dctx->tmpInSize = 0; - dctx->dStage = dstage_getBlockChecksum; - } else - dctx->dStage = dstage_getBlockHeader; /* new block */ - break; - } - dctx->tmpInTarget -= sizeToCopy; /* need to copy more */ - } - nextSrcSizeHint = dctx->tmpInTarget + - +(dctx->frameInfo.blockChecksumFlag ? BFSize : 0) - + BHSize /* next header size */; - doAnotherStage = 0; - break; - - /* check block checksum for recently transferred uncompressed block */ - case dstage_getBlockChecksum: - DEBUGLOG(6, "dstage_getBlockChecksum"); - { const void* crcSrc; - if ((srcEnd-srcPtr >= 4) && (dctx->tmpInSize==0)) { - crcSrc = srcPtr; - srcPtr += 4; - } else { - size_t const stillToCopy = 4 - dctx->tmpInSize; - size_t const sizeToCopy = MIN(stillToCopy, (size_t)(srcEnd-srcPtr)); - memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy); - dctx->tmpInSize += sizeToCopy; - srcPtr += sizeToCopy; - if (dctx->tmpInSize < 4) { /* all input consumed */ - doAnotherStage = 0; - break; - } - crcSrc = dctx->header; - } - if (!dctx->skipChecksum) { - U32 const readCRC = LZ4F_readLE32(crcSrc); - U32 const calcCRC = XXH32_digest(&dctx->blockChecksum); -#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION - DEBUGLOG(6, "compare block checksum"); - if (readCRC != calcCRC) { - DEBUGLOG(4, "incorrect block checksum: %08X != %08X", - readCRC, calcCRC); - RETURN_ERROR(blockChecksum_invalid); - } -#else - (void)readCRC; - (void)calcCRC; -#endif - } } - dctx->dStage = dstage_getBlockHeader; /* new block */ - break; - - case dstage_getCBlock: - DEBUGLOG(6, "dstage_getCBlock"); - if ((size_t)(srcEnd-srcPtr) < dctx->tmpInTarget) { - dctx->tmpInSize = 0; - dctx->dStage = dstage_storeCBlock; - break; - } - /* input large enough to read full block directly */ - selectedIn = srcPtr; - srcPtr += dctx->tmpInTarget; - - if (0) /* always jump over next block */ - case dstage_storeCBlock: - { size_t const wantedData = dctx->tmpInTarget - dctx->tmpInSize; - size_t const inputLeft = (size_t)(srcEnd-srcPtr); - size_t const sizeToCopy = MIN(wantedData, inputLeft); - memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy); - dctx->tmpInSize += sizeToCopy; - srcPtr += sizeToCopy; - if (dctx->tmpInSize < dctx->tmpInTarget) { /* need more input */ - nextSrcSizeHint = (dctx->tmpInTarget - dctx->tmpInSize) - + (dctx->frameInfo.blockChecksumFlag ? 
BFSize : 0) - + BHSize /* next header size */; - doAnotherStage = 0; - break; - } - selectedIn = dctx->tmpIn; - } - - /* At this stage, input is large enough to decode a block */ - - /* First, decode and control block checksum if it exists */ - if (dctx->frameInfo.blockChecksumFlag) { - assert(dctx->tmpInTarget >= 4); - dctx->tmpInTarget -= 4; - assert(selectedIn != NULL); /* selectedIn is defined at this stage (either srcPtr, or dctx->tmpIn) */ - { U32 const readBlockCrc = LZ4F_readLE32(selectedIn + dctx->tmpInTarget); - U32 const calcBlockCrc = XXH32(selectedIn, dctx->tmpInTarget, 0); -#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION - RETURN_ERROR_IF(readBlockCrc != calcBlockCrc, blockChecksum_invalid); -#else - (void)readBlockCrc; - (void)calcBlockCrc; -#endif - } } - - /* decode directly into destination buffer if there is enough room */ - if ( ((size_t)(dstEnd-dstPtr) >= dctx->maxBlockSize) - /* unless the dictionary is stored in tmpOut: - * in which case it's faster to decode within tmpOut - * to benefit from prefix speedup */ - && !(dctx->dict!= NULL && (const BYTE*)dctx->dict + dctx->dictSize == dctx->tmpOut) ) - { - const char* dict = (const char*)dctx->dict; - size_t dictSize = dctx->dictSize; - int decodedSize; - assert(dstPtr != NULL); - if (dict && dictSize > 1 GB) { - /* overflow control : dctx->dictSize is an int, avoid truncation / sign issues */ - dict += dictSize - 64 KB; - dictSize = 64 KB; - } - decodedSize = LZ4_decompress_safe_usingDict( - (const char*)selectedIn, (char*)dstPtr, - (int)dctx->tmpInTarget, (int)dctx->maxBlockSize, - dict, (int)dictSize); - RETURN_ERROR_IF(decodedSize < 0, decompressionFailed); - if ((dctx->frameInfo.contentChecksumFlag) && (!dctx->skipChecksum)) - XXH32_update(&(dctx->xxh), dstPtr, (size_t)decodedSize); - if (dctx->frameInfo.contentSize) - dctx->frameRemainingSize -= (size_t)decodedSize; - - /* dictionary management */ - if (dctx->frameInfo.blockMode==LZ4F_blockLinked) { - LZ4F_updateDict(dctx, dstPtr, (size_t)decodedSize, dstStart, 0); - } - - dstPtr += decodedSize; - dctx->dStage = dstage_getBlockHeader; /* end of block, let's get another one */ - break; - } - - /* not enough place into dst : decode into tmpOut */ - - /* manage dictionary */ - if (dctx->frameInfo.blockMode == LZ4F_blockLinked) { - if (dctx->dict == dctx->tmpOutBuffer) { - /* truncate dictionary to 64 KB if too big */ - if (dctx->dictSize > 128 KB) { - memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - 64 KB, 64 KB); - dctx->dictSize = 64 KB; - } - dctx->tmpOut = dctx->tmpOutBuffer + dctx->dictSize; - } else { /* dict not within tmpOut */ - size_t const reservedDictSpace = MIN(dctx->dictSize, 64 KB); - dctx->tmpOut = dctx->tmpOutBuffer + reservedDictSpace; - } } - - /* Decode block into tmpOut */ - { const char* dict = (const char*)dctx->dict; - size_t dictSize = dctx->dictSize; - int decodedSize; - if (dict && dictSize > 1 GB) { - /* the dictSize param is an int, avoid truncation / sign issues */ - dict += dictSize - 64 KB; - dictSize = 64 KB; - } - decodedSize = LZ4_decompress_safe_usingDict( - (const char*)selectedIn, (char*)dctx->tmpOut, - (int)dctx->tmpInTarget, (int)dctx->maxBlockSize, - dict, (int)dictSize); - RETURN_ERROR_IF(decodedSize < 0, decompressionFailed); - if (dctx->frameInfo.contentChecksumFlag && !dctx->skipChecksum) - XXH32_update(&(dctx->xxh), dctx->tmpOut, (size_t)decodedSize); - if (dctx->frameInfo.contentSize) - dctx->frameRemainingSize -= (size_t)decodedSize; - dctx->tmpOutSize = (size_t)decodedSize; - dctx->tmpOutStart = 0; - 
dctx->dStage = dstage_flushOut; - } - /* fall-through */ - - case dstage_flushOut: /* flush decoded data from tmpOut to dstBuffer */ - DEBUGLOG(6, "dstage_flushOut"); - if (dstPtr != NULL) { - size_t const sizeToCopy = MIN(dctx->tmpOutSize - dctx->tmpOutStart, (size_t)(dstEnd-dstPtr)); - memcpy(dstPtr, dctx->tmpOut + dctx->tmpOutStart, sizeToCopy); - - /* dictionary management */ - if (dctx->frameInfo.blockMode == LZ4F_blockLinked) - LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 1 /*withinTmp*/); - - dctx->tmpOutStart += sizeToCopy; - dstPtr += sizeToCopy; - } - if (dctx->tmpOutStart == dctx->tmpOutSize) { /* all flushed */ - dctx->dStage = dstage_getBlockHeader; /* get next block */ - break; - } - /* could not flush everything : stop there, just request a block header */ - doAnotherStage = 0; - nextSrcSizeHint = BHSize; - break; - - case dstage_getSuffix: - RETURN_ERROR_IF(dctx->frameRemainingSize, frameSize_wrong); /* incorrect frame size decoded */ - if (!dctx->frameInfo.contentChecksumFlag) { /* no checksum, frame is completed */ - nextSrcSizeHint = 0; - LZ4F_resetDecompressionContext(dctx); - doAnotherStage = 0; - break; - } - if ((srcEnd - srcPtr) < 4) { /* not enough size for entire CRC */ - dctx->tmpInSize = 0; - dctx->dStage = dstage_storeSuffix; - } else { - selectedIn = srcPtr; - srcPtr += 4; - } - - if (dctx->dStage == dstage_storeSuffix) /* can be skipped */ - case dstage_storeSuffix: - { size_t const remainingInput = (size_t)(srcEnd - srcPtr); - size_t const wantedData = 4 - dctx->tmpInSize; - size_t const sizeToCopy = MIN(wantedData, remainingInput); - memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy); - srcPtr += sizeToCopy; - dctx->tmpInSize += sizeToCopy; - if (dctx->tmpInSize < 4) { /* not enough input to read complete suffix */ - nextSrcSizeHint = 4 - dctx->tmpInSize; - doAnotherStage=0; - break; - } - selectedIn = dctx->tmpIn; - } /* if (dctx->dStage == dstage_storeSuffix) */ - - /* case dstage_checkSuffix: */ /* no direct entry, avoid initialization risks */ - if (!dctx->skipChecksum) { - U32 const readCRC = LZ4F_readLE32(selectedIn); - U32 const resultCRC = XXH32_digest(&(dctx->xxh)); -#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION - RETURN_ERROR_IF(readCRC != resultCRC, contentChecksum_invalid); -#else - (void)readCRC; - (void)resultCRC; -#endif - } - nextSrcSizeHint = 0; - LZ4F_resetDecompressionContext(dctx); - doAnotherStage = 0; - break; - - case dstage_getSFrameSize: - if ((srcEnd - srcPtr) >= 4) { - selectedIn = srcPtr; - srcPtr += 4; - } else { - /* not enough input to read cBlockSize field */ - dctx->tmpInSize = 4; - dctx->tmpInTarget = 8; - dctx->dStage = dstage_storeSFrameSize; - } - - if (dctx->dStage == dstage_storeSFrameSize) - case dstage_storeSFrameSize: - { size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize, - (size_t)(srcEnd - srcPtr) ); - memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy); - srcPtr += sizeToCopy; - dctx->tmpInSize += sizeToCopy; - if (dctx->tmpInSize < dctx->tmpInTarget) { - /* not enough input to get full sBlockSize; wait for more */ - nextSrcSizeHint = dctx->tmpInTarget - dctx->tmpInSize; - doAnotherStage = 0; - break; - } - selectedIn = dctx->header + 4; - } /* if (dctx->dStage == dstage_storeSFrameSize) */ - - /* case dstage_decodeSFrameSize: */ /* no direct entry */ - { size_t const SFrameSize = LZ4F_readLE32(selectedIn); - dctx->frameInfo.contentSize = SFrameSize; - dctx->tmpInTarget = SFrameSize; - dctx->dStage = dstage_skipSkippable; - break; - } - - case dstage_skipSkippable: - { 
size_t const skipSize = MIN(dctx->tmpInTarget, (size_t)(srcEnd-srcPtr)); - srcPtr += skipSize; - dctx->tmpInTarget -= skipSize; - doAnotherStage = 0; - nextSrcSizeHint = dctx->tmpInTarget; - if (nextSrcSizeHint) break; /* still more to skip */ - /* frame fully skipped : prepare context for a new frame */ - LZ4F_resetDecompressionContext(dctx); - break; - } - } /* switch (dctx->dStage) */ - } /* while (doAnotherStage) */ - - /* preserve history within tmpOut whenever necessary */ - LZ4F_STATIC_ASSERT((unsigned)dstage_init == 2); - if ( (dctx->frameInfo.blockMode==LZ4F_blockLinked) /* next block will use up to 64KB from previous ones */ - && (dctx->dict != dctx->tmpOutBuffer) /* dictionary is not already within tmp */ - && (dctx->dict != NULL) /* dictionary exists */ - && (!decompressOptionsPtr->stableDst) /* cannot rely on dst data to remain there for next call */ - && ((unsigned)(dctx->dStage)-2 < (unsigned)(dstage_getSuffix)-2) ) /* valid stages : [init ... getSuffix[ */ - { - if (dctx->dStage == dstage_flushOut) { - size_t const preserveSize = (size_t)(dctx->tmpOut - dctx->tmpOutBuffer); - size_t copySize = 64 KB - dctx->tmpOutSize; - const BYTE* oldDictEnd = dctx->dict + dctx->dictSize - dctx->tmpOutStart; - if (dctx->tmpOutSize > 64 KB) copySize = 0; - if (copySize > preserveSize) copySize = preserveSize; - assert(dctx->tmpOutBuffer != NULL); - - memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize); - - dctx->dict = dctx->tmpOutBuffer; - dctx->dictSize = preserveSize + dctx->tmpOutStart; - } else { - const BYTE* const oldDictEnd = dctx->dict + dctx->dictSize; - size_t const newDictSize = MIN(dctx->dictSize, 64 KB); - - memcpy(dctx->tmpOutBuffer, oldDictEnd - newDictSize, newDictSize); - - dctx->dict = dctx->tmpOutBuffer; - dctx->dictSize = newDictSize; - dctx->tmpOut = dctx->tmpOutBuffer + newDictSize; - } - } - - *srcSizePtr = (size_t)(srcPtr - srcStart); - *dstSizePtr = (size_t)(dstPtr - dstStart); - return nextSrcSizeHint; -} - -/*! LZ4F_decompress_usingDict() : - * Same as LZ4F_decompress(), using a predefined dictionary. - * Dictionary is used "in place", without any preprocessing. - * It must remain accessible throughout the entire frame decoding. - */ -size_t LZ4F_decompress_usingDict(LZ4F_dctx* dctx, - void* dstBuffer, size_t* dstSizePtr, - const void* srcBuffer, size_t* srcSizePtr, - const void* dict, size_t dictSize, - const LZ4F_decompressOptions_t* decompressOptionsPtr) -{ - if (dctx->dStage <= dstage_init) { - dctx->dict = (const BYTE*)dict; - dctx->dictSize = dictSize; - } - return LZ4F_decompress(dctx, dstBuffer, dstSizePtr, - srcBuffer, srcSizePtr, - decompressOptionsPtr); -} diff --git a/CompressSave/LZ4WrapC/lz4/lz4frame.h b/CompressSave/LZ4WrapC/lz4/lz4frame.h deleted file mode 100644 index e8ce66d..0000000 --- a/CompressSave/LZ4WrapC/lz4/lz4frame.h +++ /dev/null @@ -1,707 +0,0 @@ -/* - LZ4F - LZ4-Frame library - Header File - Copyright (C) 2011-2020, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. 
- - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - LZ4 source repository : https://github.com/lz4/lz4 - - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c -*/ - -/* LZ4F is a stand-alone API able to create and decode LZ4 frames - * conformant with specification v1.6.1 in doc/lz4_Frame_format.md . - * Generated frames are compatible with `lz4` CLI. - * - * LZ4F also offers streaming capabilities. - * - * lz4.h is not required when using lz4frame.h, - * except to extract common constants such as LZ4_VERSION_NUMBER. - * */ - -#ifndef LZ4F_H_09782039843 -#define LZ4F_H_09782039843 - -#if defined (__cplusplus) -extern "C" { -#endif - -/* --- Dependency --- */ -#include /* size_t */ - - -/** - * Introduction - * - * lz4frame.h implements LZ4 frame specification: see doc/lz4_Frame_format.md . - * LZ4 Frames are compatible with `lz4` CLI, - * and designed to be interoperable with any system. -**/ - -/*-*************************************************************** - * Compiler specifics - *****************************************************************/ -/* LZ4_DLL_EXPORT : - * Enable exporting of functions when building a Windows DLL - * LZ4FLIB_VISIBILITY : - * Control library symbols visibility. 
- */ -#ifndef LZ4FLIB_VISIBILITY -# if defined(__GNUC__) && (__GNUC__ >= 4) -# define LZ4FLIB_VISIBILITY __attribute__ ((visibility ("default"))) -# else -# define LZ4FLIB_VISIBILITY -# endif -#endif -#if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1) -# define LZ4FLIB_API __declspec(dllexport) LZ4FLIB_VISIBILITY -#elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1) -# define LZ4FLIB_API __declspec(dllimport) LZ4FLIB_VISIBILITY -#else -# define LZ4FLIB_API LZ4FLIB_VISIBILITY -#endif - -#ifdef LZ4F_DISABLE_DEPRECATE_WARNINGS -# define LZ4F_DEPRECATE(x) x -#else -# if defined(_MSC_VER) -# define LZ4F_DEPRECATE(x) x /* __declspec(deprecated) x - only works with C++ */ -# elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 6)) -# define LZ4F_DEPRECATE(x) x __attribute__((deprecated)) -# else -# define LZ4F_DEPRECATE(x) x /* no deprecation warning for this compiler */ -# endif -#endif - - -/*-************************************ - * Error management - **************************************/ -typedef size_t LZ4F_errorCode_t; - -LZ4FLIB_API unsigned LZ4F_isError(LZ4F_errorCode_t code); /**< tells when a function result is an error code */ -LZ4FLIB_API const char* LZ4F_getErrorName(LZ4F_errorCode_t code); /**< return error code string; for debugging */ - - -/*-************************************ - * Frame compression types - ************************************* */ -/* #define LZ4F_ENABLE_OBSOLETE_ENUMS // uncomment to enable obsolete enums */ -#ifdef LZ4F_ENABLE_OBSOLETE_ENUMS -# define LZ4F_OBSOLETE_ENUM(x) , LZ4F_DEPRECATE(x) = LZ4F_##x -#else -# define LZ4F_OBSOLETE_ENUM(x) -#endif - -/* The larger the block size, the (slightly) better the compression ratio, - * though there are diminishing returns. - * Larger blocks also increase memory usage on both compression and decompression sides. - */ -typedef enum { - LZ4F_default=0, - LZ4F_max64KB=4, - LZ4F_max256KB=5, - LZ4F_max1MB=6, - LZ4F_max4MB=7 - LZ4F_OBSOLETE_ENUM(max64KB) - LZ4F_OBSOLETE_ENUM(max256KB) - LZ4F_OBSOLETE_ENUM(max1MB) - LZ4F_OBSOLETE_ENUM(max4MB) -} LZ4F_blockSizeID_t; - -/* Linked blocks sharply reduce inefficiencies when using small blocks, - * they compress better. - * However, some LZ4 decoders are only compatible with independent blocks */ -typedef enum { - LZ4F_blockLinked=0, - LZ4F_blockIndependent - LZ4F_OBSOLETE_ENUM(blockLinked) - LZ4F_OBSOLETE_ENUM(blockIndependent) -} LZ4F_blockMode_t; - -typedef enum { - LZ4F_noContentChecksum=0, - LZ4F_contentChecksumEnabled - LZ4F_OBSOLETE_ENUM(noContentChecksum) - LZ4F_OBSOLETE_ENUM(contentChecksumEnabled) -} LZ4F_contentChecksum_t; - -typedef enum { - LZ4F_noBlockChecksum=0, - LZ4F_blockChecksumEnabled -} LZ4F_blockChecksum_t; - -typedef enum { - LZ4F_frame=0, - LZ4F_skippableFrame - LZ4F_OBSOLETE_ENUM(skippableFrame) -} LZ4F_frameType_t; - -#ifdef LZ4F_ENABLE_OBSOLETE_ENUMS -typedef LZ4F_blockSizeID_t blockSizeID_t; -typedef LZ4F_blockMode_t blockMode_t; -typedef LZ4F_frameType_t frameType_t; -typedef LZ4F_contentChecksum_t contentChecksum_t; -#endif - -/*! LZ4F_frameInfo_t : - * makes it possible to set or read frame parameters. - * Structure must be first init to 0, using memset() or LZ4F_INIT_FRAMEINFO, - * setting all parameters to default. 
- * It's then possible to update selectively some parameters */ -typedef struct { - LZ4F_blockSizeID_t blockSizeID; /* max64KB, max256KB, max1MB, max4MB; 0 == default */ - LZ4F_blockMode_t blockMode; /* LZ4F_blockLinked, LZ4F_blockIndependent; 0 == default */ - LZ4F_contentChecksum_t contentChecksumFlag; /* 1: frame terminated with 32-bit checksum of decompressed data; 0: disabled (default) */ - LZ4F_frameType_t frameType; /* read-only field : LZ4F_frame or LZ4F_skippableFrame */ - unsigned long long contentSize; /* Size of uncompressed content ; 0 == unknown */ - unsigned dictID; /* Dictionary ID, sent by compressor to help decoder select correct dictionary; 0 == no dictID provided */ - LZ4F_blockChecksum_t blockChecksumFlag; /* 1: each block followed by a checksum of block's compressed data; 0: disabled (default) */ -} LZ4F_frameInfo_t; - -#define LZ4F_INIT_FRAMEINFO { LZ4F_default, LZ4F_blockLinked, LZ4F_noContentChecksum, LZ4F_frame, 0ULL, 0U, LZ4F_noBlockChecksum } /* v1.8.3+ */ - -/*! LZ4F_preferences_t : - * makes it possible to supply advanced compression instructions to streaming interface. - * Structure must be first init to 0, using memset() or LZ4F_INIT_PREFERENCES, - * setting all parameters to default. - * All reserved fields must be set to zero. */ -typedef struct { - LZ4F_frameInfo_t frameInfo; - int compressionLevel; /* 0: default (fast mode); values > LZ4HC_CLEVEL_MAX count as LZ4HC_CLEVEL_MAX; values < 0 trigger "fast acceleration" */ - unsigned autoFlush; /* 1: always flush; reduces usage of internal buffers */ - unsigned favorDecSpeed; /* 1: parser favors decompression speed vs compression ratio. Only works for high compression modes (>= LZ4HC_CLEVEL_OPT_MIN) */ /* v1.8.2+ */ - unsigned reserved[3]; /* must be zero for forward compatibility */ -} LZ4F_preferences_t; - -#define LZ4F_INIT_PREFERENCES { LZ4F_INIT_FRAMEINFO, 0, 0u, 0u, { 0u, 0u, 0u } } /* v1.8.3+ */ - - -/*-********************************* -* Simple compression function -***********************************/ - -LZ4FLIB_API int LZ4F_compressionLevel_max(void); /* v1.8.0+ */ - -/*! LZ4F_compressFrameBound() : - * Returns the maximum possible compressed size with LZ4F_compressFrame() given srcSize and preferences. - * `preferencesPtr` is optional. It can be replaced by NULL, in which case, the function will assume default preferences. - * Note : this result is only usable with LZ4F_compressFrame(). - * It may also be relevant to LZ4F_compressUpdate() _only if_ no flush() operation is ever performed. - */ -LZ4FLIB_API size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr); - -/*! LZ4F_compressFrame() : - * Compress srcBuffer content into an LZ4-compressed frame. - * It's a one shot operation, all input content is consumed, and all output is generated. - * - * Note : it's a stateless operation (no LZ4F_cctx state needed). - * In order to reduce load on the allocator, LZ4F_compressFrame(), by default, - * uses the stack to allocate space for the compression state and some table. - * If this usage of the stack is too much for your application, - * consider compiling `lz4frame.c` with compile-time macro LZ4F_HEAPMODE set to 1 instead. - * All state allocations will use the Heap. - * It also means each invocation of LZ4F_compressFrame() will trigger several internal alloc/free invocations. - * - * @dstCapacity MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr). - * @preferencesPtr is optional : one can provide NULL, in which case all preferences are set to default. 
- * @return : number of bytes written into dstBuffer. - * or an error code if it fails (can be tested using LZ4F_isError()) - */ -LZ4FLIB_API size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity, - const void* srcBuffer, size_t srcSize, - const LZ4F_preferences_t* preferencesPtr); - - -/*-*********************************** -* Advanced compression functions -*************************************/ -typedef struct LZ4F_cctx_s LZ4F_cctx; /* incomplete type */ -typedef LZ4F_cctx* LZ4F_compressionContext_t; /* for compatibility with older APIs, prefer using LZ4F_cctx */ - -typedef struct { - unsigned stableSrc; /* 1 == src content will remain present on future calls to LZ4F_compress(); skip copying src content within tmp buffer */ - unsigned reserved[3]; -} LZ4F_compressOptions_t; - -/*--- Resource Management ---*/ - -#define LZ4F_VERSION 100 /* This number can be used to check for an incompatible API breaking change */ -LZ4FLIB_API unsigned LZ4F_getVersion(void); - -/*! LZ4F_createCompressionContext() : - * The first thing to do is to create a compressionContext object, - * which will keep track of operation state during streaming compression. - * This is achieved using LZ4F_createCompressionContext(), which takes as argument a version, - * and a pointer to LZ4F_cctx*, to write the resulting pointer into. - * @version provided MUST be LZ4F_VERSION. It is intended to track potential version mismatch, notably when using DLL. - * The function provides a pointer to a fully allocated LZ4F_cctx object. - * @cctxPtr MUST be != NULL. - * If @return != zero, context creation failed. - * A created compression context can be employed multiple times for consecutive streaming operations. - * Once all streaming compression jobs are completed, - * the state object can be released using LZ4F_freeCompressionContext(). - * Note1 : LZ4F_freeCompressionContext() is always successful. Its return value can be ignored. - * Note2 : LZ4F_freeCompressionContext() works fine with NULL input pointers (do nothing). -**/ -LZ4FLIB_API LZ4F_errorCode_t LZ4F_createCompressionContext(LZ4F_cctx** cctxPtr, unsigned version); -LZ4FLIB_API LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctx); - - -/*---- Compression ----*/ - -#define LZ4F_HEADER_SIZE_MIN 7 /* LZ4 Frame header size can vary, depending on selected parameters */ -#define LZ4F_HEADER_SIZE_MAX 19 - -/* Size in bytes of a block header in little-endian format. Highest bit indicates if block data is uncompressed */ -#define LZ4F_BLOCK_HEADER_SIZE 4 - -/* Size in bytes of a block checksum footer in little-endian format. */ -#define LZ4F_BLOCK_CHECKSUM_SIZE 4 - -/* Size in bytes of the content checksum. */ -#define LZ4F_CONTENT_CHECKSUM_SIZE 4 - -/*! LZ4F_compressBegin() : - * will write the frame header into dstBuffer. - * dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes. - * `prefsPtr` is optional : NULL can be provided to set all preferences to default. - * @return : number of bytes written into dstBuffer for the header - * or an error code (which can be tested using LZ4F_isError()) - */ -LZ4FLIB_API size_t LZ4F_compressBegin(LZ4F_cctx* cctx, - void* dstBuffer, size_t dstCapacity, - const LZ4F_preferences_t* prefsPtr); - -/*! LZ4F_compressBound() : - * Provides minimum dstCapacity required to guarantee success of - * LZ4F_compressUpdate(), given a srcSize and preferences, for a worst case scenario. - * When srcSize==0, LZ4F_compressBound() provides an upper bound for LZ4F_flush() and LZ4F_compressEnd() instead. 
- * Note that the result is only valid for a single invocation of LZ4F_compressUpdate(). - * When invoking LZ4F_compressUpdate() multiple times, - * if the output buffer is gradually filled up instead of emptied and re-used from its start, - * one must check if there is enough remaining capacity before each invocation, using LZ4F_compressBound(). - * @return is always the same for a srcSize and prefsPtr. - * prefsPtr is optional : when NULL is provided, preferences will be set to cover worst case scenario. - * tech details : - * @return if automatic flushing is not enabled, includes the possibility that internal buffer might already be filled by up to (blockSize-1) bytes. - * It also includes frame footer (ending + checksum), since it might be generated by LZ4F_compressEnd(). - * @return doesn't include frame header, as it was already generated by LZ4F_compressBegin(). - */ -LZ4FLIB_API size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* prefsPtr); - -/*! LZ4F_compressUpdate() : - * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary. - * Important rule: dstCapacity MUST be large enough to ensure operation success even in worst case situations. - * This value is provided by LZ4F_compressBound(). - * If this condition is not respected, LZ4F_compress() will fail (result is an errorCode). - * After an error, the state is left in a UB state, and must be re-initialized or freed. - * If previously an uncompressed block was written, buffered data is flushed - * before appending compressed data is continued. - * `cOptPtr` is optional : NULL can be provided, in which case all options are set to default. - * @return : number of bytes written into `dstBuffer` (it can be zero, meaning input data was just buffered). - * or an error code if it fails (which can be tested using LZ4F_isError()) - */ -LZ4FLIB_API size_t LZ4F_compressUpdate(LZ4F_cctx* cctx, - void* dstBuffer, size_t dstCapacity, - const void* srcBuffer, size_t srcSize, - const LZ4F_compressOptions_t* cOptPtr); - -/*! LZ4F_flush() : - * When data must be generated and sent immediately, without waiting for a block to be completely filled, - * it's possible to call LZ4_flush(). It will immediately compress any data buffered within cctx. - * `dstCapacity` must be large enough to ensure the operation will be successful. - * `cOptPtr` is optional : it's possible to provide NULL, all options will be set to default. - * @return : nb of bytes written into dstBuffer (can be zero, when there is no data stored within cctx) - * or an error code if it fails (which can be tested using LZ4F_isError()) - * Note : LZ4F_flush() is guaranteed to be successful when dstCapacity >= LZ4F_compressBound(0, prefsPtr). - */ -LZ4FLIB_API size_t LZ4F_flush(LZ4F_cctx* cctx, - void* dstBuffer, size_t dstCapacity, - const LZ4F_compressOptions_t* cOptPtr); - -/*! LZ4F_compressEnd() : - * To properly finish an LZ4 frame, invoke LZ4F_compressEnd(). - * It will flush whatever data remained within `cctx` (like LZ4_flush()) - * and properly finalize the frame, with an endMark and a checksum. - * `cOptPtr` is optional : NULL can be provided, in which case all options will be set to default. - * @return : nb of bytes written into dstBuffer, necessarily >= 4 (endMark), - * or an error code if it fails (which can be tested using LZ4F_isError()) - * Note : LZ4F_compressEnd() is guaranteed to be successful when dstCapacity >= LZ4F_compressBound(0, prefsPtr). 
- * A successful call to LZ4F_compressEnd() makes `cctx` available again for another compression task. - */ -LZ4FLIB_API size_t LZ4F_compressEnd(LZ4F_cctx* cctx, - void* dstBuffer, size_t dstCapacity, - const LZ4F_compressOptions_t* cOptPtr); - - -/*-********************************* -* Decompression functions -***********************************/ -typedef struct LZ4F_dctx_s LZ4F_dctx; /* incomplete type */ -typedef LZ4F_dctx* LZ4F_decompressionContext_t; /* compatibility with previous API versions */ - -typedef struct { - unsigned stableDst; /* pledges that last 64KB decompressed data will remain available unmodified between invocations. - * This optimization skips storage operations in tmp buffers. */ - unsigned skipChecksums; /* disable checksum calculation and verification, even when one is present in frame, to save CPU time. - * Setting this option to 1 once disables all checksums for the rest of the frame. */ - unsigned reserved1; /* must be set to zero for forward compatibility */ - unsigned reserved0; /* idem */ -} LZ4F_decompressOptions_t; - - -/* Resource management */ - -/*! LZ4F_createDecompressionContext() : - * Create an LZ4F_dctx object, to track all decompression operations. - * @version provided MUST be LZ4F_VERSION. - * @dctxPtr MUST be valid. - * The function fills @dctxPtr with the value of a pointer to an allocated and initialized LZ4F_dctx object. - * The @return is an errorCode, which can be tested using LZ4F_isError(). - * dctx memory can be released using LZ4F_freeDecompressionContext(); - * Result of LZ4F_freeDecompressionContext() indicates current state of decompressionContext when being released. - * That is, it should be == 0 if decompression has been completed fully and correctly. - */ -LZ4FLIB_API LZ4F_errorCode_t LZ4F_createDecompressionContext(LZ4F_dctx** dctxPtr, unsigned version); -LZ4FLIB_API LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx); - - -/*-*********************************** -* Streaming decompression functions -*************************************/ - -#define LZ4F_MAGICNUMBER 0x184D2204U -#define LZ4F_MAGIC_SKIPPABLE_START 0x184D2A50U -#define LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH 5 - -/*! LZ4F_headerSize() : v1.9.0+ - * Provide the header size of a frame starting at `src`. - * `srcSize` must be >= LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH, - * which is enough to decode the header length. - * @return : size of frame header - * or an error code, which can be tested using LZ4F_isError() - * note : Frame header size is variable, but is guaranteed to be - * >= LZ4F_HEADER_SIZE_MIN bytes, and <= LZ4F_HEADER_SIZE_MAX bytes. - */ -LZ4FLIB_API size_t LZ4F_headerSize(const void* src, size_t srcSize); - -/*! LZ4F_getFrameInfo() : - * This function extracts frame parameters (max blockSize, dictID, etc.). - * Its usage is optional: user can also invoke LZ4F_decompress() directly. - * - * Extracted information will fill an existing LZ4F_frameInfo_t structure. - * This can be useful for allocation and dictionary identification purposes. - * - * LZ4F_getFrameInfo() can work in the following situations : - * - * 1) At the beginning of a new frame, before any invocation of LZ4F_decompress(). - * It will decode header from `srcBuffer`, - * consuming the header and starting the decoding process. - * - * Input size must be large enough to contain the full frame header. - * Frame header size can be known beforehand by LZ4F_headerSize(). 
- * Frame header size is variable, but is guaranteed to be >= LZ4F_HEADER_SIZE_MIN bytes, - * and not more than <= LZ4F_HEADER_SIZE_MAX bytes. - * Hence, blindly providing LZ4F_HEADER_SIZE_MAX bytes or more will always work. - * It's allowed to provide more input data than the header size, - * LZ4F_getFrameInfo() will only consume the header. - * - * If input size is not large enough, - * aka if it's smaller than header size, - * function will fail and return an error code. - * - * 2) After decoding has been started, - * it's possible to invoke LZ4F_getFrameInfo() anytime - * to extract already decoded frame parameters stored within dctx. - * - * Note that, if decoding has barely started, - * and not yet read enough information to decode the header, - * LZ4F_getFrameInfo() will fail. - * - * The number of bytes consumed from srcBuffer will be updated in *srcSizePtr (necessarily <= original value). - * LZ4F_getFrameInfo() only consumes bytes when decoding has not yet started, - * and when decoding the header has been successful. - * Decompression must then resume from (srcBuffer + *srcSizePtr). - * - * @return : a hint about how many srcSize bytes LZ4F_decompress() expects for next call, - * or an error code which can be tested using LZ4F_isError(). - * note 1 : in case of error, dctx is not modified. Decoding operation can resume from beginning safely. - * note 2 : frame parameters are *copied into* an already allocated LZ4F_frameInfo_t structure. - */ -LZ4FLIB_API size_t -LZ4F_getFrameInfo(LZ4F_dctx* dctx, - LZ4F_frameInfo_t* frameInfoPtr, - const void* srcBuffer, size_t* srcSizePtr); - -/*! LZ4F_decompress() : - * Call this function repetitively to regenerate data compressed in `srcBuffer`. - * - * The function requires a valid dctx state. - * It will read up to *srcSizePtr bytes from srcBuffer, - * and decompress data into dstBuffer, of capacity *dstSizePtr. - * - * The nb of bytes consumed from srcBuffer will be written into *srcSizePtr (necessarily <= original value). - * The nb of bytes decompressed into dstBuffer will be written into *dstSizePtr (necessarily <= original value). - * - * The function does not necessarily read all input bytes, so always check value in *srcSizePtr. - * Unconsumed source data must be presented again in subsequent invocations. - * - * `dstBuffer` can freely change between each consecutive function invocation. - * `dstBuffer` content will be overwritten. - * - * Note: if `LZ4F_getFrameInfo()` is called before `LZ4F_decompress()`, srcBuffer must be updated to reflect - * the number of bytes consumed after reading the frame header. Failure to update srcBuffer before calling - * `LZ4F_decompress()` will cause decompression failure or, even worse, successful but incorrect decompression. - * See the `LZ4F_getFrameInfo()` docs for details. - * - * @return : an hint of how many `srcSize` bytes LZ4F_decompress() expects for next call. - * Schematically, it's the size of the current (or remaining) compressed block + header of next block. - * Respecting the hint provides some small speed benefit, because it skips intermediate buffers. - * This is just a hint though, it's always possible to provide any srcSize. - * - * When a frame is fully decoded, @return will be 0 (no more data expected). - * When provided with more bytes than necessary to decode a frame, - * LZ4F_decompress() will stop reading exactly at end of current frame, and @return 0. - * - * If decompression failed, @return is an error code, which can be tested using LZ4F_isError(). 
- * After a decompression error, the `dctx` context is not resumable. - * Use LZ4F_resetDecompressionContext() to return to clean state. - * - * After a frame is fully decoded, dctx can be used again to decompress another frame. - */ -LZ4FLIB_API size_t -LZ4F_decompress(LZ4F_dctx* dctx, - void* dstBuffer, size_t* dstSizePtr, - const void* srcBuffer, size_t* srcSizePtr, - const LZ4F_decompressOptions_t* dOptPtr); - - -/*! LZ4F_resetDecompressionContext() : added in v1.8.0 - * In case of an error, the context is left in "undefined" state. - * In which case, it's necessary to reset it, before re-using it. - * This method can also be used to abruptly stop any unfinished decompression, - * and start a new one using same context resources. */ -LZ4FLIB_API void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx); /* always successful */ - - - -#if defined (__cplusplus) -} -#endif - -#endif /* LZ4F_H_09782039843 */ - -#if defined(LZ4F_STATIC_LINKING_ONLY) && !defined(LZ4F_H_STATIC_09782039843) -#define LZ4F_H_STATIC_09782039843 - -#if defined (__cplusplus) -extern "C" { -#endif - -/* These declarations are not stable and may change in the future. - * They are therefore only safe to depend on - * when the caller is statically linked against the library. - * To access their declarations, define LZ4F_STATIC_LINKING_ONLY. - * - * By default, these symbols aren't published into shared/dynamic libraries. - * You can override this behavior and force them to be published - * by defining LZ4F_PUBLISH_STATIC_FUNCTIONS. - * Use at your own risk. - */ -#ifdef LZ4F_PUBLISH_STATIC_FUNCTIONS -# define LZ4FLIB_STATIC_API LZ4FLIB_API -#else -# define LZ4FLIB_STATIC_API -#endif - - -/* --- Error List --- */ -#define LZ4F_LIST_ERRORS(ITEM) \ - ITEM(OK_NoError) \ - ITEM(ERROR_GENERIC) \ - ITEM(ERROR_maxBlockSize_invalid) \ - ITEM(ERROR_blockMode_invalid) \ - ITEM(ERROR_contentChecksumFlag_invalid) \ - ITEM(ERROR_compressionLevel_invalid) \ - ITEM(ERROR_headerVersion_wrong) \ - ITEM(ERROR_blockChecksum_invalid) \ - ITEM(ERROR_reservedFlag_set) \ - ITEM(ERROR_allocation_failed) \ - ITEM(ERROR_srcSize_tooLarge) \ - ITEM(ERROR_dstMaxSize_tooSmall) \ - ITEM(ERROR_frameHeader_incomplete) \ - ITEM(ERROR_frameType_unknown) \ - ITEM(ERROR_frameSize_wrong) \ - ITEM(ERROR_srcPtr_wrong) \ - ITEM(ERROR_decompressionFailed) \ - ITEM(ERROR_headerChecksum_invalid) \ - ITEM(ERROR_contentChecksum_invalid) \ - ITEM(ERROR_frameDecoding_alreadyStarted) \ - ITEM(ERROR_compressionState_uninitialized) \ - ITEM(ERROR_parameter_null) \ - ITEM(ERROR_maxCode) - -#define LZ4F_GENERATE_ENUM(ENUM) LZ4F_##ENUM, - -/* enum list is exposed, to handle specific errors */ -typedef enum { LZ4F_LIST_ERRORS(LZ4F_GENERATE_ENUM) - _LZ4F_dummy_error_enum_for_c89_never_used } LZ4F_errorCodes; - -LZ4FLIB_STATIC_API LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult); - - -/*! LZ4F_getBlockSize() : - * Return, in scalar format (size_t), - * the maximum block size associated with blockSizeID. -**/ -LZ4FLIB_STATIC_API size_t LZ4F_getBlockSize(LZ4F_blockSizeID_t blockSizeID); - -/*! LZ4F_uncompressedUpdate() : - * LZ4F_uncompressedUpdate() can be called repetitively to add as much data uncompressed data as necessary. - * Important rule: dstCapacity MUST be large enough to store the entire source buffer as - * no compression is done for this operation - * If this condition is not respected, LZ4F_uncompressedUpdate() will fail (result is an errorCode). - * After an error, the state is left in a UB state, and must be re-initialized or freed. 
- * If previously a compressed block was written, buffered data is flushed - * before appending uncompressed data is continued. - * This is only supported when LZ4F_blockIndependent is used - * `cOptPtr` is optional : NULL can be provided, in which case all options are set to default. - * @return : number of bytes written into `dstBuffer` (it can be zero, meaning input data was just buffered). - * or an error code if it fails (which can be tested using LZ4F_isError()) - */ -LZ4FLIB_STATIC_API size_t -LZ4F_uncompressedUpdate(LZ4F_cctx* cctx, - void* dstBuffer, size_t dstCapacity, - const void* srcBuffer, size_t srcSize, - const LZ4F_compressOptions_t* cOptPtr); - -/********************************** - * Bulk processing dictionary API - *********************************/ - -/* A Dictionary is useful for the compression of small messages (KB range). - * It dramatically improves compression efficiency. - * - * LZ4 can ingest any input as dictionary, though only the last 64 KB are useful. - * Best results are generally achieved by using Zstandard's Dictionary Builder - * to generate a high-quality dictionary from a set of samples. - * - * Loading a dictionary has a cost, since it involves construction of tables. - * The Bulk processing dictionary API makes it possible to share this cost - * over an arbitrary number of compression jobs, even concurrently, - * markedly improving compression latency for these cases. - * - * The same dictionary will have to be used on the decompression side - * for decoding to be successful. - * To help identify the correct dictionary at decoding stage, - * the frame header allows optional embedding of a dictID field. - */ -typedef struct LZ4F_CDict_s LZ4F_CDict; - -/*! LZ4_createCDict() : - * When compressing multiple messages / blocks using the same dictionary, it's recommended to load it just once. - * LZ4_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay. - * LZ4_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only. - * `dictBuffer` can be released after LZ4_CDict creation, since its content is copied within CDict */ -LZ4FLIB_STATIC_API LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize); -LZ4FLIB_STATIC_API void LZ4F_freeCDict(LZ4F_CDict* CDict); - - -/*! LZ4_compressFrame_usingCDict() : - * Compress an entire srcBuffer into a valid LZ4 frame using a digested Dictionary. - * cctx must point to a context created by LZ4F_createCompressionContext(). - * If cdict==NULL, compress without a dictionary. - * dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr). - * If this condition is not respected, function will fail (@return an errorCode). - * The LZ4F_preferences_t structure is optional : you may provide NULL as argument, - * but it's not recommended, as it's the only way to provide dictID in the frame header. - * @return : number of bytes written into dstBuffer. - * or an error code if it fails (can be tested using LZ4F_isError()) */ -LZ4FLIB_STATIC_API size_t -LZ4F_compressFrame_usingCDict(LZ4F_cctx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const LZ4F_CDict* cdict, - const LZ4F_preferences_t* preferencesPtr); - - -/*! LZ4F_compressBegin_usingCDict() : - * Inits streaming dictionary compression, and writes the frame header into dstBuffer. - * dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes. 
- * `prefsPtr` is optional : you may provide NULL as argument, - * however, it's the only way to provide dictID in the frame header. - * @return : number of bytes written into dstBuffer for the header, - * or an error code (which can be tested using LZ4F_isError()) */ -LZ4FLIB_STATIC_API size_t -LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctx, - void* dstBuffer, size_t dstCapacity, - const LZ4F_CDict* cdict, - const LZ4F_preferences_t* prefsPtr); - - -/*! LZ4F_decompress_usingDict() : - * Same as LZ4F_decompress(), using a predefined dictionary. - * Dictionary is used "in place", without any preprocessing. -** It must remain accessible throughout the entire frame decoding. */ -LZ4FLIB_STATIC_API size_t -LZ4F_decompress_usingDict(LZ4F_dctx* dctxPtr, - void* dstBuffer, size_t* dstSizePtr, - const void* srcBuffer, size_t* srcSizePtr, - const void* dict, size_t dictSize, - const LZ4F_decompressOptions_t* decompressOptionsPtr); - - -/*! Custom memory allocation : v1.9.4+ - * These prototypes make it possible to pass custom allocation/free functions. - * LZ4F_customMem is provided at state creation time, using LZ4F_create*_advanced() listed below. - * All allocation/free operations will be completed using these custom variants instead of regular ones. - */ -typedef void* (*LZ4F_AllocFunction) (void* opaqueState, size_t size); -typedef void* (*LZ4F_CallocFunction) (void* opaqueState, size_t size); -typedef void (*LZ4F_FreeFunction) (void* opaqueState, void* address); -typedef struct { - LZ4F_AllocFunction customAlloc; - LZ4F_CallocFunction customCalloc; /* optional; when not defined, uses customAlloc + memset */ - LZ4F_FreeFunction customFree; - void* opaqueState; -} LZ4F_CustomMem; -static -#ifdef __GNUC__ -__attribute__((__unused__)) -#endif -LZ4F_CustomMem const LZ4F_defaultCMem = { NULL, NULL, NULL, NULL }; /**< this constant defers to stdlib's functions */ - -LZ4FLIB_STATIC_API LZ4F_cctx* LZ4F_createCompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version); -LZ4FLIB_STATIC_API LZ4F_dctx* LZ4F_createDecompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version); -LZ4FLIB_STATIC_API LZ4F_CDict* LZ4F_createCDict_advanced(LZ4F_CustomMem customMem, const void* dictBuffer, size_t dictSize); - - -#if defined (__cplusplus) -} -#endif - -#endif /* defined(LZ4F_STATIC_LINKING_ONLY) && !defined(LZ4F_H_STATIC_09782039843) */ diff --git a/CompressSave/LZ4WrapC/lz4/lz4frame_static.h b/CompressSave/LZ4WrapC/lz4/lz4frame_static.h deleted file mode 100644 index 2b44a63..0000000 --- a/CompressSave/LZ4WrapC/lz4/lz4frame_static.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - LZ4 auto-framing library - Header File for static linking only - Copyright (C) 2011-2020, Yann Collet. - - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - LZ4 source repository : https://github.com/lz4/lz4 - - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c -*/ - -#ifndef LZ4FRAME_STATIC_H_0398209384 -#define LZ4FRAME_STATIC_H_0398209384 - -/* The declarations that formerly were made here have been merged into - * lz4frame.h, protected by the LZ4F_STATIC_LINKING_ONLY macro. Going forward, - * it is recommended to simply include that header directly. - */ - -#define LZ4F_STATIC_LINKING_ONLY -#include "lz4frame.h" - -#endif /* LZ4FRAME_STATIC_H_0398209384 */ diff --git a/CompressSave/LZ4WrapC/lz4/lz4hc.c b/CompressSave/LZ4WrapC/lz4/lz4hc.c deleted file mode 100644 index 651f190..0000000 --- a/CompressSave/LZ4WrapC/lz4/lz4hc.c +++ /dev/null @@ -1,1637 +0,0 @@ -/* - LZ4 HC - High Compression Mode of LZ4 - Copyright (C) 2011-2020, Yann Collet. - - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - LZ4 source repository : https://github.com/lz4/lz4 - - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c -*/ -/* note : lz4hc is not an independent module, it requires lz4.h/lz4.c for proper compilation */ - - -/* ************************************* -* Tuning Parameter -***************************************/ - -/*! HEAPMODE : - * Select how stateless HC compression functions like `LZ4_compress_HC()` - * allocate memory for their workspace: - * in stack (0:fastest), or in heap (1:default, requires malloc()). - * Since workspace is rather large, heap mode is recommended. 
-**/ -#ifndef LZ4HC_HEAPMODE -# define LZ4HC_HEAPMODE 1 -#endif - - -/*=== Dependency ===*/ -#define LZ4_HC_STATIC_LINKING_ONLY -#include "lz4hc.h" - - -/*=== Common definitions ===*/ -#if defined(__GNUC__) -# pragma GCC diagnostic ignored "-Wunused-function" -#endif -#if defined (__clang__) -# pragma clang diagnostic ignored "-Wunused-function" -#endif - -#define LZ4_COMMONDEFS_ONLY -#ifndef LZ4_SRC_INCLUDED -#include "lz4.c" /* LZ4_count, constants, mem */ -#endif - - -/*=== Enums ===*/ -typedef enum { noDictCtx, usingDictCtxHc } dictCtx_directive; - - -/*=== Constants ===*/ -#define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH) -#define LZ4_OPT_NUM (1<<12) - - -/*=== Macros ===*/ -#define MIN(a,b) ( (a) < (b) ? (a) : (b) ) -#define MAX(a,b) ( (a) > (b) ? (a) : (b) ) -#define HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8)-LZ4HC_HASH_LOG)) -#define DELTANEXTMAXD(p) chainTable[(p) & LZ4HC_MAXD_MASK] /* flexible, LZ4HC_MAXD dependent */ -#define DELTANEXTU16(table, pos) table[(U16)(pos)] /* faster */ -/* Make fields passed to, and updated by LZ4HC_encodeSequence explicit */ -#define UPDATABLE(ip, op, anchor) &ip, &op, &anchor - -#define LZ4HC_HASHSIZE 4 -static U32 LZ4HC_hashPtr(const void* ptr) { return HASH_FUNCTION(LZ4_read32(ptr)); } - - -/************************************** -* HC Compression -**************************************/ -static void LZ4HC_clearTables (LZ4HC_CCtx_internal* hc4) -{ - MEM_INIT(hc4->hashTable, 0, sizeof(hc4->hashTable)); - MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable)); -} - -static void LZ4HC_init_internal (LZ4HC_CCtx_internal* hc4, const BYTE* start) -{ - size_t const bufferSize = (size_t)(hc4->end - hc4->prefixStart); - size_t newStartingOffset = bufferSize + hc4->dictLimit; - DEBUGLOG(5, "LZ4HC_init_internal"); - assert(newStartingOffset >= bufferSize); /* check overflow */ - if (newStartingOffset > 1 GB) { - LZ4HC_clearTables(hc4); - newStartingOffset = 0; - } - newStartingOffset += 64 KB; - hc4->nextToUpdate = (U32)newStartingOffset; - hc4->prefixStart = start; - hc4->end = start; - hc4->dictStart = start; - hc4->dictLimit = (U32)newStartingOffset; - hc4->lowLimit = (U32)newStartingOffset; -} - - -/* Update chains up to ip (excluded) */ -LZ4_FORCE_INLINE void LZ4HC_Insert (LZ4HC_CCtx_internal* hc4, const BYTE* ip) -{ - U16* const chainTable = hc4->chainTable; - U32* const hashTable = hc4->hashTable; - const BYTE* const prefixPtr = hc4->prefixStart; - U32 const prefixIdx = hc4->dictLimit; - U32 const target = (U32)(ip - prefixPtr) + prefixIdx; - U32 idx = hc4->nextToUpdate; - assert(ip >= prefixPtr); - assert(target >= prefixIdx); - - while (idx < target) { - U32 const h = LZ4HC_hashPtr(prefixPtr+idx-prefixIdx); - size_t delta = idx - hashTable[h]; - if (delta>LZ4_DISTANCE_MAX) delta = LZ4_DISTANCE_MAX; - DELTANEXTU16(chainTable, idx) = (U16)delta; - hashTable[h] = idx; - idx++; - } - - hc4->nextToUpdate = target; -} - -/** LZ4HC_countBack() : - * @return : negative value, nb of common bytes before ip/match */ -LZ4_FORCE_INLINE -int LZ4HC_countBack(const BYTE* const ip, const BYTE* const match, - const BYTE* const iMin, const BYTE* const mMin) -{ - int back = 0; - int const min = (int)MAX(iMin - ip, mMin - match); - assert(min <= 0); - assert(ip >= iMin); assert((size_t)(ip-iMin) < (1U<<31)); - assert(match >= mMin); assert((size_t)(match - mMin) < (1U<<31)); - while ( (back > min) - && (ip[back-1] == match[back-1]) ) - back--; - return back; -} - -#if defined(_MSC_VER) -# define LZ4HC_rotl32(x,r) _rotl(x,r) -#else -# define LZ4HC_rotl32(x,r) 
((x << r) | (x >> (32 - r))) -#endif - - -static U32 LZ4HC_rotatePattern(size_t const rotate, U32 const pattern) -{ - size_t const bitsToRotate = (rotate & (sizeof(pattern) - 1)) << 3; - if (bitsToRotate == 0) return pattern; - return LZ4HC_rotl32(pattern, (int)bitsToRotate); -} - -/* LZ4HC_countPattern() : - * pattern32 must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!) */ -static unsigned -LZ4HC_countPattern(const BYTE* ip, const BYTE* const iEnd, U32 const pattern32) -{ - const BYTE* const iStart = ip; - reg_t const pattern = (sizeof(pattern)==8) ? - (reg_t)pattern32 + (((reg_t)pattern32) << (sizeof(pattern)*4)) : pattern32; - - while (likely(ip < iEnd-(sizeof(pattern)-1))) { - reg_t const diff = LZ4_read_ARCH(ip) ^ pattern; - if (!diff) { ip+=sizeof(pattern); continue; } - ip += LZ4_NbCommonBytes(diff); - return (unsigned)(ip - iStart); - } - - if (LZ4_isLittleEndian()) { - reg_t patternByte = pattern; - while ((ip>= 8; - } - } else { /* big endian */ - U32 bitOffset = (sizeof(pattern)*8) - 8; - while (ip < iEnd) { - BYTE const byte = (BYTE)(pattern >> bitOffset); - if (*ip != byte) break; - ip ++; bitOffset -= 8; - } } - - return (unsigned)(ip - iStart); -} - -/* LZ4HC_reverseCountPattern() : - * pattern must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!) - * read using natural platform endianness */ -static unsigned -LZ4HC_reverseCountPattern(const BYTE* ip, const BYTE* const iLow, U32 pattern) -{ - const BYTE* const iStart = ip; - - while (likely(ip >= iLow+4)) { - if (LZ4_read32(ip-4) != pattern) break; - ip -= 4; - } - { const BYTE* bytePtr = (const BYTE*)(&pattern) + 3; /* works for any endianness */ - while (likely(ip>iLow)) { - if (ip[-1] != *bytePtr) break; - ip--; bytePtr--; - } } - return (unsigned)(iStart - ip); -} - -/* LZ4HC_protectDictEnd() : - * Checks if the match is in the last 3 bytes of the dictionary, so reading the - * 4 byte MINMATCH would overflow. - * @returns true if the match index is okay. - */ -static int LZ4HC_protectDictEnd(U32 const dictLimit, U32 const matchIndex) -{ - return ((U32)((dictLimit - 1) - matchIndex) >= 3); -} - -typedef enum { rep_untested, rep_not, rep_confirmed } repeat_state_e; -typedef enum { favorCompressionRatio=0, favorDecompressionSpeed } HCfavor_e; - -typedef struct { - int off; - int len; -} LZ4HC_match_t; - -LZ4_FORCE_INLINE LZ4HC_match_t -LZ4HC_InsertAndGetWiderMatch ( - LZ4HC_CCtx_internal* const hc4, - const BYTE* const ip, - const BYTE* const iLowLimit, const BYTE* const iHighLimit, - int longest, - const BYTE** startpos, - const int maxNbAttempts, - const int patternAnalysis, const int chainSwap, - const dictCtx_directive dict, - const HCfavor_e favorDecSpeed) -{ - U16* const chainTable = hc4->chainTable; - U32* const hashTable = hc4->hashTable; - const LZ4HC_CCtx_internal * const dictCtx = hc4->dictCtx; - const BYTE* const prefixPtr = hc4->prefixStart; - const U32 prefixIdx = hc4->dictLimit; - const U32 ipIndex = (U32)(ip - prefixPtr) + prefixIdx; - const int withinStartDistance = (hc4->lowLimit + (LZ4_DISTANCE_MAX + 1) > ipIndex); - const U32 lowestMatchIndex = (withinStartDistance) ? 
hc4->lowLimit : ipIndex - LZ4_DISTANCE_MAX; - const BYTE* const dictStart = hc4->dictStart; - const U32 dictIdx = hc4->lowLimit; - const BYTE* const dictEnd = dictStart + prefixIdx - dictIdx; - int const lookBackLength = (int)(ip-iLowLimit); - int nbAttempts = maxNbAttempts; - U32 matchChainPos = 0; - U32 const pattern = LZ4_read32(ip); - U32 matchIndex; - repeat_state_e repeat = rep_untested; - size_t srcPatternLength = 0; - int offset = 0; - - DEBUGLOG(7, "LZ4HC_InsertAndGetWiderMatch"); - assert(startpos != NULL); - *startpos = ip; /* in case there is no solution */ - /* First Match */ - LZ4HC_Insert(hc4, ip); /* insert all prior positions up to ip (excluded) */ - matchIndex = hashTable[LZ4HC_hashPtr(ip)]; - DEBUGLOG(7, "First candidate match for pos %u found at index %u / %u (lowestMatchIndex)", - ipIndex, matchIndex, lowestMatchIndex); - - while ((matchIndex>=lowestMatchIndex) && (nbAttempts>0)) { - int matchLength=0; - nbAttempts--; - assert(matchIndex < ipIndex); - if (favorDecSpeed && (ipIndex - matchIndex < 8)) { - /* do nothing: - * favorDecSpeed intentionally skips matches with offset < 8 */ - } else if (matchIndex >= prefixIdx) { /* within current Prefix */ - const BYTE* const matchPtr = prefixPtr + (matchIndex - prefixIdx); - assert(matchPtr < ip); - assert(longest >= 1); - if (LZ4_read16(iLowLimit + longest - 1) == LZ4_read16(matchPtr - lookBackLength + longest - 1)) { - if (LZ4_read32(matchPtr) == pattern) { - int const back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, prefixPtr) : 0; - matchLength = MINMATCH + (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, iHighLimit); - matchLength -= back; - if (matchLength > longest) { - longest = matchLength; - offset = (int)(ipIndex - matchIndex); - *startpos = ip + back; - DEBUGLOG(7, "Found match of len=%i within prefix, offset=%i, back=%i", longest, offset, -back); - } } } - } else { /* lowestMatchIndex <= matchIndex < dictLimit : within Ext Dict */ - const BYTE* const matchPtr = dictStart + (matchIndex - dictIdx); - assert(matchIndex >= dictIdx); - if ( likely(matchIndex <= prefixIdx - 4) - && (LZ4_read32(matchPtr) == pattern) ) { - int back = 0; - const BYTE* vLimit = ip + (prefixIdx - matchIndex); - if (vLimit > iHighLimit) vLimit = iHighLimit; - matchLength = (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH; - if ((ip+matchLength == vLimit) && (vLimit < iHighLimit)) - matchLength += LZ4_count(ip+matchLength, prefixPtr, iHighLimit); - back = lookBackLength ? 
LZ4HC_countBack(ip, matchPtr, iLowLimit, dictStart) : 0; - matchLength -= back; - if (matchLength > longest) { - longest = matchLength; - offset = (int)(ipIndex - matchIndex); - *startpos = ip + back; - DEBUGLOG(7, "Found match of len=%i within dict, offset=%i, back=%i", longest, offset, -back); - } } } - - if (chainSwap && matchLength==longest) { /* better match => select a better chain */ - assert(lookBackLength==0); /* search forward only */ - if (matchIndex + (U32)longest <= ipIndex) { - int const kTrigger = 4; - U32 distanceToNextMatch = 1; - int const end = longest - MINMATCH + 1; - int step = 1; - int accel = 1 << kTrigger; - int pos; - for (pos = 0; pos < end; pos += step) { - U32 const candidateDist = DELTANEXTU16(chainTable, matchIndex + (U32)pos); - step = (accel++ >> kTrigger); - if (candidateDist > distanceToNextMatch) { - distanceToNextMatch = candidateDist; - matchChainPos = (U32)pos; - accel = 1 << kTrigger; - } } - if (distanceToNextMatch > 1) { - if (distanceToNextMatch > matchIndex) break; /* avoid overflow */ - matchIndex -= distanceToNextMatch; - continue; - } } } - - { U32 const distNextMatch = DELTANEXTU16(chainTable, matchIndex); - if (patternAnalysis && distNextMatch==1 && matchChainPos==0) { - U32 const matchCandidateIdx = matchIndex-1; - /* may be a repeated pattern */ - if (repeat == rep_untested) { - if ( ((pattern & 0xFFFF) == (pattern >> 16)) - & ((pattern & 0xFF) == (pattern >> 24)) ) { - DEBUGLOG(7, "Repeat pattern detected, char %02X", pattern >> 24); - repeat = rep_confirmed; - srcPatternLength = LZ4HC_countPattern(ip+sizeof(pattern), iHighLimit, pattern) + sizeof(pattern); - } else { - repeat = rep_not; - } } - if ( (repeat == rep_confirmed) && (matchCandidateIdx >= lowestMatchIndex) - && LZ4HC_protectDictEnd(prefixIdx, matchCandidateIdx) ) { - const int extDict = matchCandidateIdx < prefixIdx; - const BYTE* const matchPtr = extDict ? dictStart + (matchCandidateIdx - dictIdx) : prefixPtr + (matchCandidateIdx - prefixIdx); - if (LZ4_read32(matchPtr) == pattern) { /* good candidate */ - const BYTE* const iLimit = extDict ? dictEnd : iHighLimit; - size_t forwardPatternLength = LZ4HC_countPattern(matchPtr+sizeof(pattern), iLimit, pattern) + sizeof(pattern); - if (extDict && matchPtr + forwardPatternLength == iLimit) { - U32 const rotatedPattern = LZ4HC_rotatePattern(forwardPatternLength, pattern); - forwardPatternLength += LZ4HC_countPattern(prefixPtr, iHighLimit, rotatedPattern); - } - { const BYTE* const lowestMatchPtr = extDict ? 
dictStart : prefixPtr; - size_t backLength = LZ4HC_reverseCountPattern(matchPtr, lowestMatchPtr, pattern); - size_t currentSegmentLength; - if (!extDict - && matchPtr - backLength == prefixPtr - && dictIdx < prefixIdx) { - U32 const rotatedPattern = LZ4HC_rotatePattern((U32)(-(int)backLength), pattern); - backLength += LZ4HC_reverseCountPattern(dictEnd, dictStart, rotatedPattern); - } - /* Limit backLength not go further than lowestMatchIndex */ - backLength = matchCandidateIdx - MAX(matchCandidateIdx - (U32)backLength, lowestMatchIndex); - assert(matchCandidateIdx - backLength >= lowestMatchIndex); - currentSegmentLength = backLength + forwardPatternLength; - /* Adjust to end of pattern if the source pattern fits, otherwise the beginning of the pattern */ - if ( (currentSegmentLength >= srcPatternLength) /* current pattern segment large enough to contain full srcPatternLength */ - && (forwardPatternLength <= srcPatternLength) ) { /* haven't reached this position yet */ - U32 const newMatchIndex = matchCandidateIdx + (U32)forwardPatternLength - (U32)srcPatternLength; /* best position, full pattern, might be followed by more match */ - if (LZ4HC_protectDictEnd(prefixIdx, newMatchIndex)) - matchIndex = newMatchIndex; - else { - /* Can only happen if started in the prefix */ - assert(newMatchIndex >= prefixIdx - 3 && newMatchIndex < prefixIdx && !extDict); - matchIndex = prefixIdx; - } - } else { - U32 const newMatchIndex = matchCandidateIdx - (U32)backLength; /* farthest position in current segment, will find a match of length currentSegmentLength + maybe some back */ - if (!LZ4HC_protectDictEnd(prefixIdx, newMatchIndex)) { - assert(newMatchIndex >= prefixIdx - 3 && newMatchIndex < prefixIdx && !extDict); - matchIndex = prefixIdx; - } else { - matchIndex = newMatchIndex; - if (lookBackLength==0) { /* no back possible */ - size_t const maxML = MIN(currentSegmentLength, srcPatternLength); - if ((size_t)longest < maxML) { - assert(prefixPtr - prefixIdx + matchIndex != ip); - if ((size_t)(ip - prefixPtr) + prefixIdx - matchIndex > LZ4_DISTANCE_MAX) break; - assert(maxML < 2 GB); - longest = (int)maxML; - offset = (int)(ipIndex - matchIndex); - *startpos = ip; - DEBUGLOG(7, "Found repeat pattern match of len=%i, offset=%i", longest, offset); - } - { U32 const distToNextPattern = DELTANEXTU16(chainTable, matchIndex); - if (distToNextPattern > matchIndex) break; /* avoid overflow */ - matchIndex -= distToNextPattern; - } } } } } - continue; - } } - } } /* PA optimization */ - - /* follow current chain */ - matchIndex -= DELTANEXTU16(chainTable, matchIndex + matchChainPos); - - } /* while ((matchIndex>=lowestMatchIndex) && (nbAttempts)) */ - - if ( dict == usingDictCtxHc - && nbAttempts > 0 - && ipIndex - lowestMatchIndex < LZ4_DISTANCE_MAX) { - size_t const dictEndOffset = (size_t)(dictCtx->end - dictCtx->prefixStart) + dictCtx->dictLimit; - U32 dictMatchIndex = dictCtx->hashTable[LZ4HC_hashPtr(ip)]; - assert(dictEndOffset <= 1 GB); - matchIndex = dictMatchIndex + lowestMatchIndex - (U32)dictEndOffset; - if (dictMatchIndex>0) DEBUGLOG(7, "dictEndOffset = %zu, dictMatchIndex = %u => relative matchIndex = %i", dictEndOffset, dictMatchIndex, (int)dictMatchIndex - (int)dictEndOffset); - while (ipIndex - matchIndex <= LZ4_DISTANCE_MAX && nbAttempts--) { - const BYTE* const matchPtr = dictCtx->prefixStart - dictCtx->dictLimit + dictMatchIndex; - - if (LZ4_read32(matchPtr) == pattern) { - int mlt; - int back = 0; - const BYTE* vLimit = ip + (dictEndOffset - dictMatchIndex); - if (vLimit > iHighLimit) 
vLimit = iHighLimit; - mlt = (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH; - back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, dictCtx->prefixStart) : 0; - mlt -= back; - if (mlt > longest) { - longest = mlt; - offset = (int)(ipIndex - matchIndex); - *startpos = ip + back; - DEBUGLOG(7, "found match of length %i within extDictCtx", longest); - } } - - { U32 const nextOffset = DELTANEXTU16(dictCtx->chainTable, dictMatchIndex); - dictMatchIndex -= nextOffset; - matchIndex -= nextOffset; - } } } - - { LZ4HC_match_t md; - assert(longest >= 0); - md.len = longest; - md.off = offset; - return md; - } -} - -LZ4_FORCE_INLINE LZ4HC_match_t -LZ4HC_InsertAndFindBestMatch(LZ4HC_CCtx_internal* const hc4, /* Index table will be updated */ - const BYTE* const ip, const BYTE* const iLimit, - const int maxNbAttempts, - const int patternAnalysis, - const dictCtx_directive dict) -{ - const BYTE* uselessPtr = ip; - DEBUGLOG(7, "LZ4HC_InsertAndFindBestMatch"); - /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos), - * but this won't be the case here, as we define iLowLimit==ip, - * so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */ - return LZ4HC_InsertAndGetWiderMatch(hc4, ip, ip, iLimit, MINMATCH-1, &uselessPtr, maxNbAttempts, patternAnalysis, 0 /*chainSwap*/, dict, favorCompressionRatio); -} - -/* LZ4HC_encodeSequence() : - * @return : 0 if ok, - * 1 if buffer issue detected */ -LZ4_FORCE_INLINE int LZ4HC_encodeSequence ( - const BYTE** _ip, - BYTE** _op, - const BYTE** _anchor, - int matchLength, - int offset, - limitedOutput_directive limit, - BYTE* oend) -{ -#define ip (*_ip) -#define op (*_op) -#define anchor (*_anchor) - - size_t length; - BYTE* const token = op++; - -#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 6) - static const BYTE* start = NULL; - static U32 totalCost = 0; - U32 const pos = (start==NULL) ? 0 : (U32)(anchor - start); - U32 const ll = (U32)(ip - anchor); - U32 const llAdd = (ll>=15) ? ((ll-15) / 255) + 1 : 0; - U32 const mlAdd = (matchLength>=19) ? 
((matchLength-19) / 255) + 1 : 0; - U32 const cost = 1 + llAdd + ll + 2 + mlAdd; - if (start==NULL) start = anchor; /* only works for single segment */ - /* g_debuglog_enable = (pos >= 2228) & (pos <= 2262); */ - DEBUGLOG(6, "pos:%7u -- literals:%4u, match:%4i, offset:%5i, cost:%4u + %5u", - pos, - (U32)(ip - anchor), matchLength, offset, - cost, totalCost); - totalCost += cost; -#endif - - /* Encode Literal length */ - length = (size_t)(ip - anchor); - LZ4_STATIC_ASSERT(notLimited == 0); - /* Check output limit */ - if (limit && ((op + (length / 255) + length + (2 + 1 + LASTLITERALS)) > oend)) { - DEBUGLOG(6, "Not enough room to write %i literals (%i bytes remaining)", - (int)length, (int)(oend - op)); - return 1; - } - if (length >= RUN_MASK) { - size_t len = length - RUN_MASK; - *token = (RUN_MASK << ML_BITS); - for(; len >= 255 ; len -= 255) *op++ = 255; - *op++ = (BYTE)len; - } else { - *token = (BYTE)(length << ML_BITS); - } - - /* Copy Literals */ - LZ4_wildCopy8(op, anchor, op + length); - op += length; - - /* Encode Offset */ - assert(offset <= LZ4_DISTANCE_MAX ); - assert(offset > 0); - LZ4_writeLE16(op, (U16)(offset)); op += 2; - - /* Encode MatchLength */ - assert(matchLength >= MINMATCH); - length = (size_t)matchLength - MINMATCH; - if (limit && (op + (length / 255) + (1 + LASTLITERALS) > oend)) { - DEBUGLOG(6, "Not enough room to write match length"); - return 1; /* Check output limit */ - } - if (length >= ML_MASK) { - *token += ML_MASK; - length -= ML_MASK; - for(; length >= 510 ; length -= 510) { *op++ = 255; *op++ = 255; } - if (length >= 255) { length -= 255; *op++ = 255; } - *op++ = (BYTE)length; - } else { - *token += (BYTE)(length); - } - - /* Prepare next loop */ - ip += matchLength; - anchor = ip; - - return 0; -} -#undef ip -#undef op -#undef anchor - -LZ4_FORCE_INLINE int LZ4HC_compress_hashChain ( - LZ4HC_CCtx_internal* const ctx, - const char* const source, - char* const dest, - int* srcSizePtr, - int const maxOutputSize, - int maxNbAttempts, - const limitedOutput_directive limit, - const dictCtx_directive dict - ) -{ - const int inputSize = *srcSizePtr; - const int patternAnalysis = (maxNbAttempts > 128); /* levels 9+ */ - - const BYTE* ip = (const BYTE*) source; - const BYTE* anchor = ip; - const BYTE* const iend = ip + inputSize; - const BYTE* const mflimit = iend - MFLIMIT; - const BYTE* const matchlimit = (iend - LASTLITERALS); - - BYTE* optr = (BYTE*) dest; - BYTE* op = (BYTE*) dest; - BYTE* oend = op + maxOutputSize; - - const BYTE* start0; - const BYTE* start2 = NULL; - const BYTE* start3 = NULL; - LZ4HC_match_t m0, m1, m2, m3; - const LZ4HC_match_t nomatch = {0, 0}; - - /* init */ - DEBUGLOG(5, "LZ4HC_compress_hashChain (dict?=>%i)", dict); - *srcSizePtr = 0; - if (limit == fillOutput) oend -= LASTLITERALS; /* Hack for support LZ4 format restriction */ - if (inputSize < LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */ - - /* Main Loop */ - while (ip <= mflimit) { - m1 = LZ4HC_InsertAndFindBestMatch(ctx, ip, matchlimit, maxNbAttempts, patternAnalysis, dict); - if (m1.len encode ML1 immediately */ - optr = op; - if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), m1.len, m1.off, limit, oend)) goto _dest_overflow; - continue; - } - - if (start0 < ip) { /* first match was skipped at least once */ - if (start2 < ip + m0.len) { /* squeezing ML1 between ML0(original ML1) and ML2 */ - ip = start0; m1 = m0; /* restore initial Match1 */ - } } - - /* Here, start0==ip */ - if ((start2 - ip) < 3) { /* First Match too small : 
removed */ - ip = start2; - m1 = m2; - goto _Search2; - } - -_Search3: - if ((start2 - ip) < OPTIMAL_ML) { - int correction; - int new_ml = m1.len; - if (new_ml > OPTIMAL_ML) new_ml = OPTIMAL_ML; - if (ip+new_ml > start2 + m2.len - MINMATCH) - new_ml = (int)(start2 - ip) + m2.len - MINMATCH; - correction = new_ml - (int)(start2 - ip); - if (correction > 0) { - start2 += correction; - m2.len -= correction; - } - } - - if (start2 + m2.len <= mflimit) { - m3 = LZ4HC_InsertAndGetWiderMatch(ctx, - start2 + m2.len - 3, start2, matchlimit, m2.len, &start3, - maxNbAttempts, patternAnalysis, 0, dict, favorCompressionRatio); - } else { - m3 = nomatch; /* do not search further */ - } - - if (m3.len <= m2.len) { /* No better match => encode ML1 and ML2 */ - /* ip & ref are known; Now for ml */ - if (start2 < ip+m1.len) m1.len = (int)(start2 - ip); - /* Now, encode 2 sequences */ - optr = op; - if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), m1.len, m1.off, limit, oend)) - goto _dest_overflow; - ip = start2; - optr = op; - if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), m2.len, m2.off, limit, oend)) { - m1 = m2; - goto _dest_overflow; - } - continue; - } - - if (start3 < ip+m1.len+3) { /* Not enough space for match 2 : remove it */ - if (start3 >= (ip+m1.len)) { /* can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1 */ - if (start2 < ip+m1.len) { - int correction = (int)(ip+m1.len - start2); - start2 += correction; - m2.len -= correction; - if (m2.len < MINMATCH) { - start2 = start3; - m2 = m3; - } - } - - optr = op; - if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), m1.len, m1.off, limit, oend)) goto _dest_overflow; - ip = start3; - m1 = m3; - - start0 = start2; - m0 = m2; - goto _Search2; - } - - start2 = start3; - m2 = m3; - goto _Search3; - } - - /* - * OK, now we have 3 ascending matches; - * let's write the first one ML1. - * ip & ref are known; Now decide ml. 
- */ - if (start2 < ip+m1.len) { - if ((start2 - ip) < OPTIMAL_ML) { - int correction; - if (m1.len > OPTIMAL_ML) m1.len = OPTIMAL_ML; - if (ip + m1.len > start2 + m2.len - MINMATCH) - m1.len = (int)(start2 - ip) + m2.len - MINMATCH; - correction = m1.len - (int)(start2 - ip); - if (correction > 0) { - start2 += correction; - m2.len -= correction; - } - } else { - m1.len = (int)(start2 - ip); - } - } - optr = op; - if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), m1.len, m1.off, limit, oend)) goto _dest_overflow; - - /* ML2 becomes ML1 */ - ip = start2; m1 = m2; - - /* ML3 becomes ML2 */ - start2 = start3; m2 = m3; - - /* let's find a new ML3 */ - goto _Search3; - } - -_last_literals: - /* Encode Last Literals */ - { size_t lastRunSize = (size_t)(iend - anchor); /* literals */ - size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255; - size_t const totalSize = 1 + llAdd + lastRunSize; - if (limit == fillOutput) oend += LASTLITERALS; /* restore correct value */ - if (limit && (op + totalSize > oend)) { - if (limit == limitedOutput) return 0; - /* adapt lastRunSize to fill 'dest' */ - lastRunSize = (size_t)(oend - op) - 1 /*token*/; - llAdd = (lastRunSize + 256 - RUN_MASK) / 256; - lastRunSize -= llAdd; - } - DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize); - ip = anchor + lastRunSize; /* can be != iend if limit==fillOutput */ - - if (lastRunSize >= RUN_MASK) { - size_t accumulator = lastRunSize - RUN_MASK; - *op++ = (RUN_MASK << ML_BITS); - for(; accumulator >= 255 ; accumulator -= 255) *op++ = 255; - *op++ = (BYTE) accumulator; - } else { - *op++ = (BYTE)(lastRunSize << ML_BITS); - } - LZ4_memcpy(op, anchor, lastRunSize); - op += lastRunSize; - } - - /* End */ - *srcSizePtr = (int) (((const char*)ip) - source); - return (int) (((char*)op)-dest); - -_dest_overflow: - if (limit == fillOutput) { - /* Assumption : ip, anchor, ml and ref must be set correctly */ - size_t const ll = (size_t)(ip - anchor); - size_t const ll_addbytes = (ll + 240) / 255; - size_t const ll_totalCost = 1 + ll_addbytes + ll; - BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */ - DEBUGLOG(6, "Last sequence overflowing"); - op = optr; /* restore correct out pointer */ - if (op + ll_totalCost <= maxLitPos) { - /* ll validated; now adjust match length */ - size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost)); - size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255); - assert(maxMlSize < INT_MAX); assert(m1.len >= 0); - if ((size_t)m1.len > maxMlSize) m1.len = (int)maxMlSize; - if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + m1.len >= MFLIMIT) { - LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), m1.len, m1.off, notLimited, oend); - } } - goto _last_literals; - } - /* compression failed */ - return 0; -} - - -static int LZ4HC_compress_optimal( LZ4HC_CCtx_internal* ctx, - const char* const source, char* dst, - int* srcSizePtr, int dstCapacity, - int const nbSearches, size_t sufficient_len, - const limitedOutput_directive limit, int const fullUpdate, - const dictCtx_directive dict, - const HCfavor_e favorDecSpeed); - - -LZ4_FORCE_INLINE int -LZ4HC_compress_generic_internal ( - LZ4HC_CCtx_internal* const ctx, - const char* const src, - char* const dst, - int* const srcSizePtr, - int const dstCapacity, - int cLevel, - const limitedOutput_directive limit, - const dictCtx_directive dict - ) -{ - typedef enum { lz4hc, lz4opt } lz4hc_strat_e; - typedef struct { - lz4hc_strat_e strat; - int nbSearches; - U32 targetLength; - } cParams_t; - static const 
cParams_t clTable[LZ4HC_CLEVEL_MAX+1] = { - { lz4hc, 2, 16 }, /* 0, unused */ - { lz4hc, 2, 16 }, /* 1, unused */ - { lz4hc, 2, 16 }, /* 2, unused */ - { lz4hc, 4, 16 }, /* 3 */ - { lz4hc, 8, 16 }, /* 4 */ - { lz4hc, 16, 16 }, /* 5 */ - { lz4hc, 32, 16 }, /* 6 */ - { lz4hc, 64, 16 }, /* 7 */ - { lz4hc, 128, 16 }, /* 8 */ - { lz4hc, 256, 16 }, /* 9 */ - { lz4opt, 96, 64 }, /*10==LZ4HC_CLEVEL_OPT_MIN*/ - { lz4opt, 512,128 }, /*11 */ - { lz4opt,16384,LZ4_OPT_NUM }, /* 12==LZ4HC_CLEVEL_MAX */ - }; - - DEBUGLOG(5, "LZ4HC_compress_generic_internal(src=%p, srcSize=%d)", - src, *srcSizePtr); - - if (limit == fillOutput && dstCapacity < 1) return 0; /* Impossible to store anything */ - if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size (too large or negative) */ - - ctx->end += *srcSizePtr; - if (cLevel < 1) cLevel = LZ4HC_CLEVEL_DEFAULT; /* note : convention is different from lz4frame, maybe something to review */ - cLevel = MIN(LZ4HC_CLEVEL_MAX, cLevel); - { cParams_t const cParam = clTable[cLevel]; - HCfavor_e const favor = ctx->favorDecSpeed ? favorDecompressionSpeed : favorCompressionRatio; - int result; - - if (cParam.strat == lz4hc) { - result = LZ4HC_compress_hashChain(ctx, - src, dst, srcSizePtr, dstCapacity, - cParam.nbSearches, limit, dict); - } else { - assert(cParam.strat == lz4opt); - result = LZ4HC_compress_optimal(ctx, - src, dst, srcSizePtr, dstCapacity, - cParam.nbSearches, cParam.targetLength, limit, - cLevel == LZ4HC_CLEVEL_MAX, /* ultra mode */ - dict, favor); - } - if (result <= 0) ctx->dirty = 1; - return result; - } -} - -static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock); - -static int -LZ4HC_compress_generic_noDictCtx ( - LZ4HC_CCtx_internal* const ctx, - const char* const src, - char* const dst, - int* const srcSizePtr, - int const dstCapacity, - int cLevel, - limitedOutput_directive limit - ) -{ - assert(ctx->dictCtx == NULL); - return LZ4HC_compress_generic_internal(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit, noDictCtx); -} - -static int -LZ4HC_compress_generic_dictCtx ( - LZ4HC_CCtx_internal* const ctx, - const char* const src, - char* const dst, - int* const srcSizePtr, - int const dstCapacity, - int cLevel, - limitedOutput_directive limit - ) -{ - const size_t position = (size_t)(ctx->end - ctx->prefixStart) + (ctx->dictLimit - ctx->lowLimit); - assert(ctx->dictCtx != NULL); - if (position >= 64 KB) { - ctx->dictCtx = NULL; - return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit); - } else if (position == 0 && *srcSizePtr > 4 KB) { - LZ4_memcpy(ctx, ctx->dictCtx, sizeof(LZ4HC_CCtx_internal)); - LZ4HC_setExternalDict(ctx, (const BYTE *)src); - ctx->compressionLevel = (short)cLevel; - return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit); - } else { - return LZ4HC_compress_generic_internal(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit, usingDictCtxHc); - } -} - -static int -LZ4HC_compress_generic ( - LZ4HC_CCtx_internal* const ctx, - const char* const src, - char* const dst, - int* const srcSizePtr, - int const dstCapacity, - int cLevel, - limitedOutput_directive limit - ) -{ - if (ctx->dictCtx == NULL) { - return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit); - } else { - return LZ4HC_compress_generic_dictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit); - } -} - - -int LZ4_sizeofStateHC(void) { return (int)sizeof(LZ4_streamHC_t); } - -static size_t 
LZ4_streamHC_t_alignment(void) -{ -#if LZ4_ALIGN_TEST - typedef struct { char c; LZ4_streamHC_t t; } t_a; - return sizeof(t_a) - sizeof(LZ4_streamHC_t); -#else - return 1; /* effectively disabled */ -#endif -} - -/* state is presumed correctly initialized, - * in which case its size and alignment have already been validate */ -int LZ4_compress_HC_extStateHC_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel) -{ - LZ4HC_CCtx_internal* const ctx = &((LZ4_streamHC_t*)state)->internal_donotuse; - if (!LZ4_isAligned(state, LZ4_streamHC_t_alignment())) return 0; - LZ4_resetStreamHC_fast((LZ4_streamHC_t*)state, compressionLevel); - LZ4HC_init_internal (ctx, (const BYTE*)src); - if (dstCapacity < LZ4_compressBound(srcSize)) - return LZ4HC_compress_generic (ctx, src, dst, &srcSize, dstCapacity, compressionLevel, limitedOutput); - else - return LZ4HC_compress_generic (ctx, src, dst, &srcSize, dstCapacity, compressionLevel, notLimited); -} - -int LZ4_compress_HC_extStateHC (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel) -{ - LZ4_streamHC_t* const ctx = LZ4_initStreamHC(state, sizeof(*ctx)); - if (ctx==NULL) return 0; /* init failure */ - return LZ4_compress_HC_extStateHC_fastReset(state, src, dst, srcSize, dstCapacity, compressionLevel); -} - -int LZ4_compress_HC(const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel) -{ - int cSize; -#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1 - LZ4_streamHC_t* const statePtr = (LZ4_streamHC_t*)ALLOC(sizeof(LZ4_streamHC_t)); - if (statePtr==NULL) return 0; -#else - LZ4_streamHC_t state; - LZ4_streamHC_t* const statePtr = &state; -#endif - DEBUGLOG(5, "LZ4_compress_HC") - cSize = LZ4_compress_HC_extStateHC(statePtr, src, dst, srcSize, dstCapacity, compressionLevel); -#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1 - FREEMEM(statePtr); -#endif - return cSize; -} - -/* state is presumed sized correctly (>= sizeof(LZ4_streamHC_t)) */ -int LZ4_compress_HC_destSize(void* state, const char* source, char* dest, int* sourceSizePtr, int targetDestSize, int cLevel) -{ - LZ4_streamHC_t* const ctx = LZ4_initStreamHC(state, sizeof(*ctx)); - if (ctx==NULL) return 0; /* init failure */ - LZ4HC_init_internal(&ctx->internal_donotuse, (const BYTE*) source); - LZ4_setCompressionLevel(ctx, cLevel); - return LZ4HC_compress_generic(&ctx->internal_donotuse, source, dest, sourceSizePtr, targetDestSize, cLevel, fillOutput); -} - - - -/************************************** -* Streaming Functions -**************************************/ -/* allocation */ -#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) -LZ4_streamHC_t* LZ4_createStreamHC(void) -{ - LZ4_streamHC_t* const state = - (LZ4_streamHC_t*)ALLOC_AND_ZERO(sizeof(LZ4_streamHC_t)); - if (state == NULL) return NULL; - LZ4_setCompressionLevel(state, LZ4HC_CLEVEL_DEFAULT); - return state; -} - -int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr) -{ - DEBUGLOG(4, "LZ4_freeStreamHC(%p)", LZ4_streamHCPtr); - if (!LZ4_streamHCPtr) return 0; /* support free on NULL */ - FREEMEM(LZ4_streamHCPtr); - return 0; -} -#endif - - -LZ4_streamHC_t* LZ4_initStreamHC (void* buffer, size_t size) -{ - LZ4_streamHC_t* const LZ4_streamHCPtr = (LZ4_streamHC_t*)buffer; - DEBUGLOG(4, "LZ4_initStreamHC(%p, %u)", buffer, (unsigned)size); - /* check conditions */ - if (buffer == NULL) return NULL; - if (size < sizeof(LZ4_streamHC_t)) return NULL; - if (!LZ4_isAligned(buffer, LZ4_streamHC_t_alignment())) return NULL; - /* 
init */ - { LZ4HC_CCtx_internal* const hcstate = &(LZ4_streamHCPtr->internal_donotuse); - MEM_INIT(hcstate, 0, sizeof(*hcstate)); } - LZ4_setCompressionLevel(LZ4_streamHCPtr, LZ4HC_CLEVEL_DEFAULT); - return LZ4_streamHCPtr; -} - -/* just a stub */ -void LZ4_resetStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel) -{ - LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr)); - LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel); -} - -void LZ4_resetStreamHC_fast (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel) -{ - LZ4HC_CCtx_internal* const s = &LZ4_streamHCPtr->internal_donotuse; - DEBUGLOG(5, "LZ4_resetStreamHC_fast(%p, %d)", LZ4_streamHCPtr, compressionLevel); - if (s->dirty) { - LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr)); - } else { - assert(s->end >= s->prefixStart); - s->dictLimit += (U32)(s->end - s->prefixStart); - s->prefixStart = NULL; - s->end = NULL; - s->dictCtx = NULL; - } - LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel); -} - -void LZ4_setCompressionLevel(LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel) -{ - DEBUGLOG(5, "LZ4_setCompressionLevel(%p, %d)", LZ4_streamHCPtr, compressionLevel); - if (compressionLevel < 1) compressionLevel = LZ4HC_CLEVEL_DEFAULT; - if (compressionLevel > LZ4HC_CLEVEL_MAX) compressionLevel = LZ4HC_CLEVEL_MAX; - LZ4_streamHCPtr->internal_donotuse.compressionLevel = (short)compressionLevel; -} - -void LZ4_favorDecompressionSpeed(LZ4_streamHC_t* LZ4_streamHCPtr, int favor) -{ - LZ4_streamHCPtr->internal_donotuse.favorDecSpeed = (favor!=0); -} - -/* LZ4_loadDictHC() : - * LZ4_streamHCPtr is presumed properly initialized */ -int LZ4_loadDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, - const char* dictionary, int dictSize) -{ - LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse; - DEBUGLOG(4, "LZ4_loadDictHC(ctx:%p, dict:%p, dictSize:%d)", LZ4_streamHCPtr, dictionary, dictSize); - assert(LZ4_streamHCPtr != NULL); - if (dictSize > 64 KB) { - dictionary += (size_t)dictSize - 64 KB; - dictSize = 64 KB; - } - /* need a full initialization, there are bad side-effects when using resetFast() */ - { int const cLevel = ctxPtr->compressionLevel; - LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr)); - LZ4_setCompressionLevel(LZ4_streamHCPtr, cLevel); - } - LZ4HC_init_internal (ctxPtr, (const BYTE*)dictionary); - ctxPtr->end = (const BYTE*)dictionary + dictSize; - if (dictSize >= LZ4HC_HASHSIZE) LZ4HC_Insert (ctxPtr, ctxPtr->end-3); - return dictSize; -} - -void LZ4_attach_HC_dictionary(LZ4_streamHC_t *working_stream, const LZ4_streamHC_t *dictionary_stream) { - working_stream->internal_donotuse.dictCtx = dictionary_stream != NULL ? 
&(dictionary_stream->internal_donotuse) : NULL; -} - -/* compression */ - -static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock) -{ - DEBUGLOG(4, "LZ4HC_setExternalDict(%p, %p)", ctxPtr, newBlock); - if (ctxPtr->end >= ctxPtr->prefixStart + 4) - LZ4HC_Insert (ctxPtr, ctxPtr->end-3); /* Referencing remaining dictionary content */ - - /* Only one memory segment for extDict, so any previous extDict is lost at this stage */ - ctxPtr->lowLimit = ctxPtr->dictLimit; - ctxPtr->dictStart = ctxPtr->prefixStart; - ctxPtr->dictLimit += (U32)(ctxPtr->end - ctxPtr->prefixStart); - ctxPtr->prefixStart = newBlock; - ctxPtr->end = newBlock; - ctxPtr->nextToUpdate = ctxPtr->dictLimit; /* match referencing will resume from there */ - - /* cannot reference an extDict and a dictCtx at the same time */ - ctxPtr->dictCtx = NULL; -} - -static int -LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr, - const char* src, char* dst, - int* srcSizePtr, int dstCapacity, - limitedOutput_directive limit) -{ - LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse; - DEBUGLOG(5, "LZ4_compressHC_continue_generic(ctx=%p, src=%p, srcSize=%d, limit=%d)", - LZ4_streamHCPtr, src, *srcSizePtr, limit); - assert(ctxPtr != NULL); - /* auto-init if forgotten */ - if (ctxPtr->prefixStart == NULL) LZ4HC_init_internal (ctxPtr, (const BYTE*) src); - - /* Check overflow */ - if ((size_t)(ctxPtr->end - ctxPtr->prefixStart) + ctxPtr->dictLimit > 2 GB) { - size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->prefixStart); - if (dictSize > 64 KB) dictSize = 64 KB; - LZ4_loadDictHC(LZ4_streamHCPtr, (const char*)(ctxPtr->end) - dictSize, (int)dictSize); - } - - /* Check if blocks follow each other */ - if ((const BYTE*)src != ctxPtr->end) - LZ4HC_setExternalDict(ctxPtr, (const BYTE*)src); - - /* Check overlapping input/dictionary space */ - { const BYTE* sourceEnd = (const BYTE*) src + *srcSizePtr; - const BYTE* const dictBegin = ctxPtr->dictStart; - const BYTE* const dictEnd = ctxPtr->dictStart + (ctxPtr->dictLimit - ctxPtr->lowLimit); - if ((sourceEnd > dictBegin) && ((const BYTE*)src < dictEnd)) { - if (sourceEnd > dictEnd) sourceEnd = dictEnd; - ctxPtr->lowLimit += (U32)(sourceEnd - ctxPtr->dictStart); - ctxPtr->dictStart += (U32)(sourceEnd - ctxPtr->dictStart); - /* invalidate dictionary is it's too small */ - if (ctxPtr->dictLimit - ctxPtr->lowLimit < LZ4HC_HASHSIZE) { - ctxPtr->lowLimit = ctxPtr->dictLimit; - ctxPtr->dictStart = ctxPtr->prefixStart; - } } } - - return LZ4HC_compress_generic (ctxPtr, src, dst, srcSizePtr, dstCapacity, ctxPtr->compressionLevel, limit); -} - -int LZ4_compress_HC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int srcSize, int dstCapacity) -{ - DEBUGLOG(5, "LZ4_compress_HC_continue"); - if (dstCapacity < LZ4_compressBound(srcSize)) - return LZ4_compressHC_continue_generic (LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, limitedOutput); - else - return LZ4_compressHC_continue_generic (LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, notLimited); -} - -int LZ4_compress_HC_continue_destSize (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int* srcSizePtr, int targetDestSize) -{ - return LZ4_compressHC_continue_generic(LZ4_streamHCPtr, src, dst, srcSizePtr, targetDestSize, fillOutput); -} - - - -/* LZ4_saveDictHC : - * save history content - * into a user-provided buffer - * which is then used to continue compression - */ -int LZ4_saveDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, char* safeBuffer, int dictSize) -{ - 
LZ4HC_CCtx_internal* const streamPtr = &LZ4_streamHCPtr->internal_donotuse; - int const prefixSize = (int)(streamPtr->end - streamPtr->prefixStart); - DEBUGLOG(5, "LZ4_saveDictHC(%p, %p, %d)", LZ4_streamHCPtr, safeBuffer, dictSize); - assert(prefixSize >= 0); - if (dictSize > 64 KB) dictSize = 64 KB; - if (dictSize < 4) dictSize = 0; - if (dictSize > prefixSize) dictSize = prefixSize; - if (safeBuffer == NULL) assert(dictSize == 0); - if (dictSize > 0) - LZ4_memmove(safeBuffer, streamPtr->end - dictSize, (size_t)dictSize); - { U32 const endIndex = (U32)(streamPtr->end - streamPtr->prefixStart) + streamPtr->dictLimit; - streamPtr->end = (safeBuffer == NULL) ? NULL : (const BYTE*)safeBuffer + dictSize; - streamPtr->prefixStart = (const BYTE*)safeBuffer; - streamPtr->dictLimit = endIndex - (U32)dictSize; - streamPtr->lowLimit = endIndex - (U32)dictSize; - streamPtr->dictStart = streamPtr->prefixStart; - if (streamPtr->nextToUpdate < streamPtr->dictLimit) - streamPtr->nextToUpdate = streamPtr->dictLimit; - } - return dictSize; -} - - -/*************************************************** -* Deprecated Functions -***************************************************/ - -/* These functions currently generate deprecation warnings */ - -/* Wrappers for deprecated compression functions */ -int LZ4_compressHC(const char* src, char* dst, int srcSize) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), 0); } -int LZ4_compressHC_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, 0); } -int LZ4_compressHC2(const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); } -int LZ4_compressHC2_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, cLevel); } -int LZ4_compressHC_withStateHC (void* state, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, LZ4_compressBound(srcSize), 0); } -int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, maxDstSize, 0); } -int LZ4_compressHC2_withStateHC (void* state, const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); } -int LZ4_compressHC2_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, maxDstSize, cLevel); } -int LZ4_compressHC_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, LZ4_compressBound(srcSize)); } -int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, maxDstSize); } - - -/* Deprecated streaming functions */ -int LZ4_sizeofStreamStateHC(void) { return sizeof(LZ4_streamHC_t); } - -/* state is presumed correctly sized, aka >= sizeof(LZ4_streamHC_t) - * @return : 0 on success, !=0 if error */ -int LZ4_resetStreamStateHC(void* state, char* inputBuffer) -{ - LZ4_streamHC_t* const hc4 = LZ4_initStreamHC(state, sizeof(*hc4)); - if (hc4 == NULL) return 1; /* init failed */ - LZ4HC_init_internal (&hc4->internal_donotuse, 
(const BYTE*)inputBuffer); - return 0; -} - -#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) -void* LZ4_createHC (const char* inputBuffer) -{ - LZ4_streamHC_t* const hc4 = LZ4_createStreamHC(); - if (hc4 == NULL) return NULL; /* not enough memory */ - LZ4HC_init_internal (&hc4->internal_donotuse, (const BYTE*)inputBuffer); - return hc4; -} - -int LZ4_freeHC (void* LZ4HC_Data) -{ - if (!LZ4HC_Data) return 0; /* support free on NULL */ - FREEMEM(LZ4HC_Data); - return 0; -} -#endif - -int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int cLevel) -{ - return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, 0, cLevel, notLimited); -} - -int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int dstCapacity, int cLevel) -{ - return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, dstCapacity, cLevel, limitedOutput); -} - -char* LZ4_slideInputBufferHC(void* LZ4HC_Data) -{ - LZ4HC_CCtx_internal* const s = &((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse; - const BYTE* const bufferStart = s->prefixStart - s->dictLimit + s->lowLimit; - LZ4_resetStreamHC_fast((LZ4_streamHC_t*)LZ4HC_Data, s->compressionLevel); - /* ugly conversion trick, required to evade (const char*) -> (char*) cast-qual warning :( */ - return (char*)(uptrval)bufferStart; -} - - -/* ================================================ - * LZ4 Optimal parser (levels [LZ4HC_CLEVEL_OPT_MIN - LZ4HC_CLEVEL_MAX]) - * ===============================================*/ -typedef struct { - int price; - int off; - int mlen; - int litlen; -} LZ4HC_optimal_t; - -/* price in bytes */ -LZ4_FORCE_INLINE int LZ4HC_literalsPrice(int const litlen) -{ - int price = litlen; - assert(litlen >= 0); - if (litlen >= (int)RUN_MASK) - price += 1 + ((litlen-(int)RUN_MASK) / 255); - return price; -} - - -/* requires mlen >= MINMATCH */ -LZ4_FORCE_INLINE int LZ4HC_sequencePrice(int litlen, int mlen) -{ - int price = 1 + 2 ; /* token + 16-bit offset */ - assert(litlen >= 0); - assert(mlen >= MINMATCH); - - price += LZ4HC_literalsPrice(litlen); - - if (mlen >= (int)(ML_MASK+MINMATCH)) - price += 1 + ((mlen-(int)(ML_MASK+MINMATCH)) / 255); - - return price; -} - - - -LZ4_FORCE_INLINE LZ4HC_match_t -LZ4HC_FindLongerMatch(LZ4HC_CCtx_internal* const ctx, - const BYTE* ip, const BYTE* const iHighLimit, - int minLen, int nbSearches, - const dictCtx_directive dict, - const HCfavor_e favorDecSpeed) -{ - LZ4HC_match_t const match0 = { 0 , 0 }; - /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos), - * but this won't be the case here, as we define iLowLimit==ip, - ** so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */ - LZ4HC_match_t md = LZ4HC_InsertAndGetWiderMatch(ctx, ip, ip, iHighLimit, minLen, &ip, nbSearches, 1 /*patternAnalysis*/, 1 /*chainSwap*/, dict, favorDecSpeed); - if (md.len <= minLen) return match0; - if (favorDecSpeed) { - if ((md.len>18) & (md.len<=36)) md.len=18; /* favor shortcut */ - } - return md; -} - - -static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx, - const char* const source, - char* dst, - int* srcSizePtr, - int dstCapacity, - int const nbSearches, - size_t sufficient_len, - const limitedOutput_directive limit, - int const fullUpdate, - const dictCtx_directive dict, - const HCfavor_e favorDecSpeed) -{ - int retval = 0; -#define TRAILING_LITERALS 3 -#if 
defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1 - LZ4HC_optimal_t* const opt = (LZ4HC_optimal_t*)ALLOC(sizeof(LZ4HC_optimal_t) * (LZ4_OPT_NUM + TRAILING_LITERALS)); -#else - LZ4HC_optimal_t opt[LZ4_OPT_NUM + TRAILING_LITERALS]; /* ~64 KB, which is a bit large for stack... */ -#endif - - const BYTE* ip = (const BYTE*) source; - const BYTE* anchor = ip; - const BYTE* const iend = ip + *srcSizePtr; - const BYTE* const mflimit = iend - MFLIMIT; - const BYTE* const matchlimit = iend - LASTLITERALS; - BYTE* op = (BYTE*) dst; - BYTE* opSaved = (BYTE*) dst; - BYTE* oend = op + dstCapacity; - int ovml = MINMATCH; /* overflow - last sequence */ - int ovoff = 0; - - /* init */ -#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1 - if (opt == NULL) goto _return_label; -#endif - DEBUGLOG(5, "LZ4HC_compress_optimal(dst=%p, dstCapa=%u)", dst, (unsigned)dstCapacity); - *srcSizePtr = 0; - if (limit == fillOutput) oend -= LASTLITERALS; /* Hack for support LZ4 format restriction */ - if (sufficient_len >= LZ4_OPT_NUM) sufficient_len = LZ4_OPT_NUM-1; - - /* Main Loop */ - while (ip <= mflimit) { - int const llen = (int)(ip - anchor); - int best_mlen, best_off; - int cur, last_match_pos = 0; - - LZ4HC_match_t const firstMatch = LZ4HC_FindLongerMatch(ctx, ip, matchlimit, MINMATCH-1, nbSearches, dict, favorDecSpeed); - if (firstMatch.len==0) { ip++; continue; } - - if ((size_t)firstMatch.len > sufficient_len) { - /* good enough solution : immediate encoding */ - int const firstML = firstMatch.len; - opSaved = op; - if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), firstML, firstMatch.off, limit, oend) ) { /* updates ip, op and anchor */ - ovml = firstML; - ovoff = firstMatch.off; - goto _dest_overflow; - } - continue; - } - - /* set prices for first positions (literals) */ - { int rPos; - for (rPos = 0 ; rPos < MINMATCH ; rPos++) { - int const cost = LZ4HC_literalsPrice(llen + rPos); - opt[rPos].mlen = 1; - opt[rPos].off = 0; - opt[rPos].litlen = llen + rPos; - opt[rPos].price = cost; - DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup", - rPos, cost, opt[rPos].litlen); - } } - /* set prices using initial match */ - { int mlen = MINMATCH; - int const matchML = firstMatch.len; /* necessarily < sufficient_len < LZ4_OPT_NUM */ - int const offset = firstMatch.off; - assert(matchML < LZ4_OPT_NUM); - for ( ; mlen <= matchML ; mlen++) { - int const cost = LZ4HC_sequencePrice(llen, mlen); - opt[mlen].mlen = mlen; - opt[mlen].off = offset; - opt[mlen].litlen = llen; - opt[mlen].price = cost; - DEBUGLOG(7, "rPos:%3i => price:%3i (matchlen=%i) -- initial setup", - mlen, cost, mlen); - } } - last_match_pos = firstMatch.len; - { int addLit; - for (addLit = 1; addLit <= TRAILING_LITERALS; addLit ++) { - opt[last_match_pos+addLit].mlen = 1; /* literal */ - opt[last_match_pos+addLit].off = 0; - opt[last_match_pos+addLit].litlen = addLit; - opt[last_match_pos+addLit].price = opt[last_match_pos].price + LZ4HC_literalsPrice(addLit); - DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup", - last_match_pos+addLit, opt[last_match_pos+addLit].price, addLit); - } } - - /* check further positions */ - for (cur = 1; cur < last_match_pos; cur++) { - const BYTE* const curPtr = ip + cur; - LZ4HC_match_t newMatch; - - if (curPtr > mflimit) break; - DEBUGLOG(7, "rPos:%u[%u] vs [%u]%u", - cur, opt[cur].price, opt[cur+1].price, cur+1); - if (fullUpdate) { - /* not useful to search here if next position has same (or lower) cost */ - if ( (opt[cur+1].price <= opt[cur].price) - /* in some cases, next position has same cost, 
but cost rises sharply after, so a small match would still be beneficial */ - && (opt[cur+MINMATCH].price < opt[cur].price + 3/*min seq price*/) ) - continue; - } else { - /* not useful to search here if next position has same (or lower) cost */ - if (opt[cur+1].price <= opt[cur].price) continue; - } - - DEBUGLOG(7, "search at rPos:%u", cur); - if (fullUpdate) - newMatch = LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, MINMATCH-1, nbSearches, dict, favorDecSpeed); - else - /* only test matches of minimum length; slightly faster, but misses a few bytes */ - newMatch = LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, last_match_pos - cur, nbSearches, dict, favorDecSpeed); - if (!newMatch.len) continue; - - if ( ((size_t)newMatch.len > sufficient_len) - || (newMatch.len + cur >= LZ4_OPT_NUM) ) { - /* immediate encoding */ - best_mlen = newMatch.len; - best_off = newMatch.off; - last_match_pos = cur + 1; - goto encode; - } - - /* before match : set price with literals at beginning */ - { int const baseLitlen = opt[cur].litlen; - int litlen; - for (litlen = 1; litlen < MINMATCH; litlen++) { - int const price = opt[cur].price - LZ4HC_literalsPrice(baseLitlen) + LZ4HC_literalsPrice(baseLitlen+litlen); - int const pos = cur + litlen; - if (price < opt[pos].price) { - opt[pos].mlen = 1; /* literal */ - opt[pos].off = 0; - opt[pos].litlen = baseLitlen+litlen; - opt[pos].price = price; - DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i)", - pos, price, opt[pos].litlen); - } } } - - /* set prices using match at position = cur */ - { int const matchML = newMatch.len; - int ml = MINMATCH; - - assert(cur + newMatch.len < LZ4_OPT_NUM); - for ( ; ml <= matchML ; ml++) { - int const pos = cur + ml; - int const offset = newMatch.off; - int price; - int ll; - DEBUGLOG(7, "testing price rPos %i (last_match_pos=%i)", - pos, last_match_pos); - if (opt[cur].mlen == 1) { - ll = opt[cur].litlen; - price = ((cur > ll) ? 
opt[cur - ll].price : 0) - + LZ4HC_sequencePrice(ll, ml); - } else { - ll = 0; - price = opt[cur].price + LZ4HC_sequencePrice(0, ml); - } - - assert((U32)favorDecSpeed <= 1); - if (pos > last_match_pos+TRAILING_LITERALS - || price <= opt[pos].price - (int)favorDecSpeed) { - DEBUGLOG(7, "rPos:%3i => price:%3i (matchlen=%i)", - pos, price, ml); - assert(pos < LZ4_OPT_NUM); - if ( (ml == matchML) /* last pos of last match */ - && (last_match_pos < pos) ) - last_match_pos = pos; - opt[pos].mlen = ml; - opt[pos].off = offset; - opt[pos].litlen = ll; - opt[pos].price = price; - } } } - /* complete following positions with literals */ - { int addLit; - for (addLit = 1; addLit <= TRAILING_LITERALS; addLit ++) { - opt[last_match_pos+addLit].mlen = 1; /* literal */ - opt[last_match_pos+addLit].off = 0; - opt[last_match_pos+addLit].litlen = addLit; - opt[last_match_pos+addLit].price = opt[last_match_pos].price + LZ4HC_literalsPrice(addLit); - DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i)", last_match_pos+addLit, opt[last_match_pos+addLit].price, addLit); - } } - } /* for (cur = 1; cur <= last_match_pos; cur++) */ - - assert(last_match_pos < LZ4_OPT_NUM + TRAILING_LITERALS); - best_mlen = opt[last_match_pos].mlen; - best_off = opt[last_match_pos].off; - cur = last_match_pos - best_mlen; - -encode: /* cur, last_match_pos, best_mlen, best_off must be set */ - assert(cur < LZ4_OPT_NUM); - assert(last_match_pos >= 1); /* == 1 when only one candidate */ - DEBUGLOG(6, "reverse traversal, looking for shortest path (last_match_pos=%i)", last_match_pos); - { int candidate_pos = cur; - int selected_matchLength = best_mlen; - int selected_offset = best_off; - while (1) { /* from end to beginning */ - int const next_matchLength = opt[candidate_pos].mlen; /* can be 1, means literal */ - int const next_offset = opt[candidate_pos].off; - DEBUGLOG(7, "pos %i: sequence length %i", candidate_pos, selected_matchLength); - opt[candidate_pos].mlen = selected_matchLength; - opt[candidate_pos].off = selected_offset; - selected_matchLength = next_matchLength; - selected_offset = next_offset; - if (next_matchLength > candidate_pos) break; /* last match elected, first match to encode */ - assert(next_matchLength > 0); /* can be 1, means literal */ - candidate_pos -= next_matchLength; - } } - - /* encode all recorded sequences in order */ - { int rPos = 0; /* relative position (to ip) */ - while (rPos < last_match_pos) { - int const ml = opt[rPos].mlen; - int const offset = opt[rPos].off; - if (ml == 1) { ip++; rPos++; continue; } /* literal; note: can end up with several literals, in which case, skip them */ - rPos += ml; - assert(ml >= MINMATCH); - assert((offset >= 1) && (offset <= LZ4_DISTANCE_MAX)); - opSaved = op; - if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, offset, limit, oend) ) { /* updates ip, op and anchor */ - ovml = ml; - ovoff = offset; - goto _dest_overflow; - } } } - } /* while (ip <= mflimit) */ - -_last_literals: - /* Encode Last Literals */ - { size_t lastRunSize = (size_t)(iend - anchor); /* literals */ - size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255; - size_t const totalSize = 1 + llAdd + lastRunSize; - if (limit == fillOutput) oend += LASTLITERALS; /* restore correct value */ - if (limit && (op + totalSize > oend)) { - if (limit == limitedOutput) { /* Check output limit */ - retval = 0; - goto _return_label; - } - /* adapt lastRunSize to fill 'dst' */ - lastRunSize = (size_t)(oend - op) - 1 /*token*/; - llAdd = (lastRunSize + 256 - RUN_MASK) / 256; - lastRunSize -= llAdd; - } - 
DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize); - ip = anchor + lastRunSize; /* can be != iend if limit==fillOutput */ - - if (lastRunSize >= RUN_MASK) { - size_t accumulator = lastRunSize - RUN_MASK; - *op++ = (RUN_MASK << ML_BITS); - for(; accumulator >= 255 ; accumulator -= 255) *op++ = 255; - *op++ = (BYTE) accumulator; - } else { - *op++ = (BYTE)(lastRunSize << ML_BITS); - } - LZ4_memcpy(op, anchor, lastRunSize); - op += lastRunSize; - } - - /* End */ - *srcSizePtr = (int) (((const char*)ip) - source); - retval = (int) ((char*)op-dst); - goto _return_label; - -_dest_overflow: -if (limit == fillOutput) { - /* Assumption : ip, anchor, ovml and ovref must be set correctly */ - size_t const ll = (size_t)(ip - anchor); - size_t const ll_addbytes = (ll + 240) / 255; - size_t const ll_totalCost = 1 + ll_addbytes + ll; - BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */ - DEBUGLOG(6, "Last sequence overflowing (only %i bytes remaining)", (int)(oend-1-opSaved)); - op = opSaved; /* restore correct out pointer */ - if (op + ll_totalCost <= maxLitPos) { - /* ll validated; now adjust match length */ - size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost)); - size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255); - assert(maxMlSize < INT_MAX); assert(ovml >= 0); - if ((size_t)ovml > maxMlSize) ovml = (int)maxMlSize; - if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + ovml >= MFLIMIT) { - DEBUGLOG(6, "Space to end : %i + ml (%i)", (int)((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1), ovml); - DEBUGLOG(6, "Before : ip = %p, anchor = %p", ip, anchor); - LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ovml, ovoff, notLimited, oend); - DEBUGLOG(6, "After : ip = %p, anchor = %p", ip, anchor); - } } - goto _last_literals; -} -_return_label: -#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1 - FREEMEM(opt); -#endif - return retval; -} diff --git a/CompressSave/LZ4WrapC/lz4/lz4hc.h b/CompressSave/LZ4WrapC/lz4/lz4hc.h deleted file mode 100644 index e937acf..0000000 --- a/CompressSave/LZ4WrapC/lz4/lz4hc.h +++ /dev/null @@ -1,413 +0,0 @@ -/* - LZ4 HC - High Compression Mode of LZ4 - Header File - Copyright (C) 2011-2020, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - LZ4 source repository : https://github.com/lz4/lz4 - - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c -*/ -#ifndef LZ4_HC_H_19834876238432 -#define LZ4_HC_H_19834876238432 - -#if defined (__cplusplus) -extern "C" { -#endif - -/* --- Dependency --- */ -/* note : lz4hc requires lz4.h/lz4.c for compilation */ -#include "lz4.h" /* stddef, LZ4LIB_API, LZ4_DEPRECATED */ - - -/* --- Useful constants --- */ -#define LZ4HC_CLEVEL_MIN 3 -#define LZ4HC_CLEVEL_DEFAULT 9 -#define LZ4HC_CLEVEL_OPT_MIN 10 -#define LZ4HC_CLEVEL_MAX 12 - - -/*-************************************ - * Block Compression - **************************************/ -/*! LZ4_compress_HC() : - * Compress data from `src` into `dst`, using the powerful but slower "HC" algorithm. - * `dst` must be already allocated. - * Compression is guaranteed to succeed if `dstCapacity >= LZ4_compressBound(srcSize)` (see "lz4.h") - * Max supported `srcSize` value is LZ4_MAX_INPUT_SIZE (see "lz4.h") - * `compressionLevel` : any value between 1 and LZ4HC_CLEVEL_MAX will work. - * Values > LZ4HC_CLEVEL_MAX behave the same as LZ4HC_CLEVEL_MAX. - * @return : the number of bytes written into 'dst' - * or 0 if compression fails. - */ -LZ4LIB_API int LZ4_compress_HC (const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel); - - -/* Note : - * Decompression functions are provided within "lz4.h" (BSD license) - */ - - -/*! LZ4_compress_HC_extStateHC() : - * Same as LZ4_compress_HC(), but using an externally allocated memory segment for `state`. - * `state` size is provided by LZ4_sizeofStateHC(). - * Memory segment must be aligned on 8-bytes boundaries (which a normal malloc() should do properly). - */ -LZ4LIB_API int LZ4_sizeofStateHC(void); -LZ4LIB_API int LZ4_compress_HC_extStateHC(void* stateHC, const char* src, char* dst, int srcSize, int maxDstSize, int compressionLevel); - - -/*! LZ4_compress_HC_destSize() : v1.9.0+ - * Will compress as much data as possible from `src` - * to fit into `targetDstSize` budget. - * Result is provided in 2 parts : - * @return : the number of bytes written into 'dst' (necessarily <= targetDstSize) - * or 0 if compression fails. - * `srcSizePtr` : on success, *srcSizePtr is updated to indicate how much bytes were read from `src` - */ -LZ4LIB_API int LZ4_compress_HC_destSize(void* stateHC, - const char* src, char* dst, - int* srcSizePtr, int targetDstSize, - int compressionLevel); - - -/*-************************************ - * Streaming Compression - * Bufferless synchronous API - **************************************/ - typedef union LZ4_streamHC_u LZ4_streamHC_t; /* incomplete type (defined later) */ - -/*! LZ4_createStreamHC() and LZ4_freeStreamHC() : - * These functions create and release memory for LZ4 HC streaming state. - * Newly created states are automatically initialized. - * A same state can be used multiple times consecutively, - * starting with LZ4_resetStreamHC_fast() to start a new stream of blocks. 
- */ -LZ4LIB_API LZ4_streamHC_t* LZ4_createStreamHC(void); -LZ4LIB_API int LZ4_freeStreamHC (LZ4_streamHC_t* streamHCPtr); - -/* - These functions compress data in successive blocks of any size, - using previous blocks as dictionary, to improve compression ratio. - One key assumption is that previous blocks (up to 64 KB) remain read-accessible while compressing next blocks. - There is an exception for ring buffers, which can be smaller than 64 KB. - Ring-buffer scenario is automatically detected and handled within LZ4_compress_HC_continue(). - - Before starting compression, state must be allocated and properly initialized. - LZ4_createStreamHC() does both, though compression level is set to LZ4HC_CLEVEL_DEFAULT. - - Selecting the compression level can be done with LZ4_resetStreamHC_fast() (starts a new stream) - or LZ4_setCompressionLevel() (anytime, between blocks in the same stream) (experimental). - LZ4_resetStreamHC_fast() only works on states which have been properly initialized at least once, - which is automatically the case when state is created using LZ4_createStreamHC(). - - After reset, a first "fictional block" can be designated as initial dictionary, - using LZ4_loadDictHC() (Optional). - - Invoke LZ4_compress_HC_continue() to compress each successive block. - The number of blocks is unlimited. - Previous input blocks, including initial dictionary when present, - must remain accessible and unmodified during compression. - - It's allowed to update compression level anytime between blocks, - using LZ4_setCompressionLevel() (experimental). - - 'dst' buffer should be sized to handle worst case scenarios - (see LZ4_compressBound(), it ensures compression success). - In case of failure, the API does not guarantee recovery, - so the state _must_ be reset. - To ensure compression success - whenever `dst` buffer size cannot be made >= LZ4_compressBound(), - consider using LZ4_compress_HC_continue_destSize(). - - Whenever previous input blocks can't be preserved unmodified in-place during compression of next blocks, - it's possible to copy the last blocks into a more stable memory space, using LZ4_saveDictHC(). - Return value of LZ4_saveDictHC() is the size of dictionary effectively saved into 'safeBuffer' (<= 64 KB) - - After completing a streaming compression, - it's possible to start a new stream of blocks, using the same LZ4_streamHC_t state, - just by resetting it, using LZ4_resetStreamHC_fast(). -*/ - -LZ4LIB_API void LZ4_resetStreamHC_fast(LZ4_streamHC_t* streamHCPtr, int compressionLevel); /* v1.9.0+ */ -LZ4LIB_API int LZ4_loadDictHC (LZ4_streamHC_t* streamHCPtr, const char* dictionary, int dictSize); - -LZ4LIB_API int LZ4_compress_HC_continue (LZ4_streamHC_t* streamHCPtr, - const char* src, char* dst, - int srcSize, int maxDstSize); - -/*! LZ4_compress_HC_continue_destSize() : v1.9.0+ - * Similar to LZ4_compress_HC_continue(), - * but will read as much data as possible from `src` - * to fit into `targetDstSize` budget. - * Result is provided into 2 parts : - * @return : the number of bytes written into 'dst' (necessarily <= targetDstSize) - * or 0 if compression fails. - * `srcSizePtr` : on success, *srcSizePtr will be updated to indicate how much bytes were read from `src`. - * Note that this function may not consume the entire input. 
- */ -LZ4LIB_API int LZ4_compress_HC_continue_destSize(LZ4_streamHC_t* LZ4_streamHCPtr, - const char* src, char* dst, - int* srcSizePtr, int targetDstSize); - -LZ4LIB_API int LZ4_saveDictHC (LZ4_streamHC_t* streamHCPtr, char* safeBuffer, int maxDictSize); - - - -/*^********************************************** - * !!!!!! STATIC LINKING ONLY !!!!!! - ***********************************************/ - -/*-****************************************************************** - * PRIVATE DEFINITIONS : - * Do not use these definitions directly. - * They are merely exposed to allow static allocation of `LZ4_streamHC_t`. - * Declare an `LZ4_streamHC_t` directly, rather than any type below. - * Even then, only do so in the context of static linking, as definitions may change between versions. - ********************************************************************/ - -#define LZ4HC_DICTIONARY_LOGSIZE 16 -#define LZ4HC_MAXD (1<= LZ4HC_CLEVEL_OPT_MIN. - */ -LZ4LIB_STATIC_API void LZ4_favorDecompressionSpeed( - LZ4_streamHC_t* LZ4_streamHCPtr, int favor); - -/*! LZ4_resetStreamHC_fast() : v1.9.0+ - * When an LZ4_streamHC_t is known to be in a internally coherent state, - * it can often be prepared for a new compression with almost no work, only - * sometimes falling back to the full, expensive reset that is always required - * when the stream is in an indeterminate state (i.e., the reset performed by - * LZ4_resetStreamHC()). - * - * LZ4_streamHCs are guaranteed to be in a valid state when: - * - returned from LZ4_createStreamHC() - * - reset by LZ4_resetStreamHC() - * - memset(stream, 0, sizeof(LZ4_streamHC_t)) - * - the stream was in a valid state and was reset by LZ4_resetStreamHC_fast() - * - the stream was in a valid state and was then used in any compression call - * that returned success - * - the stream was in an indeterminate state and was used in a compression - * call that fully reset the state (LZ4_compress_HC_extStateHC()) and that - * returned success - * - * Note: - * A stream that was last used in a compression call that returned an error - * may be passed to this function. However, it will be fully reset, which will - * clear any existing history and settings from the context. - */ -LZ4LIB_STATIC_API void LZ4_resetStreamHC_fast( - LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel); - -/*! LZ4_compress_HC_extStateHC_fastReset() : - * A variant of LZ4_compress_HC_extStateHC(). - * - * Using this variant avoids an expensive initialization step. It is only safe - * to call if the state buffer is known to be correctly initialized already - * (see above comment on LZ4_resetStreamHC_fast() for a definition of - * "correctly initialized"). From a high level, the difference is that this - * function initializes the provided state with a call to - * LZ4_resetStreamHC_fast() while LZ4_compress_HC_extStateHC() starts with a - * call to LZ4_resetStreamHC(). - */ -LZ4LIB_STATIC_API int LZ4_compress_HC_extStateHC_fastReset ( - void* state, - const char* src, char* dst, - int srcSize, int dstCapacity, - int compressionLevel); - -/*! LZ4_attach_HC_dictionary() : - * This is an experimental API that allows for the efficient use of a - * static dictionary many times. - * - * Rather than re-loading the dictionary buffer into a working context before - * each compression, or copying a pre-loaded dictionary's LZ4_streamHC_t into a - * working LZ4_streamHC_t, this function introduces a no-copy setup mechanism, - * in which the working stream references the dictionary stream in-place. 
- * - * Several assumptions are made about the state of the dictionary stream. - * Currently, only streams which have been prepared by LZ4_loadDictHC() should - * be expected to work. - * - * Alternatively, the provided dictionary stream pointer may be NULL, in which - * case any existing dictionary stream is unset. - * - * A dictionary should only be attached to a stream without any history (i.e., - * a stream that has just been reset). - * - * The dictionary will remain attached to the working stream only for the - * current stream session. Calls to LZ4_resetStreamHC(_fast) will remove the - * dictionary context association from the working stream. The dictionary - * stream (and source buffer) must remain in-place / accessible / unchanged - * through the lifetime of the stream session. - */ -LZ4LIB_STATIC_API void LZ4_attach_HC_dictionary( - LZ4_streamHC_t *working_stream, - const LZ4_streamHC_t *dictionary_stream); - -#if defined (__cplusplus) -} -#endif - -#endif /* LZ4_HC_SLO_098092834 */ -#endif /* LZ4_HC_STATIC_LINKING_ONLY */ diff --git a/CompressSave/LZ4WrapC/lz4/xxhash.c b/CompressSave/LZ4WrapC/lz4/xxhash.c deleted file mode 100644 index ff28749..0000000 --- a/CompressSave/LZ4WrapC/lz4/xxhash.c +++ /dev/null @@ -1,1030 +0,0 @@ -/* -* xxHash - Fast Hash algorithm -* Copyright (C) 2012-2016, Yann Collet -* -* BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions are -* met: -* -* * Redistributions of source code must retain the above copyright -* notice, this list of conditions and the following disclaimer. -* * Redistributions in binary form must reproduce the above -* copyright notice, this list of conditions and the following disclaimer -* in the documentation and/or other materials provided with the -* distribution. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -* -* You can contact the author at : -* - xxHash homepage: http://www.xxhash.com -* - xxHash source repository : https://github.com/Cyan4973/xxHash -*/ - - -/* ************************************* -* Tuning parameters -***************************************/ -/*!XXH_FORCE_MEMORY_ACCESS : - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. 
- * Method 2 : direct access. This method doesn't depend on compiler but violate C standard. - * It can generate buggy code on targets which do not support unaligned memory accesses. - * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://stackoverflow.com/a/32095106/646947 for details. - * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \ - || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \ - || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) -# define XXH_FORCE_MEMORY_ACCESS 2 -# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \ - (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \ - || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \ - || defined(__ARM_ARCH_7S__) )) -# define XXH_FORCE_MEMORY_ACCESS 1 -# endif -#endif - -/*!XXH_ACCEPT_NULL_INPUT_POINTER : - * If input pointer is NULL, xxHash default behavior is to dereference it, triggering a segfault. - * When this macro is enabled, xxHash actively checks input for null pointer. - * It it is, result for null input pointers is the same as a null-length input. - */ -#ifndef XXH_ACCEPT_NULL_INPUT_POINTER /* can be defined externally */ -# define XXH_ACCEPT_NULL_INPUT_POINTER 0 -#endif - -/*!XXH_FORCE_NATIVE_FORMAT : - * By default, xxHash library provides endian-independent Hash values, based on little-endian convention. - * Results are therefore identical for little-endian and big-endian CPU. - * This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format. - * Should endian-independence be of no importance for your application, you may set the #define below to 1, - * to improve speed for Big-endian CPU. - * This option has no impact on Little_Endian CPU. - */ -#ifndef XXH_FORCE_NATIVE_FORMAT /* can be defined externally */ -# define XXH_FORCE_NATIVE_FORMAT 0 -#endif - -/*!XXH_FORCE_ALIGN_CHECK : - * This is a minor performance trick, only useful with lots of very small keys. - * It means : check for aligned/unaligned input. - * The check costs one initial branch per hash; - * set it to 0 when the input is guaranteed to be aligned, - * or when alignment doesn't matter for performance. - */ -#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */ -# if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) -# define XXH_FORCE_ALIGN_CHECK 0 -# else -# define XXH_FORCE_ALIGN_CHECK 1 -# endif -#endif - - -/* ************************************* -* Includes & Memory related functions -***************************************/ -/*! Modify the local functions below should you wish to use some other memory routines -* for malloc(), free() */ -#include -static void* XXH_malloc(size_t s) { return malloc(s); } -static void XXH_free (void* p) { free(p); } -/*! 
and for memcpy() */ -#include -static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); } - -#include /* assert */ - -#define XXH_STATIC_LINKING_ONLY -#include "xxhash.h" - - -/* ************************************* -* Compiler Specific Options -***************************************/ -#ifdef _MSC_VER /* Visual Studio */ -# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ -# define FORCE_INLINE static __forceinline -#else -# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ -# ifdef __GNUC__ -# define FORCE_INLINE static inline __attribute__((always_inline)) -# else -# define FORCE_INLINE static inline -# endif -# else -# define FORCE_INLINE static -# endif /* __STDC_VERSION__ */ -#endif - - -/* ************************************* -* Basic Types -***************************************/ -#ifndef MEM_MODULE -# if !defined (__VMS) \ - && (defined (__cplusplus) \ - || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) -# include - typedef uint8_t BYTE; - typedef uint16_t U16; - typedef uint32_t U32; -# else - typedef unsigned char BYTE; - typedef unsigned short U16; - typedef unsigned int U32; -# endif -#endif - -#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) - -/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ -static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; } - -#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U32 u32; } __attribute__((packed)) unalign; -static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } - -#else - -/* portable and safe solution. Generally efficient. 
- * see : http://stackoverflow.com/a/32095106/646947 - */ -static U32 XXH_read32(const void* memPtr) -{ - U32 val; - memcpy(&val, memPtr, sizeof(val)); - return val; -} - -#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ - - -/* **************************************** -* Compiler-specific Functions and Macros -******************************************/ -#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) - -/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */ -#if defined(_MSC_VER) -# define XXH_rotl32(x,r) _rotl(x,r) -# define XXH_rotl64(x,r) _rotl64(x,r) -#else -# define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r))) -# define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r))) -#endif - -#if defined(_MSC_VER) /* Visual Studio */ -# define XXH_swap32 _byteswap_ulong -#elif XXH_GCC_VERSION >= 403 -# define XXH_swap32 __builtin_bswap32 -#else -static U32 XXH_swap32 (U32 x) -{ - return ((x << 24) & 0xff000000 ) | - ((x << 8) & 0x00ff0000 ) | - ((x >> 8) & 0x0000ff00 ) | - ((x >> 24) & 0x000000ff ); -} -#endif - - -/* ************************************* -* Architecture Macros -***************************************/ -typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess; - -/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */ -#ifndef XXH_CPU_LITTLE_ENDIAN -static int XXH_isLittleEndian(void) -{ - const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ - return one.c[0]; -} -# define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian() -#endif - - -/* *************************** -* Memory reads -*****************************/ -typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment; - -FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align) -{ - if (align==XXH_unaligned) - return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr)); - else - return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr); -} - -FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian) -{ - return XXH_readLE32_align(ptr, endian, XXH_unaligned); -} - -static U32 XXH_readBE32(const void* ptr) -{ - return XXH_CPU_LITTLE_ENDIAN ? 
XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr); -} - - -/* ************************************* -* Macros -***************************************/ -#define XXH_STATIC_ASSERT(c) { enum { XXH_sa = 1/(int)(!!(c)) }; } /* use after variable declarations */ -XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } - - -/* ******************************************************************* -* 32-bit hash functions -*********************************************************************/ -static const U32 PRIME32_1 = 2654435761U; -static const U32 PRIME32_2 = 2246822519U; -static const U32 PRIME32_3 = 3266489917U; -static const U32 PRIME32_4 = 668265263U; -static const U32 PRIME32_5 = 374761393U; - -static U32 XXH32_round(U32 seed, U32 input) -{ - seed += input * PRIME32_2; - seed = XXH_rotl32(seed, 13); - seed *= PRIME32_1; - return seed; -} - -/* mix all bits */ -static U32 XXH32_avalanche(U32 h32) -{ - h32 ^= h32 >> 15; - h32 *= PRIME32_2; - h32 ^= h32 >> 13; - h32 *= PRIME32_3; - h32 ^= h32 >> 16; - return(h32); -} - -#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align) - -static U32 -XXH32_finalize(U32 h32, const void* ptr, size_t len, - XXH_endianess endian, XXH_alignment align) - -{ - const BYTE* p = (const BYTE*)ptr; - -#define PROCESS1 \ - h32 += (*p++) * PRIME32_5; \ - h32 = XXH_rotl32(h32, 11) * PRIME32_1 ; - -#define PROCESS4 \ - h32 += XXH_get32bits(p) * PRIME32_3; \ - p+=4; \ - h32 = XXH_rotl32(h32, 17) * PRIME32_4 ; - - switch(len&15) /* or switch(bEnd - p) */ - { - case 12: PROCESS4; - /* fallthrough */ - case 8: PROCESS4; - /* fallthrough */ - case 4: PROCESS4; - return XXH32_avalanche(h32); - - case 13: PROCESS4; - /* fallthrough */ - case 9: PROCESS4; - /* fallthrough */ - case 5: PROCESS4; - PROCESS1; - return XXH32_avalanche(h32); - - case 14: PROCESS4; - /* fallthrough */ - case 10: PROCESS4; - /* fallthrough */ - case 6: PROCESS4; - PROCESS1; - PROCESS1; - return XXH32_avalanche(h32); - - case 15: PROCESS4; - /* fallthrough */ - case 11: PROCESS4; - /* fallthrough */ - case 7: PROCESS4; - /* fallthrough */ - case 3: PROCESS1; - /* fallthrough */ - case 2: PROCESS1; - /* fallthrough */ - case 1: PROCESS1; - /* fallthrough */ - case 0: return XXH32_avalanche(h32); - } - assert(0); - return h32; /* reaching this point is deemed impossible */ -} - - -FORCE_INLINE U32 -XXH32_endian_align(const void* input, size_t len, U32 seed, - XXH_endianess endian, XXH_alignment align) -{ - const BYTE* p = (const BYTE*)input; - const BYTE* bEnd = p + len; - U32 h32; - -#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) - if (p==NULL) { - len=0; - bEnd=p=(const BYTE*)(size_t)16; - } -#endif - - if (len>=16) { - const BYTE* const limit = bEnd - 15; - U32 v1 = seed + PRIME32_1 + PRIME32_2; - U32 v2 = seed + PRIME32_2; - U32 v3 = seed + 0; - U32 v4 = seed - PRIME32_1; - - do { - v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4; - v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4; - v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4; - v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4; - } while (p < limit); - - h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) - + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); - } else { - h32 = seed + PRIME32_5; - } - - h32 += (U32)len; - - return XXH32_finalize(h32, p, len&15, endian, align); -} - - -XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed) -{ -#if 0 - /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ - XXH32_state_t state; - XXH32_reset(&state, 
seed); - XXH32_update(&state, input, len); - return XXH32_digest(&state); -#else - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if (XXH_FORCE_ALIGN_CHECK) { - if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */ - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); - else - return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); - } } - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); - else - return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); -#endif -} - - - -/*====== Hash streaming ======*/ - -XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void) -{ - return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t)); -} -XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr) -{ - XXH_free(statePtr); - return XXH_OK; -} - -XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState) -{ - memcpy(dstState, srcState, sizeof(*dstState)); -} - -XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed) -{ - XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ - memset(&state, 0, sizeof(state)); - state.v1 = seed + PRIME32_1 + PRIME32_2; - state.v2 = seed + PRIME32_2; - state.v3 = seed + 0; - state.v4 = seed - PRIME32_1; - /* do not write into reserved, planned to be removed in a future version */ - memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved)); - return XXH_OK; -} - - -FORCE_INLINE XXH_errorcode -XXH32_update_endian(XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian) -{ - if (input==NULL) -#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) - return XXH_OK; -#else - return XXH_ERROR; -#endif - - { const BYTE* p = (const BYTE*)input; - const BYTE* const bEnd = p + len; - - state->total_len_32 += (unsigned)len; - state->large_len |= (len>=16) | (state->total_len_32>=16); - - if (state->memsize + len < 16) { /* fill in tmp buffer */ - XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len); - state->memsize += (unsigned)len; - return XXH_OK; - } - - if (state->memsize) { /* some data left from previous update */ - XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize); - { const U32* p32 = state->mem32; - state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++; - state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++; - state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++; - state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); - } - p += 16-state->memsize; - state->memsize = 0; - } - - if (p <= bEnd-16) { - const BYTE* const limit = bEnd - 16; - U32 v1 = state->v1; - U32 v2 = state->v2; - U32 v3 = state->v3; - U32 v4 = state->v4; - - do { - v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4; - v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4; - v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4; - v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4; - } while (p<=limit); - - state->v1 = v1; - state->v2 = v2; - state->v3 = v3; - state->v4 = v4; - } - - if (p < bEnd) { - XXH_memcpy(state->mem32, p, (size_t)(bEnd-p)); - state->memsize = (unsigned)(bEnd-p); - } - } - - return XXH_OK; -} - - -XXH_PUBLIC_API XXH_errorcode XXH32_update 
(XXH32_state_t* state_in, const void* input, size_t len) -{ - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH32_update_endian(state_in, input, len, XXH_littleEndian); - else - return XXH32_update_endian(state_in, input, len, XXH_bigEndian); -} - - -FORCE_INLINE U32 -XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian) -{ - U32 h32; - - if (state->large_len) { - h32 = XXH_rotl32(state->v1, 1) - + XXH_rotl32(state->v2, 7) - + XXH_rotl32(state->v3, 12) - + XXH_rotl32(state->v4, 18); - } else { - h32 = state->v3 /* == seed */ + PRIME32_5; - } - - h32 += state->total_len_32; - - return XXH32_finalize(h32, state->mem32, state->memsize, endian, XXH_aligned); -} - - -XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in) -{ - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH32_digest_endian(state_in, XXH_littleEndian); - else - return XXH32_digest_endian(state_in, XXH_bigEndian); -} - - -/*====== Canonical representation ======*/ - -/*! Default XXH result types are basic unsigned 32 and 64 bits. -* The canonical representation follows human-readable write convention, aka big-endian (large digits first). -* These functions allow transformation of hash result into and from its canonical format. -* This way, hash values can be written into a file or buffer, remaining comparable across different systems. -*/ - -XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash) -{ - XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); - if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash); - memcpy(dst, &hash, sizeof(*dst)); -} - -XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src) -{ - return XXH_readBE32(src); -} - - -#ifndef XXH_NO_LONG_LONG - -/* ******************************************************************* -* 64-bit hash functions -*********************************************************************/ - -/*====== Memory access ======*/ - -#ifndef MEM_MODULE -# define MEM_MODULE -# if !defined (__VMS) \ - && (defined (__cplusplus) \ - || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) -# include - typedef uint64_t U64; -# else - /* if compiler doesn't support unsigned long long, replace by another 64-bit type */ - typedef unsigned long long U64; -# endif -#endif - - -#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) - -/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ -static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; } - -#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign64; -static U64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; } - -#else - -/* portable and safe solution. Generally efficient. 
- * see : http://stackoverflow.com/a/32095106/646947 - */ - -static U64 XXH_read64(const void* memPtr) -{ - U64 val; - memcpy(&val, memPtr, sizeof(val)); - return val; -} - -#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ - -#if defined(_MSC_VER) /* Visual Studio */ -# define XXH_swap64 _byteswap_uint64 -#elif XXH_GCC_VERSION >= 403 -# define XXH_swap64 __builtin_bswap64 -#else -static U64 XXH_swap64 (U64 x) -{ - return ((x << 56) & 0xff00000000000000ULL) | - ((x << 40) & 0x00ff000000000000ULL) | - ((x << 24) & 0x0000ff0000000000ULL) | - ((x << 8) & 0x000000ff00000000ULL) | - ((x >> 8) & 0x00000000ff000000ULL) | - ((x >> 24) & 0x0000000000ff0000ULL) | - ((x >> 40) & 0x000000000000ff00ULL) | - ((x >> 56) & 0x00000000000000ffULL); -} -#endif - -FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align) -{ - if (align==XXH_unaligned) - return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); - else - return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr); -} - -FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian) -{ - return XXH_readLE64_align(ptr, endian, XXH_unaligned); -} - -static U64 XXH_readBE64(const void* ptr) -{ - return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr); -} - - -/*====== xxh64 ======*/ - -static const U64 PRIME64_1 = 11400714785074694791ULL; -static const U64 PRIME64_2 = 14029467366897019727ULL; -static const U64 PRIME64_3 = 1609587929392839161ULL; -static const U64 PRIME64_4 = 9650029242287828579ULL; -static const U64 PRIME64_5 = 2870177450012600261ULL; - -static U64 XXH64_round(U64 acc, U64 input) -{ - acc += input * PRIME64_2; - acc = XXH_rotl64(acc, 31); - acc *= PRIME64_1; - return acc; -} - -static U64 XXH64_mergeRound(U64 acc, U64 val) -{ - val = XXH64_round(0, val); - acc ^= val; - acc = acc * PRIME64_1 + PRIME64_4; - return acc; -} - -static U64 XXH64_avalanche(U64 h64) -{ - h64 ^= h64 >> 33; - h64 *= PRIME64_2; - h64 ^= h64 >> 29; - h64 *= PRIME64_3; - h64 ^= h64 >> 32; - return h64; -} - - -#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align) - -static U64 -XXH64_finalize(U64 h64, const void* ptr, size_t len, - XXH_endianess endian, XXH_alignment align) -{ - const BYTE* p = (const BYTE*)ptr; - -#define PROCESS1_64 \ - h64 ^= (*p++) * PRIME64_5; \ - h64 = XXH_rotl64(h64, 11) * PRIME64_1; - -#define PROCESS4_64 \ - h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; \ - p+=4; \ - h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; - -#define PROCESS8_64 { \ - U64 const k1 = XXH64_round(0, XXH_get64bits(p)); \ - p+=8; \ - h64 ^= k1; \ - h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; \ -} - - switch(len&31) { - case 24: PROCESS8_64; - /* fallthrough */ - case 16: PROCESS8_64; - /* fallthrough */ - case 8: PROCESS8_64; - return XXH64_avalanche(h64); - - case 28: PROCESS8_64; - /* fallthrough */ - case 20: PROCESS8_64; - /* fallthrough */ - case 12: PROCESS8_64; - /* fallthrough */ - case 4: PROCESS4_64; - return XXH64_avalanche(h64); - - case 25: PROCESS8_64; - /* fallthrough */ - case 17: PROCESS8_64; - /* fallthrough */ - case 9: PROCESS8_64; - PROCESS1_64; - return XXH64_avalanche(h64); - - case 29: PROCESS8_64; - /* fallthrough */ - case 21: PROCESS8_64; - /* fallthrough */ - case 13: PROCESS8_64; - /* fallthrough */ - case 5: PROCESS4_64; - PROCESS1_64; - return XXH64_avalanche(h64); - - case 26: PROCESS8_64; - /* fallthrough */ - case 18: PROCESS8_64; - /* fallthrough */ - case 10: PROCESS8_64; - PROCESS1_64; - PROCESS1_64; - return 
XXH64_avalanche(h64); - - case 30: PROCESS8_64; - /* fallthrough */ - case 22: PROCESS8_64; - /* fallthrough */ - case 14: PROCESS8_64; - /* fallthrough */ - case 6: PROCESS4_64; - PROCESS1_64; - PROCESS1_64; - return XXH64_avalanche(h64); - - case 27: PROCESS8_64; - /* fallthrough */ - case 19: PROCESS8_64; - /* fallthrough */ - case 11: PROCESS8_64; - PROCESS1_64; - PROCESS1_64; - PROCESS1_64; - return XXH64_avalanche(h64); - - case 31: PROCESS8_64; - /* fallthrough */ - case 23: PROCESS8_64; - /* fallthrough */ - case 15: PROCESS8_64; - /* fallthrough */ - case 7: PROCESS4_64; - /* fallthrough */ - case 3: PROCESS1_64; - /* fallthrough */ - case 2: PROCESS1_64; - /* fallthrough */ - case 1: PROCESS1_64; - /* fallthrough */ - case 0: return XXH64_avalanche(h64); - } - - /* impossible to reach */ - assert(0); - return 0; /* unreachable, but some compilers complain without it */ -} - -FORCE_INLINE U64 -XXH64_endian_align(const void* input, size_t len, U64 seed, - XXH_endianess endian, XXH_alignment align) -{ - const BYTE* p = (const BYTE*)input; - const BYTE* bEnd = p + len; - U64 h64; - -#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) - if (p==NULL) { - len=0; - bEnd=p=(const BYTE*)(size_t)32; - } -#endif - - if (len>=32) { - const BYTE* const limit = bEnd - 32; - U64 v1 = seed + PRIME64_1 + PRIME64_2; - U64 v2 = seed + PRIME64_2; - U64 v3 = seed + 0; - U64 v4 = seed - PRIME64_1; - - do { - v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8; - v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8; - v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8; - v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8; - } while (p<=limit); - - h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); - h64 = XXH64_mergeRound(h64, v1); - h64 = XXH64_mergeRound(h64, v2); - h64 = XXH64_mergeRound(h64, v3); - h64 = XXH64_mergeRound(h64, v4); - - } else { - h64 = seed + PRIME64_5; - } - - h64 += (U64) len; - - return XXH64_finalize(h64, p, len, endian, align); -} - - -XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed) -{ -#if 0 - /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ - XXH64_state_t state; - XXH64_reset(&state, seed); - XXH64_update(&state, input, len); - return XXH64_digest(&state); -#else - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if (XXH_FORCE_ALIGN_CHECK) { - if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */ - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); - else - return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); - } } - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); - else - return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); -#endif -} - -/*====== Hash Streaming ======*/ - -XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void) -{ - return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t)); -} -XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr) -{ - XXH_free(statePtr); - return XXH_OK; -} - -XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState) -{ - memcpy(dstState, srcState, sizeof(*dstState)); -} - -XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed) -{ - 
XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ - memset(&state, 0, sizeof(state)); - state.v1 = seed + PRIME64_1 + PRIME64_2; - state.v2 = seed + PRIME64_2; - state.v3 = seed + 0; - state.v4 = seed - PRIME64_1; - /* do not write into reserved, planned to be removed in a future version */ - memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved)); - return XXH_OK; -} - -FORCE_INLINE XXH_errorcode -XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian) -{ - if (input==NULL) -#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) - return XXH_OK; -#else - return XXH_ERROR; -#endif - - { const BYTE* p = (const BYTE*)input; - const BYTE* const bEnd = p + len; - - state->total_len += len; - - if (state->memsize + len < 32) { /* fill in tmp buffer */ - XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len); - state->memsize += (U32)len; - return XXH_OK; - } - - if (state->memsize) { /* tmp buffer is full */ - XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize); - state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian)); - state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian)); - state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian)); - state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian)); - p += 32-state->memsize; - state->memsize = 0; - } - - if (p+32 <= bEnd) { - const BYTE* const limit = bEnd - 32; - U64 v1 = state->v1; - U64 v2 = state->v2; - U64 v3 = state->v3; - U64 v4 = state->v4; - - do { - v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8; - v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8; - v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8; - v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8; - } while (p<=limit); - - state->v1 = v1; - state->v2 = v2; - state->v3 = v3; - state->v4 = v4; - } - - if (p < bEnd) { - XXH_memcpy(state->mem64, p, (size_t)(bEnd-p)); - state->memsize = (unsigned)(bEnd-p); - } - } - - return XXH_OK; -} - -XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len) -{ - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH64_update_endian(state_in, input, len, XXH_littleEndian); - else - return XXH64_update_endian(state_in, input, len, XXH_bigEndian); -} - -FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian) -{ - U64 h64; - - if (state->total_len >= 32) { - U64 const v1 = state->v1; - U64 const v2 = state->v2; - U64 const v3 = state->v3; - U64 const v4 = state->v4; - - h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); - h64 = XXH64_mergeRound(h64, v1); - h64 = XXH64_mergeRound(h64, v2); - h64 = XXH64_mergeRound(h64, v3); - h64 = XXH64_mergeRound(h64, v4); - } else { - h64 = state->v3 /*seed*/ + PRIME64_5; - } - - h64 += (U64) state->total_len; - - return XXH64_finalize(h64, state->mem64, (size_t)state->total_len, endian, XXH_aligned); -} - -XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in) -{ - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH64_digest_endian(state_in, XXH_littleEndian); - else - return XXH64_digest_endian(state_in, XXH_bigEndian); -} - - -/*====== Canonical 
representation ======*/ - -XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash) -{ - XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); - if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); - memcpy(dst, &hash, sizeof(*dst)); -} - -XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src) -{ - return XXH_readBE64(src); -} - -#endif /* XXH_NO_LONG_LONG */ diff --git a/CompressSave/LZ4WrapC/lz4/xxhash.h b/CompressSave/LZ4WrapC/lz4/xxhash.h deleted file mode 100644 index d6bad94..0000000 --- a/CompressSave/LZ4WrapC/lz4/xxhash.h +++ /dev/null @@ -1,328 +0,0 @@ -/* - xxHash - Extremely Fast Hash algorithm - Header File - Copyright (C) 2012-2016, Yann Collet. - - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - xxHash source repository : https://github.com/Cyan4973/xxHash -*/ - -/* Notice extracted from xxHash homepage : - -xxHash is an extremely fast Hash algorithm, running at RAM speed limits. -It also successfully passes all tests from the SMHasher suite. - -Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz) - -Name Speed Q.Score Author -xxHash 5.4 GB/s 10 -CrapWow 3.2 GB/s 2 Andrew -MumurHash 3a 2.7 GB/s 10 Austin Appleby -SpookyHash 2.0 GB/s 10 Bob Jenkins -SBox 1.4 GB/s 9 Bret Mulvey -Lookup3 1.2 GB/s 9 Bob Jenkins -SuperFastHash 1.2 GB/s 1 Paul Hsieh -CityHash64 1.05 GB/s 10 Pike & Alakuijala -FNV 0.55 GB/s 5 Fowler, Noll, Vo -CRC32 0.43 GB/s 9 -MD5-32 0.33 GB/s 10 Ronald L. Rivest -SHA1-32 0.28 GB/s 10 - -Q.Score is a measure of quality of the hash function. -It depends on successfully passing SMHasher test set. -10 is a perfect score. - -A 64-bit version, named XXH64, is available since r35. -It offers much better speed, but for 64-bit applications only. 
-Name Speed on 64 bits Speed on 32 bits -XXH64 13.8 GB/s 1.9 GB/s -XXH32 6.8 GB/s 6.0 GB/s -*/ - -#ifndef XXHASH_H_5627135585666179 -#define XXHASH_H_5627135585666179 1 - -#if defined (__cplusplus) -extern "C" { -#endif - - -/* **************************** -* Definitions -******************************/ -#include /* size_t */ -typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; - - -/* **************************** - * API modifier - ******************************/ -/** XXH_INLINE_ALL (and XXH_PRIVATE_API) - * This is useful to include xxhash functions in `static` mode - * in order to inline them, and remove their symbol from the public list. - * Inlining can offer dramatic performance improvement on small keys. - * Methodology : - * #define XXH_INLINE_ALL - * #include "xxhash.h" - * `xxhash.c` is automatically included. - * It's not useful to compile and link it as a separate module. - */ -#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) -# ifndef XXH_STATIC_LINKING_ONLY -# define XXH_STATIC_LINKING_ONLY -# endif -# if defined(__GNUC__) -# define XXH_PUBLIC_API static __inline __attribute__((unused)) -# elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -# define XXH_PUBLIC_API static inline -# elif defined(_MSC_VER) -# define XXH_PUBLIC_API static __inline -# else - /* this version may generate warnings for unused static functions */ -# define XXH_PUBLIC_API static -# endif -#else -# define XXH_PUBLIC_API /* do nothing */ -#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */ - -/*! XXH_NAMESPACE, aka Namespace Emulation : - * - * If you want to include _and expose_ xxHash functions from within your own library, - * but also want to avoid symbol collisions with other libraries which may also include xxHash, - * - * you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library - * with the value of XXH_NAMESPACE (therefore, avoid NULL and numeric values). - * - * Note that no change is required within the calling program as long as it includes `xxhash.h` : - * regular symbol name will be automatically translated by this header. 
- */ -#ifdef XXH_NAMESPACE -# define XXH_CAT(A,B) A##B -# define XXH_NAME2(A,B) XXH_CAT(A,B) -# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber) -# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32) -# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState) -# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState) -# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset) -# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update) -# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest) -# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState) -# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash) -# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical) -# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64) -# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState) -# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState) -# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset) -# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update) -# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest) -# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState) -# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash) -# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical) -#endif - - -/* ************************************* -* Version -***************************************/ -#define XXH_VERSION_MAJOR 0 -#define XXH_VERSION_MINOR 6 -#define XXH_VERSION_RELEASE 5 -#define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE) -XXH_PUBLIC_API unsigned XXH_versionNumber (void); - - -/*-********************************************************************** -* 32-bit hash -************************************************************************/ -typedef unsigned int XXH32_hash_t; - -/*! XXH32() : - Calculate the 32-bit hash of sequence "length" bytes stored at memory address "input". - The memory between input & input+length must be valid (allocated and read-accessible). - "seed" can be used to alter the result predictably. - Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s */ -XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed); - -/*====== Streaming ======*/ -typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */ -XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void); -XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr); -XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state); - -XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, unsigned int seed); -XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length); -XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr); - -/* - * Streaming functions generate the xxHash of an input provided in multiple segments. - * Note that, for small input, they are slower than single-call functions, due to state management. - * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized. - * - * XXH state must first be allocated, using XXH*_createState() . - * - * Start a new hash by initializing state with a seed, using XXH*_reset(). - * - * Then, feed the hash state by calling XXH*_update() as many times as necessary. 
- * The function returns an error code, with 0 meaning OK, and any other value meaning there is an error. - * - * Finally, a hash value can be produced anytime, by using XXH*_digest(). - * This function returns the nn-bits hash as an int or long long. - * - * It's still possible to continue inserting input into the hash state after a digest, - * and generate some new hashes later on, by calling again XXH*_digest(). - * - * When done, free XXH state space if it was allocated dynamically. - */ - -/*====== Canonical representation ======*/ - -typedef struct { unsigned char digest[4]; } XXH32_canonical_t; -XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash); -XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src); - -/* Default result type for XXH functions are primitive unsigned 32 and 64 bits. - * The canonical representation uses human-readable write convention, aka big-endian (large digits first). - * These functions allow transformation of hash result into and from its canonical format. - * This way, hash values can be written into a file / memory, and remain comparable on different systems and programs. - */ - - -#ifndef XXH_NO_LONG_LONG -/*-********************************************************************** -* 64-bit hash -************************************************************************/ -typedef unsigned long long XXH64_hash_t; - -/*! XXH64() : - Calculate the 64-bit hash of sequence of length "len" stored at memory address "input". - "seed" can be used to alter the result predictably. - This function runs faster on 64-bit systems, but slower on 32-bit systems (see benchmark). -*/ -XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed); - -/*====== Streaming ======*/ -typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ -XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void); -XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr); -XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state); - -XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, unsigned long long seed); -XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length); -XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr); - -/*====== Canonical representation ======*/ -typedef struct { unsigned char digest[8]; } XXH64_canonical_t; -XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash); -XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src); -#endif /* XXH_NO_LONG_LONG */ - - - -#ifdef XXH_STATIC_LINKING_ONLY - -/* ================================================================================================ - This section contains declarations which are not guaranteed to remain stable. - They may change in future versions, becoming incompatible with a different version of the library. - These declarations should only be used with static linking. - Never use them in association with dynamic linking ! -=================================================================================================== */ - -/* These definitions are only present to allow - * static allocation of XXH state, on stack or in a struct for example. - * Never **ever** use members directly. 
*/ - -#if !defined (__VMS) \ - && (defined (__cplusplus) \ - || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) -# include - -struct XXH32_state_s { - uint32_t total_len_32; - uint32_t large_len; - uint32_t v1; - uint32_t v2; - uint32_t v3; - uint32_t v4; - uint32_t mem32[4]; - uint32_t memsize; - uint32_t reserved; /* never read nor write, might be removed in a future version */ -}; /* typedef'd to XXH32_state_t */ - -struct XXH64_state_s { - uint64_t total_len; - uint64_t v1; - uint64_t v2; - uint64_t v3; - uint64_t v4; - uint64_t mem64[4]; - uint32_t memsize; - uint32_t reserved[2]; /* never read nor write, might be removed in a future version */ -}; /* typedef'd to XXH64_state_t */ - -# else - -struct XXH32_state_s { - unsigned total_len_32; - unsigned large_len; - unsigned v1; - unsigned v2; - unsigned v3; - unsigned v4; - unsigned mem32[4]; - unsigned memsize; - unsigned reserved; /* never read nor write, might be removed in a future version */ -}; /* typedef'd to XXH32_state_t */ - -# ifndef XXH_NO_LONG_LONG /* remove 64-bit support */ -struct XXH64_state_s { - unsigned long long total_len; - unsigned long long v1; - unsigned long long v2; - unsigned long long v3; - unsigned long long v4; - unsigned long long mem64[4]; - unsigned memsize; - unsigned reserved[2]; /* never read nor write, might be removed in a future version */ -}; /* typedef'd to XXH64_state_t */ -# endif - -# endif - - -#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) -# include "xxhash.c" /* include xxhash function bodies as `static`, for inlining */ -#endif - -#endif /* XXH_STATIC_LINKING_ONLY */ - - -#if defined (__cplusplus) -} -#endif - -#endif /* XXHASH_H_5627135585666179 */ diff --git a/CompressSave/NoneWrapC/CMakeLists.txt b/CompressSave/NoneWrapC/CMakeLists.txt deleted file mode 100644 index 5f5fce5..0000000 --- a/CompressSave/NoneWrapC/CMakeLists.txt +++ /dev/null @@ -1,11 +0,0 @@ -cmake_minimum_required(VERSION 3.2) - -project(nonewrap) - -add_library(nonewrap SHARED - dllmain.c nonewrap.c nonewrap.h) - -target_compile_definitions(nonewrap PRIVATE NONEWRAP_EXPORTS) -if(WIN32) - set_target_properties(nonewrap PROPERTIES PREFIX "") -endif() diff --git a/CompressSave/NoneWrapC/dllmain.c b/CompressSave/NoneWrapC/dllmain.c deleted file mode 100644 index 896c9b5..0000000 --- a/CompressSave/NoneWrapC/dllmain.c +++ /dev/null @@ -1,20 +0,0 @@ -// dllmain.cpp : Defines the entry point for the DLL application. 
-#include - -BOOL APIENTRY DllMain( HMODULE hModule, - DWORD ul_reason_for_call, - LPVOID lpReserved - ) -{ - switch (ul_reason_for_call) - { - case DLL_PROCESS_ATTACH: - DisableThreadLibraryCalls(hModule); - break; - case DLL_THREAD_ATTACH: - case DLL_THREAD_DETACH: - case DLL_PROCESS_DETACH: - break; - } - return TRUE; -} diff --git a/CompressSave/NoneWrapC/nonewrap.c b/CompressSave/NoneWrapC/nonewrap.c deleted file mode 100644 index 85bcc34..0000000 --- a/CompressSave/NoneWrapC/nonewrap.c +++ /dev/null @@ -1,62 +0,0 @@ -#include "nonewrap.h" - -#include -#include -#include -#include - -size_t __stdcall CompressBufferBound(size_t inBufferSize) -{ - return inBufferSize * 4; -} - -size_t __stdcall CompressBegin(Context** pctx, int compressionLevel, void* outBuff, size_t outCapacity, void* dict, size_t dictSize) -{ - Context *ctx = (Context *)malloc(sizeof(Context)); - if (ctx == NULL) return -1; - *pctx = ctx; - return 0; -} - -size_t __stdcall CompressUpdate(Context* ctx,void* dstBuffer, size_t dstCapacity,const void* srcBuffer, size_t srcSize) -{ - memcpy(dstBuffer, srcBuffer, srcSize); - return srcSize; -} - -size_t __stdcall CompressEnd(Context* ctx, void* dstBuffer, size_t dstCapacity) -{ - return 0; -} - -void __stdcall CompressContextFree(Context* ctx) -{ - free(ctx); -} - -size_t __stdcall DecompressBegin(Context **pdctx,void *inBuffer,size_t *inBufferSize, size_t *blockSize, void* dict, size_t dictSize) -{ - Context *ctx = (Context *)malloc(sizeof(Context)); - if (ctx == NULL) return -1; - *pdctx = ctx; - *inBufferSize = 0; - *blockSize = 4 * 512 * 1024; - return 0; -} - -void __stdcall DecompressContextReset(Context* dctx) -{ -} - -size_t __stdcall DecompressUpdate(Context* dctx, void* outBuffer, size_t * outBufferSize, void* inBuffer, size_t * inBufferSize) -{ - memcpy(outBuffer, inBuffer, *inBufferSize); - *outBufferSize = *inBufferSize; - return 1; -} - -size_t __stdcall DecompressEnd(Context* ctx) -{ - free(ctx); - return 0; -} \ No newline at end of file diff --git a/CompressSave/NoneWrapC/nonewrap.h b/CompressSave/NoneWrapC/nonewrap.h deleted file mode 100644 index 9910e8d..0000000 --- a/CompressSave/NoneWrapC/nonewrap.h +++ /dev/null @@ -1,36 +0,0 @@ -#pragma once - -#include - -#if defined(__cplusplus) -#define API_EXTERN_C extern "C" -#else -#define API_EXTERN_C -#endif -#ifdef NONEWRAP_EXPORTS -#define NONEAPI API_EXTERN_C __declspec(dllexport) -#else -#define NONEAPI API_EXTERN_C __declspec(dllimport) -#endif - -typedef struct { - int n; -} Context; - -NONEAPI void __stdcall CompressContextFree(Context* ctx); - -NONEAPI size_t __stdcall CompressBufferBound(size_t inBufferSize); - -NONEAPI size_t __stdcall CompressBegin(Context** ctx, int compressionLevel, void* outBuff, size_t outCapacity, void* dict, size_t dictSize); - -NONEAPI size_t __stdcall CompressUpdate(Context* ctx, void* dstBuffer, size_t dstCapacity, const void* srcBuffer, size_t srcSize); - -NONEAPI size_t __stdcall CompressEnd(Context* ctx, void* dstBuffer, size_t dstCapacity); - -NONEAPI size_t __stdcall DecompressBegin(Context** pdctx, void* inBuffer, size_t* inBufferSize, size_t* blockSize, void* dict, size_t dictSize); - -NONEAPI void __stdcall DecompressContextReset(Context* dctx); - -NONEAPI size_t __stdcall DecompressUpdate(Context* dctx, void* outBuffer, size_t* outBufferSize, void* inBuffer, size_t* inBufferSize); - -NONEAPI size_t __stdcall DecompressEnd(Context* dctx); diff --git a/CompressSave/PatchUILoadGame.cs b/CompressSave/PatchUILoadGame.cs deleted file mode 100644 index 739992e..0000000 
--- a/CompressSave/PatchUILoadGame.cs +++ /dev/null @@ -1,83 +0,0 @@ -using System.Linq; -using HarmonyLib; -using UnityEngine; -using UnityEngine.UI; - -namespace CompressSave; - -class PatchUILoadGame -{ - static UIButton _decompressButton; - - [HarmonyPatch(typeof(UILoadGameWindow), "OnSelectedChange"), HarmonyPostfix] - private static void OnSelectedChange(UILoadGameWindow __instance) - { - var selected = __instance.selected; - var compressedType = SaveUtil.SaveGetCompressType(selected == null ? null : selected.saveName); - var prop3Text = __instance.prop3Text; - prop3Text.text = compressedType switch - { - CompressionType.LZ4 => "(LZ4)" + prop3Text.text, - CompressionType.Zstd => "(ZSTD)" + prop3Text.text, - _ => "(N)" + prop3Text.text - }; - if (!_decompressButton) return; - _decompressButton.button.interactable = compressedType != CompressionType.None; - _decompressButton.gameObject.SetActive(compressedType != CompressionType.None); - } - - [HarmonyPatch(typeof(UILoadGameWindow), "_OnOpen"), HarmonyPostfix] - static void _OnOpen(UILoadGameWindow __instance) - { - if (_decompressButton) return; - var loadButton = __instance.loadButton; - - var created = false; - var gameObj = __instance.transform.Find("button-decompress")?.gameObject; - if (gameObj == null) - { - gameObj = Object.Instantiate(loadButton.gameObject, loadButton.transform.parent); - created = true; - } - - _decompressButton = gameObj.GetComponent(); - - if (created) - { - var rtrans = (RectTransform)__instance.loadSandboxGroup.transform; - var anchoredPosition3D = rtrans.anchoredPosition3D; - _decompressButton.gameObject.name = "button-decompress"; - rtrans = (RectTransform)_decompressButton.transform; - var pos = anchoredPosition3D; - anchoredPosition3D = new Vector3(pos.x - 230, pos.y, pos.z); - rtrans.anchoredPosition3D = anchoredPosition3D; - _decompressButton.button.image.color = new Color32(0, 0xf4, 0x92, 0x77); - var textTrans = _decompressButton.transform.Find("button-text"); - var text = textTrans.GetComponent(); - text.text = "Decompress".Translate(); - var localizer = textTrans.GetComponent(); - if (localizer) - { - localizer.stringKey = "Decompress"; - localizer.translation = "Decompress".Translate(); - } - - _decompressButton.onClick += _ => - { - if (!SaveUtil.DecompressSave(__instance.selected.saveName, out var newfileName)) return; - __instance.RefreshList(); - __instance.selected = __instance.entries.First(e => e.saveName == newfileName); - }; - } - - _decompressButton.button.interactable = false; - _decompressButton.gameObject.SetActive(false); - } - - public static void OnDestroy() - { - if (_decompressButton) - Object.Destroy(_decompressButton.gameObject); - _decompressButton = null; - } -} \ No newline at end of file diff --git a/CompressSave/PatchUISaveGame.cs b/CompressSave/PatchUISaveGame.cs deleted file mode 100644 index 284796b..0000000 --- a/CompressSave/PatchUISaveGame.cs +++ /dev/null @@ -1,237 +0,0 @@ -using HarmonyLib; -using UnityEngine; -using UnityEngine.UI; - -namespace CompressSave; - -static class PatchUISaveGame -{ - public static void OnDestroy() - { - if (_context.ButtonCompress) - Object.Destroy(_context.ButtonCompress.gameObject); - if (_context.Window) - { - _context.SaveButton.onClick -= WrapClick; - _context.SaveButton.onClick += _context.Window.OnSaveClick; - } - _OnDestroy(); - } - - [HarmonyPatch(typeof(UISaveGameWindow), "OnSelectedChange"), HarmonyPostfix] - private static void OnSelectedChange(UISaveGameWindow __instance) - { - var selected = __instance.selected; - var 
compressedType = SaveUtil.SaveGetCompressType(selected == null ? null : selected.saveName); - var prop3Text = __instance.prop3Text; - prop3Text.text = compressedType switch - { - CompressionType.LZ4 => "(LZ4)" + prop3Text.text, - CompressionType.Zstd => "(ZSTD)" + prop3Text.text, - _ => "(N)" + prop3Text.text - }; - } - - [HarmonyPatch(typeof(UISaveGameWindow), "_OnDestroy"), HarmonyPostfix] - private static void _OnDestroy() - { - //Console.WriteLine("OnCreate"); - _context = new UIContext(); - } - - [HarmonyPatch(typeof(UISaveGameWindow), "OnSaveClick"), HarmonyReversePatch] - private static void OSaveGameAs(this UISaveGameWindow ui, int data) { } - - [HarmonyPatch(typeof(UISaveGameWindow), "CheckAndSetSaveButtonEnable"), HarmonyPostfix] - private static void CheckAndSetSaveButtonEnable(UISaveGameWindow __instance) - { - _OnOpen(__instance); - if (_context.SaveButton) - _context.ButtonCompress.button.interactable = _context.SaveButton.button.interactable; - } - - private class UIContext - { - public UIButton ButtonCompress; - public UIButton SaveButton; - public GameObject ManualSaveTypeComboBox; - public GameObject AutoSaveTypeComboBox; - public Text ButtonCompressText; - public Text SaveButtonText; - public UISaveGameWindow Window; - } - - [HarmonyPatch(typeof(UISaveGameWindow), "OnSaveClick"), HarmonyPrefix] - private static void OnSaveClick() - { - PatchSave.UseCompressSave = true; - } - - private static UIContext _context = new UIContext(); - - [HarmonyPatch(typeof(UISaveGameWindow), "_OnOpen"), HarmonyPostfix] - private static void _OnOpen(UISaveGameWindow __instance) - { - if (_context.ButtonCompress) return; - RectTransform rtrans; - Vector3 pos; - _context.SaveButton = __instance.saveButton; - _context.SaveButtonText = __instance.saveButtonText; - _context.Window = __instance; - var gameObj = __instance.transform.Find("button-compress")?.gameObject; - var created = false; - if (gameObj == null) - { - gameObj = Object.Instantiate(__instance.saveButton.gameObject, __instance.saveButton.transform.parent); - created = true; - } - _context.ButtonCompress = gameObj.GetComponent(); - if (created) - { - _context.ButtonCompress.gameObject.name = "button-compress"; - rtrans = (RectTransform)_context.ButtonCompress.transform; - pos = rtrans.anchoredPosition3D; - rtrans.anchoredPosition3D = new Vector3(pos.x - 180, pos.y, pos.z); - _context.ButtonCompress.button.image.color = new Color32(0xfc, 0x6f, 00, 0x77); - var textTrans = _context.ButtonCompress.transform.Find("button-text"); - _context.ButtonCompressText = textTrans.GetComponent(); - _context.ButtonCompress.onClick += __instance.OnSaveClick; - _context.SaveButton.onClick -= __instance.OnSaveClick; - _context.SaveButton.onClick += WrapClick; - _context.ButtonCompressText.text = "Save with Compression".Translate(); - var localizer = textTrans.GetComponent(); - if (localizer) - { - localizer.stringKey = "Save with Compression"; - localizer.translation = "Save with Compression".Translate(); - } - } - - created = false; - gameObj = __instance.transform.Find("manual-save-type-combobox")?.gameObject; - if (gameObj == null) - { - gameObj = Object.Instantiate(UIRoot.instance.optionWindow.resolutionComp.transform.parent.gameObject, __instance.saveButton.transform.parent); - created = true; - } - _context.ManualSaveTypeComboBox = gameObj; - if (created) - { - gameObj.name = "manual-save-type-combobox"; - rtrans = (RectTransform)gameObj.transform; - var rtrans2 = (RectTransform)_context.ButtonCompress.transform; - pos = 
rtrans2.anchoredPosition3D; - rtrans.anchorMin = rtrans2.anchorMin; - rtrans.anchorMax = rtrans2.anchorMax; - rtrans.pivot = rtrans2.pivot; - rtrans.anchoredPosition3D = new Vector3(pos.x + 100, pos.y + 45, pos.z); - var cbctrl = rtrans.transform.Find("ComboBox"); - var content = cbctrl.Find("Dropdown List ScrollBox")?.Find("Mask")?.Find("Content Panel"); - if (content != null) - { - for (var i = content.childCount - 1; i >= 0; i--) - { - var theTrans = content.GetChild(i); - if (theTrans.name == "Item Button(Clone)") - { - Object.Destroy(theTrans.gameObject); - } - } - } - var cb = cbctrl.GetComponent(); - cb.onSubmit.RemoveAllListeners(); - cb.onItemIndexChange.RemoveAllListeners(); - cb.Items = ["Store".Translate(), "LZ4", "Zstd"]; - cb.itemIndex = (int)PatchSave.CompressionTypeForSaves; - cb.onItemIndexChange.AddListener(()=> - { - PatchSave.CompressionTypeForSaves = (CompressionType)cb.itemIndex; - PatchSave.CompressionTypeForSavesConfig.Value = CompressSave.StringFromCompresstionType(PatchSave.CompressionTypeForSaves); - }); - rtrans = (RectTransform)cb.transform; - pos = rtrans.anchoredPosition3D; - rtrans.anchoredPosition3D = new Vector3(pos.x - 50, pos.y, pos.z); - var size = rtrans.sizeDelta; - rtrans.sizeDelta = new Vector2(150f, size.y); - var txt = gameObj.GetComponent(); - txt.text = "Compression for manual saves".Translate(); - var localizer = gameObj.GetComponent(); - if (localizer != null) - { - localizer.stringKey = "Compression for manual saves"; - localizer.translation = "Compression for manual saves".Translate(); - } - } - - created = false; - gameObj = __instance.transform.Find("auto-save-type-combobox")?.gameObject; - if (gameObj == null) - { - gameObj = Object.Instantiate(UIRoot.instance.optionWindow.resolutionComp.transform.parent.gameObject, __instance.saveButton.transform.parent); - created = true; - } - _context.AutoSaveTypeComboBox = gameObj; - if (created) - { - gameObj.name = "auto-save-type-combobox"; - rtrans = (RectTransform)gameObj.transform; - var rtrans2 = (RectTransform)_context.ButtonCompress.transform; - pos = rtrans2.anchoredPosition3D; - rtrans.anchorMin = rtrans2.anchorMin; - rtrans.anchorMax = rtrans2.anchorMax; - rtrans.pivot = rtrans2.pivot; - rtrans.anchoredPosition3D = new Vector3(pos.x + 510, pos.y + 45, pos.z); - var cbctrl = rtrans.transform.Find("ComboBox"); - var content = cbctrl.Find("Dropdown List ScrollBox")?.Find("Mask")?.Find("Content Panel"); - if (content != null) - { - for (var i = content.childCount - 1; i >= 0; i--) - { - var theTrans = content.GetChild(i); - if (theTrans.name == "Item Button(Clone)") - { - Object.Destroy(theTrans.gameObject); - } - } - } - var cb = cbctrl.GetComponent(); - cb.onSubmit.RemoveAllListeners(); - cb.onItemIndexChange.RemoveAllListeners(); - cb.Items = ["已停用".Translate(), "Store".Translate(), "LZ4", "Zstd"]; - cb.itemIndex = PatchSave.EnableForAutoSaves.Value ? 
(int)PatchSave.CompressionTypeForAutoSaves + 1 : 0; - cb.onItemIndexChange.AddListener(() => - { - var idx = cb.itemIndex; - if (idx == 0) - { - PatchSave.EnableForAutoSaves.Value = false; - } - else - { - PatchSave.EnableForAutoSaves.Value = true; - PatchSave.CompressionTypeForAutoSaves = (CompressionType)idx - 1; - PatchSave.CompressionTypeForAutoSavesConfig.Value = CompressSave.StringFromCompresstionType(PatchSave.CompressionTypeForAutoSaves); - } - }); - rtrans = (RectTransform)cb.transform; - pos = rtrans.anchoredPosition3D; - rtrans.anchoredPosition3D = new Vector3(pos.x - 50, pos.y, pos.z); - var size = rtrans.sizeDelta; - rtrans.sizeDelta = new Vector2(150f, size.y); - var txt = gameObj.GetComponent(); - txt.text = "Compression for auto saves".Translate(); - var localizer = gameObj.GetComponent(); - if (localizer != null) - { - localizer.stringKey = "Compression for auto saves"; - localizer.translation = "Compression for auto saves".Translate(); - } - } - } - - private static void WrapClick(int data) - { - PatchSave.UseCompressSave = false; - _context.Window.OSaveGameAs(data); - } -} diff --git a/CompressSave/README.md b/CompressSave/README.md index 86f84de..60021ea 100644 --- a/CompressSave/README.md +++ b/CompressSave/README.md @@ -1,198 +1,3 @@ # CompressSave -#### Compress game saves to reduce space use and boost save speed -#### Original by [@bluedoom](https://github.com/bluedoom/DSP_Mod)(till 1.1.11) and [@starfi5h](https://github.com/starfi5h/DSP_CompressSave)(1.1.12), I just update it to support latest game version. -#### 压缩游戏存档以降低空间使用并提升保存速度 -#### 原作者 [@bluedoom](https://github.com/bluedoom/DSP_Mod)(直到1.1.11) 和 [@starfi5h](https://github.com/starfi5h/DSP_CompressSave)(1.1.12),本人继续更新以支持最新游戏版本。 - -## Changelog - -### 1.3.5 -* Fix a crash issue on choosing language other than English and Chinese. - -### 1.3.4 -* Support for game version 0.10.28.20759. - -### 1.3.3 -* Fix a display issue on combobox of compression type. - -### 1.3.2 -* Add config UI on Save Game dialog, to set compression types. -* Change button text to `Save (Compress)` for better understanding. - -### 1.3.1 -* Add config to disable feature for auto saves. -* Fix bug that first save after game start is always compressed in Zstd. - -### 1.3.0 -* Separate config entries for manual save and auto save. -* Now you can still get speed benefit while setting compression type to `None` for auto saves, and for manual saves if using the new `Save` button. - + Adds a `nonewrap.dll` for this function. -* Update `LZ4` and `Zstd` library to latest version. -* `lz4wrap.dll` and `zstdwrap.dll` are compiled using `-O3` instead of `-Os`, expect to be slightly faster but larger. - -### 1.2.2 -* Fix #4, a bug caused by non-ASCII UTF-8 characters. -* Remove use of Harmony.UnpatchAll() to avoid warnings in BepInEx log. - -### 1.2.1 -* Simplified codes to display compression type and `Decompress` button on save/load UI, making CompressSave compatible with other MODs(like GalacticScale) which override `UILoadGameWindow::OnSelectedChange()`. -* Add compression level -5 to -1 for zstd, which makes it working better than lz4(which is actually lz4hc used by lz4frame) now: - * -5 gets faster compression speed than lz4 with still a little better compression ratio. - * -1 has almost the same speed against lz4 with greater compression ratio. - * Due to bug of r2modman UI which does not support negative integer, the config value of compression level is not limited any more. 
-* move native wrapper DLLs into `x64` folder to avoid warning logs on loading BepInEx plugins. - -### 1.2.0 -* Match game version 0.9.27.15033. -* Add new compression type: zstd (a bit slower but get better compression ratio than lz4). -* Add config to set compression type and level(Don't use high compression levels for zstd as they are very slow). -* Hide decompress button for normal save files. -* Optimize native dlls for other compression library support: - * Unified naming rules for filenames and export functions. - * Add compression level support. - -### 1.1.14 -* Fix Sandbox info on Save/Load Panel. -* Fix DLL version info. - -### 1.1.13 - -* Match game version 0.9.26.13026. -* Move "Sandbox Mode" checkbox on Save Panel to avoid overlap. -* Avoid warning message on "Continue" button of main menu. - -### 1.1.12 - -* Match game version 0.9.25.12007. - -### 1.1.11 - -* Fix 1.1.10 package issue. - -### 1.1.10 - -* Fix 1.1.8 Archive corruption with DIY System, corrupted archives can be fixed by using \[Fix118\] mod - - Fix118: https://github.com/bluedoom/DSP_Mod/blob/master/Fix118 - -### 1.1.9 - -* CompressSave is temporarily disabled due to some error with the DIY system. - -### 1.1.8 - -* Match game version 0.9.24.11029 - -### 1.1.7 - -* Fix incorrect data on statistic panel. -* Improve performance. - -### 1.1.6 - -* fix memory leak - -### 1.1.5 (Game Version 0.8.22) - -* Match game version 0.8.22. -* Thanks [@starfi5h] for - - PatchSave now use transpiler for better robustness. - - Change version check to soft warning. - - Add PeekableReader so other mods can use BinaryReader.PeekChar(). - - Change LZ4DecompressionStream.Position behavior. Position setter i - available now. - -### 1.1.4 (Game Version 0.8.19) - -* Match game version 0.8.19. - -### 1.1.3 (2021/05/29) (Game Version 0.7.18) - -* Match game version 0.7.18. -* Fix memory leak. - -### 1.1.2 (2021/03/24) (Game Version 0.6.17) - -* Handle lz4 library missing Error - -### 1.1.1 (2021/03/17) (Game Version 0.6.17) - -* Fix Load Error - -### 1.1.0 (2021/03/17) (Game Version 0.6.17) - -* Add UI button - -## Introduction - -* Reduce archive size by 30% / save time by 75% (Compressed by LZ4, on HDD + i7-4790K@4.4G + DDR3 2400MHz) - - | Before | After | - | - | - | - | 50MB 0.8s | 30MB 0.2s | - | 220M 3.6s | 147M 0.7s | - | 1010M 19.1s | 690M 3.6s | - -## Usage - -* You can set compression type for manual saves and auto saves individually. -* Manual saves are compressed while using the new `Save` button. -* You can still get speed benefit while setting compression type to `None` for auto saves, and for manual saves if using the new `Save` button. -* You can decompress saves on load panel. -* Remember to backup your save(use original save button) before updating game to avoid loading failure. 
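For reference, the compression type referred to in the Usage notes above is recognized by a four-byte magic header; the actual check lives in `SaveUtil.SaveGetCompressType` in the removed sources further down in this diff. A minimal, standalone sketch of that check (illustration only, not part of the original files; the `"lz4"`/`"zstd"`/`"none"` strings match the plugin's config values):

```csharp
using System.IO;

// Sketch of the header probe performed by the removed SaveUtil.SaveGetCompressType:
// compressed saves start with three 0xCC bytes, followed by 0xCC for LZ4 or
// 0xCD for zstd; anything else is treated as an ordinary, uncompressed save.
static class SaveHeaderProbe
{
    public static string Probe(string savePath)
    {
        using var fs = new FileStream(savePath, FileMode.Open, FileAccess.Read);
        for (var i = 0; i < 3; i++)
        {
            if (fs.ReadByte() != 0xCC)
                return "none"; // vanilla save, loaded by the game as-is
        }
        return fs.ReadByte() switch
        {
            0xCC => "lz4",
            0xCD => "zstd",
            _ => "none"
        };
    }
}
```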
- -## 更新日志 - -### 1.3.5 -* 修复了选择英文和中文以外的语言时的崩溃问题。 - -### 1.3.4 -* 支持游戏版本 0.10.28.20759。 - -### 1.3.3 -* 修复压缩类型下拉框显示问题。 - -### 1.3.2 -* 在保存面板上增加设置压缩方式的UI。 -* 将按钮文本改为`压缩保存`以区分功能。 - -### 1.3.1 -* 增加在自动存档中禁用压缩的设置项。 -* 修复一个导致游戏开始后第一次保存总是使用Zstd压缩的bug。 - -### 1.3.0 -* 分离手动存档和自动存档的设置项。 -* 现在在自动存档设置压缩类型为`存储`也可以获得速度提升,手动存档也可以在使用新的`保存`按钮后获得速度提升。 - + 为此增加了`nonewrap.dll`。 -* 更新`LZ4`和`Zstd`库到最新版本。 -* `lz4wrap.dll`和`zstdwrap.dll`使用`-O3`编译而不是`-Os`,速度略有提升但体积变大。 - -### 1.2.2 -* 修复 #4,一个导致非ASCII UTF-8字符导致的bug。 -* 移除使用Harmony.UnpatchAll()以避免在BepInEx日志中出现警告。 - -### 1.2.1 -* 简化代码以在存档读取面板上显示压缩类型和`解压`按钮,使得CompressSave与其他MOD(如GalacticScale)兼容,因为它们都覆盖了`UILoadGameWindow::OnSelectedChange()`。 -* 为zstd添加了压缩等级-5到-1,现在它比lz4(实际上是lz4frame)表现更好了: - * -5比lz4更快,但压缩比略有提升。 - * -1和lz4几乎一样快,但压缩比更高。 - * 由于r2modman UI的bug,压缩等级的设置项不再限制范围。 -* 将本地的wrapper DLL移动到`x64` - -## 介绍 - -* 减少存档容量30% / 存档用时75% (LZ4压缩,测试环境:机械硬盘 + i7-4790K@4.4G + DDR3 2400MHz) - - | 原存档 | 压缩后 | - | - | - | - | 50MB 0.8s | 30MB 0.2s | - | 220M 3.6s | 147M 0.7s | - | 1010M 19.1s | 690M 3.6s | - -## 使用说明 - -* 手动和自动存档都可以分开设置压缩方式。 -* 手动存档使用新加的保存按钮即可压缩保存。 -* 即使设置为不压缩,自动存档、以及使用新加的保存按钮手动保存也可以获得速度提升。 -* 可以在读取存档面板解压存档。 -* 如果游戏有版本更新记得先备份存档(使用原保存按钮)以免更新后无法读取存档。 +#### Moved [here](https://github.com/soarqin/DSP_Mods_TO/tree/master/CompressSave) diff --git a/CompressSave/SaveUtil.cs b/CompressSave/SaveUtil.cs deleted file mode 100644 index 14695dd..0000000 --- a/CompressSave/SaveUtil.cs +++ /dev/null @@ -1,108 +0,0 @@ -using System; -using System.IO; -using BepInEx.Logging; -using CompressSave.Wrapper; - -namespace CompressSave; - -public static class SaveUtil -{ - public static ManualLogSource Logger; - - public static readonly Version VerifiedVersion = new() - { - Major = 0, - Minor = 10, - Release = 28, - }; - - private static string UnzipToFile(DecompressionStream lzStream, string fullPath) - { - lzStream.ResetStream(); - var dir = Path.GetDirectoryName(fullPath); - var filename = "[Recovery]-" + Path.GetFileNameWithoutExtension(fullPath); - fullPath = filename + GameSave.saveExt; - if (dir != null) fullPath = Path.Combine(dir, fullPath); - var i = 0; - while(File.Exists(fullPath)) - { - fullPath = $"{filename}[{i++}]{GameSave.saveExt}"; - if (dir != null) fullPath = Path.Combine(dir, fullPath); - } - var buffer = new byte[1024 * 1024]; - using (var fs = new FileStream(fullPath, FileMode.Create)) - using (var br = new BinaryWriter(fs)) - { - for (var read = lzStream.Read(buffer, 0, buffer.Length); read > 0; read = lzStream.Read(buffer, 0, buffer.Length)) - { - fs.Write(buffer, 0, read); - } - fs.Seek(6L, SeekOrigin.Begin); - br.Write(fs.Length); - - } - return Path.GetFileNameWithoutExtension(fullPath); - } - - public static bool DecompressSave(string saveName, out string newSaveName) - { - newSaveName = string.Empty; - var path = GameConfig.gameSaveFolder + saveName + GameSave.saveExt; - try - { - using var fileStream = new FileStream(path, FileMode.Open, FileAccess.Read); - var compressType = SaveGetCompressType(fileStream); - switch (compressType) - { - case CompressionType.LZ4: - case CompressionType.Zstd: - using (var lzstream = new DecompressionStream(compressType == CompressionType.LZ4 ? 
PatchSave.LZ4Wrapper : PatchSave.ZstdWrapper, fileStream)) - { - newSaveName = UnzipToFile(lzstream, path); - } - return true; - case CompressionType.None: - return false; - default: - throw new ArgumentOutOfRangeException(); - } - } - catch (Exception e) - { - Logger.LogError(e); - return false; - } - } - public static CompressionType SaveGetCompressType(FileStream fs) - { - for (var i = 0; i < 3; i++) - { - if (0xCC != fs.ReadByte()) - return CompressionType.None; - } - - return fs.ReadByte() switch - { - 0xCC => CompressionType.LZ4, - 0xCD => CompressionType.Zstd, - _ => CompressionType.None - }; - } - - internal static CompressionType SaveGetCompressType(string saveName) - { - if (string.IsNullOrEmpty(saveName)) return CompressionType.None; - try - { - using var fileStream = new FileStream(GetFullSavePath(saveName), FileMode.Open); - return SaveGetCompressType(fileStream); - } - catch (Exception e) - { - Logger.LogWarning(e); - return CompressionType.None; - } - } - - private static string GetFullSavePath(string saveName) => GameConfig.gameSaveFolder + saveName + GameSave.saveExt; -} \ No newline at end of file diff --git a/CompressSave/Wrapper/BlackHoleStream.cs b/CompressSave/Wrapper/BlackHoleStream.cs deleted file mode 100644 index cf9cf9d..0000000 --- a/CompressSave/Wrapper/BlackHoleStream.cs +++ /dev/null @@ -1,46 +0,0 @@ -using System; -using System.IO; - -namespace CompressSave.Wrapper; - -class BlackHoleStream : Stream -{ - private long _length; - - public override bool CanRead => true; - - public override bool CanSeek => false; - - public override bool CanWrite => true; - - public override long Length => _length; - - public override long Position { get; set; } - - public override void Flush() - { - - } - - public override int Read(byte[] buffer, int offset, int count) - { - return count; - } - - public override long Seek(long offset, SeekOrigin origin) - { - throw new NotImplementedException(); - } - - public override void SetLength(long value) - { - _length = value; - } - - private readonly byte[] _testBuffer = new byte[1024 * 1024]; - - public override void Write(byte[] buffer, int offset, int count) - { - Array.Copy(buffer, offset, _testBuffer, 0, Math.Min(count, _testBuffer.Length)); - } -} \ No newline at end of file diff --git a/CompressSave/Wrapper/BufferWriter.cs b/CompressSave/Wrapper/BufferWriter.cs deleted file mode 100644 index 7d70fec..0000000 --- a/CompressSave/Wrapper/BufferWriter.cs +++ /dev/null @@ -1,291 +0,0 @@ -using System; -using System.IO; -using System.Text; -using System.Runtime.CompilerServices; - -namespace CompressSave.Wrapper; - -public unsafe class BufferWriter : BinaryWriter -{ - private ByteSpan CurrentBuffer => _doubleBuffer.WriteBuffer; - - private readonly DoubleBuffer _doubleBuffer; - - private readonly Encoding _encoding; - - private readonly int _maxBytesPerChar; - - private byte[] Buffer => CurrentBuffer.Buffer; - - private long SuplusCapacity => _endPos - _curPos; - - private long _swapedBytes; - - public long WriteSum => _swapedBytes + _curPos - _startPos; - - public override Stream BaseStream => _baseStream; - - public override void Write(char[] chars, int index, int count) - { - if (chars == null) - { - throw new ArgumentNullException(nameof(chars)); - } - byte[] bytes = _encoding.GetBytes(chars, index, count); - Write(bytes); - } - - private byte* _curPos; - private byte* _endPos; - private byte* _startPos; - private readonly Stream _baseStream; - - public BufferWriter(DoubleBuffer doubleBuffer, CompressionStream outStream) - : 
this(doubleBuffer, new UTF8Encoding(encoderShouldEmitUTF8Identifier: false, throwOnInvalidBytes: true), outStream) - { - - } - - private BufferWriter(DoubleBuffer buffer , UTF8Encoding encoding, CompressionStream outStream) : base(Stream.Null, encoding) - { - _baseStream = outStream; - _swapedBytes = 0; - _doubleBuffer = buffer; - RefreshStatus(); - _encoding = encoding; - _maxBytesPerChar = _encoding.GetMaxByteCount(1); - } - - private void SwapBuffer() - { - CurrentBuffer.Position = 0; - - CurrentBuffer.Length = (int)(_curPos - _startPos); - _swapedBytes += CurrentBuffer.Length; - _doubleBuffer.SwapBuffer(); - RefreshStatus(); - } - - private void RefreshStatus() - { - _startPos = (byte*)Unsafe.AsPointer(ref Buffer[0]); - _curPos = _startPos; - _endPos = (byte*)Unsafe.AsPointer(ref Buffer[Buffer.Length - 1]) + 1; - } - - private void CheckCapacityAndSwap(int requiredCapacity) - { - if (SuplusCapacity < requiredCapacity) - { - SwapBuffer(); - } - } - - public override void Write(byte value) - { - CheckCapacityAndSwap(1); - *(_curPos++) = value; - } - - public override void Write(bool value) => Write((byte)(value ? 1 : 0)); - - protected override void Dispose(bool disposing) - { - if (disposing) - { - SwapBuffer(); - } - base.Dispose(disposing); - } - - public override void Close() - { - Dispose(disposing: true); - } - - public override void Flush() - { - SwapBuffer(); - } - - public override long Seek(int offset, SeekOrigin origin) - { - throw new NotImplementedException(); - } - - public override void Write(sbyte value) => Write((byte)value); - - public override void Write(byte[] buffer) => Write(buffer, 0, buffer.Length); - - - public override void Write(byte[] buffer, int index, int count) - { - if (buffer == null) - { - throw new ArgumentNullException(nameof(buffer)); - } - fixed (byte* start = buffer) - { - byte* srcPos = start + index; - while (SuplusCapacity <= count) - { - int dstSuplus = (int)SuplusCapacity; - //Array.Copy(_buffer, index + writed, Buffer, Position, SuplusCapacity); - Unsafe.CopyBlock(_curPos, srcPos, (uint)dstSuplus); - count -= dstSuplus; - srcPos += dstSuplus; - _curPos = _endPos; - SwapBuffer(); - } - Unsafe.CopyBlock(_curPos, srcPos, (uint)count); - _curPos += count; - } - } - - public override void Write(char ch) - { - if (char.IsSurrogate(ch)) - { - throw new ArgumentException("Arg_SurrogatesNotAllowedAsSingleChar"); - } - - CheckCapacityAndSwap(_maxBytesPerChar); - - _curPos += _encoding.GetBytes(&ch, 1, _curPos, (int)SuplusCapacity); - } - - //slow - public override void Write(char[] chars) - { - if (chars == null) - { - throw new ArgumentNullException(nameof(chars)); - } - byte[] bytes = _encoding.GetBytes(chars, 0, chars.Length); - Write(bytes); - } - - public override void Write(double value) - { - CheckCapacityAndSwap(8); - ulong num = (ulong)(*(long*)(&value)); - *(_curPos++) = (byte)num; - *(_curPos++) = (byte)(num >> 8); - *(_curPos++) = (byte)(num >> 16); - *(_curPos++) = (byte)(num >> 24); - *(_curPos++) = (byte)(num >> 32); - *(_curPos++) = (byte)(num >> 40); - *(_curPos++) = (byte)(num >> 48); - *(_curPos++) = (byte)(num >> 56); - } - - //slow - public override void Write(decimal d) - { - CheckCapacityAndSwap(16); - int[] bits = decimal.GetBits(d); - - Write(bits[0]); - Write(bits[1]); - Write(bits[2]); - Write(bits[3]); - } - - - public override void Write(short value) - { - CheckCapacityAndSwap(2); - *(_curPos++) = (byte)value; - *(_curPos++) = (byte)(value >> 8); - } - - public override void Write(ushort value) - { - CheckCapacityAndSwap(2); 
- *(_curPos++) = (byte)value; - *(_curPos++) = (byte)(value >> 8); - } - - - public override void Write(int value) - { - if (SuplusCapacity < 4) - { - SwapBuffer(); - } - *(_curPos++) = (byte)value; - *(_curPos++) = (byte)(value >> 8); - *(_curPos++) = (byte)(value >> 16); - *(_curPos++) = (byte)(value >> 24); - } - - public override void Write(uint value) - { - CheckCapacityAndSwap(4); - *(_curPos++) = (byte)value; - *(_curPos++) = (byte)(value >> 8); - *(_curPos++) = (byte)(value >> 16); - *(_curPos++) = (byte)(value >> 24); - } - - - public override void Write(long value) - { - CheckCapacityAndSwap(8); - *(_curPos++) = (byte)value; - *(_curPos++) = (byte)(value >> 8); - *(_curPos++) = (byte)(value >> 16); - *(_curPos++) = (byte)(value >> 24); - *(_curPos++) = (byte)(value >> 32); - *(_curPos++) = (byte)(value >> 40); - *(_curPos++) = (byte)(value >> 48); - *(_curPos++) = (byte)(value >> 56); - } - - public override void Write(ulong value) - { - CheckCapacityAndSwap(8); - *(_curPos++) = (byte)value; - *(_curPos++) = (byte)(value >> 8); - *(_curPos++) = (byte)(value >> 16); - *(_curPos++) = (byte)(value >> 24); - *(_curPos++) = (byte)(value >> 32); - *(_curPos++) = (byte)(value >> 40); - *(_curPos++) = (byte)(value >> 48); - *(_curPos++) = (byte)(value >> 56); - } - - public override void Write(float value) - { - CheckCapacityAndSwap(4); - uint num = *(uint*)(&value); - *(_curPos++) = (byte)num; - *(_curPos++) = (byte)(num >> 8); - *(_curPos++) = (byte)(num >> 16); - *(_curPos++) = (byte)(num >> 24); - } - - - // Just use same mechanisum from `Write(char[] chars, int index, int count)` - public override void Write(string value) - { - if (value == null) - { - throw new ArgumentNullException(nameof(value)); - } - byte[] bytes = _encoding.GetBytes(value); - Write7BitEncodedInt(bytes.Length); - Write(bytes); - } - - - private new void Write7BitEncodedInt(int value) - { - uint num; - for (num = (uint)value; num >= 128; num >>= 7) - { - Write((byte)(num | 0x80)); - } - Write((byte)num); - } -} \ No newline at end of file diff --git a/CompressSave/Wrapper/CompressionStream.cs b/CompressSave/Wrapper/CompressionStream.cs deleted file mode 100644 index 98789a9..0000000 --- a/CompressSave/Wrapper/CompressionStream.cs +++ /dev/null @@ -1,223 +0,0 @@ -using System; -using System.IO; -using System.Threading; - -namespace CompressSave.Wrapper; - -public class CompressionStream : Stream -{ - private readonly WrapperDefines _wrapper; - - public const int Mb = 1024 * 1024; - public override bool CanRead => false; - - public override bool CanSeek => false; - - public override bool CanWrite => true; - - public override long Length => _totalWrite; - - // only use for game statistics - public override long Position - { - get => BufferWriter.WriteSum; - set => throw new NotImplementedException(); - } - - public readonly Stream OutStream; - - private long _totalWrite; - private readonly bool _useMultiThread; - private DoubleBuffer _doubleBuffer; - - private byte[] _outBuffer; - - private IntPtr _cctx; - private long _lastError; - private bool _stopWorker = true; - - public bool HasError() - { - return _lastError != 0; - } - - private void HandleError(long errorCode) - { - if (errorCode < 0) - { - _wrapper.CompressContextFree(_cctx); - _cctx = IntPtr.Zero; - _lastError = errorCode; - throw new Exception(errorCode.ToString()); - } - } - - public struct CompressBuffer - { - public byte[] ReadBuffer; - public byte[] WriteBuffer; - public byte[] OutBuffer; - } - - public static CompressBuffer CreateBuffer(int 
outBufferSize, int exBufferSize = 4 * Mb) - { - try - { - return new CompressBuffer - { - OutBuffer = new byte[outBufferSize], - ReadBuffer = new byte[exBufferSize], - WriteBuffer = new byte[exBufferSize], - }; - } - catch (Exception e) - { - Console.WriteLine(e.ToString()); - } - return new CompressBuffer(); - } - - public BufferWriter BufferWriter { get; private set; } - - public CompressionStream(WrapperDefines wrap, int compressionLevel, Stream outputStream, CompressBuffer compressBuffer, bool multiThread) - { - _wrapper = wrap; - OutStream = outputStream; - InitBuffer(compressBuffer.ReadBuffer, compressBuffer.WriteBuffer, compressBuffer.OutBuffer); - var writeSize = _wrapper.CompressBegin(out _cctx, compressionLevel, _outBuffer, _outBuffer.Length); - HandleError(writeSize); - outputStream.Write(_outBuffer, 0, (int)writeSize); - _useMultiThread = multiThread; - if (!multiThread) return; - _stopWorker = false; - var compressThread = new Thread(CompressAsync); - compressThread.Start(); - } - - private void InitBuffer(byte[] readBuffer, byte[] writeBuffer, byte[] outputBuffer) - { - _doubleBuffer = new DoubleBuffer(readBuffer ?? new byte[4 * Mb], writeBuffer ?? new byte[4 * Mb], Compress); - _outBuffer = outputBuffer ?? new byte[_wrapper.CompressBufferBound(writeBuffer?.Length ?? 4 * Mb)]; - BufferWriter = new BufferWriter(_doubleBuffer,this); - } - - public override void Flush() - { - _doubleBuffer.SwapBuffer(); - if(_useMultiThread) - { - _doubleBuffer.WaitReadEnd(); - } - lock (_outBuffer) - { - OutStream.Flush(); - } - } - - private void Compress() - { - if (!_useMultiThread) - { - Compress_Internal(); - } - } - - private void Compress_Internal() - { - var consumeBuffer = _doubleBuffer.ReadBegin(); - if (consumeBuffer.Length > 0) - { - lock (_outBuffer) - { - long writeSize; - try - { - writeSize = _wrapper.CompressUpdateEx(_cctx, _outBuffer, 0, consumeBuffer.Buffer, 0, consumeBuffer.Length); - HandleError(writeSize); - } - finally - { - _doubleBuffer.ReadEnd(); - } - OutStream.Write(_outBuffer, 0, (int)writeSize); - _totalWrite += writeSize; - } - } - else - { - _doubleBuffer.ReadEnd(); - } - } - - private void CompressAsync() - { - while(!_stopWorker) - { - Compress_Internal(); - } - } - - public override int Read(byte[] buffer, int offset, int count) - { - throw new NotImplementedException(); - } - - public override long Seek(long offset, SeekOrigin origin) - { - throw new NotImplementedException(); - } - - public override void SetLength(long value) - { - throw new NotImplementedException(); - } - - - public override void Write(byte[] buffer, int offset, int count) - { - BufferWriter.Write(buffer, offset, count); - //var writeBuffer = doubleBuffer.writeBuffer; - //int writeSize = writeBuffer.Write(buffer, offset, count); - //while (count - writeSize > 0) - //{ - // SwapBuffer(ref writeBuffer); - // offset += writeSize; - // count -= writeSize; - // writeSize = writeBuffer.Write(buffer, offset, count); - //} - //inputSum += count; - } - - private void FreeContext() - { - _wrapper.CompressContextFree(_cctx); - _cctx = IntPtr.Zero; - } - - private bool _closed; - public override void Close() - { - if (_closed) return; - BufferWriter.Close(); - _closed = true; - //Console.WriteLine($"FLUSH"); - - Flush(); - - // try stop the worker - _stopWorker = true; - _doubleBuffer.SwapBuffer(); - - var size = _wrapper.CompressEnd(_cctx, _outBuffer, _outBuffer.Length); - //Debug.Log($"End"); - OutStream.Write(_outBuffer, 0, (int)size); - base.Close(); - } - - protected override void Dispose(bool 
disposing) - { - FreeContext(); - base.Dispose(disposing); - } - -} \ No newline at end of file diff --git a/CompressSave/Wrapper/DecompressionStream.cs b/CompressSave/Wrapper/DecompressionStream.cs deleted file mode 100644 index 11afbe1..0000000 --- a/CompressSave/Wrapper/DecompressionStream.cs +++ /dev/null @@ -1,154 +0,0 @@ -using System; -using System.IO; - -namespace CompressSave.Wrapper; - -public class DecompressionStream : Stream -{ - private readonly WrapperDefines _wrapper; - - public override bool CanRead => true; - - public override bool CanSeek => false; - - public override bool CanWrite => false; - - public override long Length => _inStream.Length; - - public override long Position - { - get => _readPos; - set - { - if (value < _readPos) - ResetStream(); - else - value -= _readPos; - var tmpBuffer = new byte[1024]; - while (value > 0) - { - value -= Read(tmpBuffer, 0, (int)(value < 1024 ? value : 1024)); - } - } - } - - private readonly Stream _inStream; - - private IntPtr _dctx = IntPtr.Zero; - - private readonly ByteSpan _srcBuffer; - private readonly ByteSpan _dcmpBuffer; - private bool _decompressFinish; - private readonly long _startPos; - private long _readPos; //sum of readlen - - public DecompressionStream(WrapperDefines wrap, Stream inputStream, int extraBufferSize = 512 * 1024) - { - _wrapper = wrap; - _inStream = inputStream; - _startPos = inputStream.Position; - _srcBuffer = new ByteSpan(new byte[extraBufferSize]); - var len = Fill(); - var expect = _wrapper.DecompressBegin(ref _dctx, _srcBuffer.Buffer, ref len, out var blockSize); - _srcBuffer.Position += len; - if (expect < 0) throw new Exception(expect.ToString()); - _dcmpBuffer = new ByteSpan(new byte[blockSize]); - } - - public void ResetStream() - { - _inStream.Seek(_startPos, SeekOrigin.Begin); - _decompressFinish = false; - _srcBuffer.Clear(); - _dcmpBuffer.Clear(); - _wrapper.DecompressContextReset(_dctx); - _readPos = 0; - } - - private int Fill() - { - var suplus = _srcBuffer.Length - _srcBuffer.Position; - if (_srcBuffer.Length > 0 && _srcBuffer.Position >= suplus) - { - Array.Copy(_srcBuffer, _srcBuffer.Position, _srcBuffer, 0, suplus); - _srcBuffer.Length -= _srcBuffer.Position; - _srcBuffer.Position = 0; - } - - if (_srcBuffer.IdleCapacity > 0) - { - var readlen = _inStream.Read(_srcBuffer, _srcBuffer.Length, _srcBuffer.IdleCapacity); - _srcBuffer.Length += readlen; - } - - return _srcBuffer.Length - _srcBuffer.Position; - } - - public override void Flush() - { - } - - protected override void Dispose(bool disposing) - { - _wrapper.DecompressEnd(_dctx); - _dctx = IntPtr.Zero; - base.Dispose(disposing); - } - - public override int Read(byte[] buffer, int offset, int count) - { - var readlen = 0; - while (count > (readlen += _dcmpBuffer.Read(buffer, offset + readlen, count - readlen)) && !_decompressFinish) - { - var buffSize = Fill(); - if (buffSize <= 0) return readlen; - - var rt = _wrapper.DecompressUpdateEx(_dctx, _dcmpBuffer, 0, _dcmpBuffer.Capacity, _srcBuffer, _srcBuffer.Position, - buffSize); - if (rt.Expect < 0) throw new Exception(rt.Expect.ToString()); - if (rt.Expect == 0) _decompressFinish = true; - - _srcBuffer.Position += (int)rt.ReadLen; - _dcmpBuffer.Position = 0; - _dcmpBuffer.Length = (int)rt.WriteLen; - } - - _readPos += readlen; - return readlen; - } - - public int PeekByte() - { - if (_dcmpBuffer.Length <= _dcmpBuffer.Position) - { - var buffSize = Fill(); - if (buffSize <= 0) return -1; - - var rt = _wrapper.DecompressUpdateEx(_dctx, _dcmpBuffer, 0, _dcmpBuffer.Capacity, 
_srcBuffer, _srcBuffer.Position, - buffSize); - if (rt.Expect < 0) throw new Exception(rt.Expect.ToString()); - if (rt.Expect == 0) _decompressFinish = true; - - _srcBuffer.Position += (int)rt.ReadLen; - _dcmpBuffer.Position = 0; - _dcmpBuffer.Length = (int)rt.WriteLen; - } - - return _dcmpBuffer.Buffer[_dcmpBuffer.Position]; - } - - public override long Seek(long offset, SeekOrigin origin) - { - throw new NotImplementedException(); - } - - public override void SetLength(long value) - { - throw new NotImplementedException(); - } - - public override void Write(byte[] buffer, int offset, int count) - { - throw new NotImplementedException(); - } -} \ No newline at end of file diff --git a/CompressSave/Wrapper/DoubleBuffer.cs b/CompressSave/Wrapper/DoubleBuffer.cs deleted file mode 100644 index 23514ba..0000000 --- a/CompressSave/Wrapper/DoubleBuffer.cs +++ /dev/null @@ -1,115 +0,0 @@ -using System; -using System.Threading; - -namespace CompressSave.Wrapper; - -public class ByteSpan -{ - public byte[] Buffer { get; } - //public int Start; - public int Length; - public readonly int Capacity; - public int IdleCapacity => Capacity - Length; - public int Position; - - public ByteSpan(byte[] buffer) - { - Buffer = buffer; - Capacity = Buffer.Length; - } - public void Clear() - { - Length = 0; - Position = 0; - } - public int Write(byte[] src, int offset, int count) - { - int writeLen = Math.Min(Capacity - Length, count); - Array.Copy(src, offset, Buffer, Length, writeLen); - Length += writeLen; - return writeLen; - } - - public int Read(byte[] dst, int offset, int count) - { - count = Math.Min(Length - Position, count); - Array.Copy(Buffer, Position, dst, offset, count); - Position += count; - return count; - } - - public static implicit operator byte[](ByteSpan bs) => bs.Buffer; -} - -public struct ReadOnlySpan(byte[] buffer, int length) -{ - private readonly byte[] _buffer = buffer; - private int _position = 0; - - public int Read(byte[] dst, int offset, int count) - { - count = Math.Min(length - _position, count); - Array.Copy(_buffer, _position, dst, offset, count); - _position += count; - return count; - } - - public static implicit operator byte[](ReadOnlySpan s) => s._buffer; -} - -public class DoubleBuffer(byte[] readingBuffer, byte[] writingBuffer, Action onReadBufferReadyAction) -{ - public const int Mb = 1024 * 1024; - - public ByteSpan WriteBuffer = new(writingBuffer); - private ByteSpan _readBuffer; - private ByteSpan _midBuffer = new(readingBuffer); - - private readonly Semaphore _readEnd = new Semaphore(1, 1); - private readonly Semaphore _writeEnd = new Semaphore(0, 1); - - public ByteSpan ReadBegin() - { - _writeEnd.WaitOne(); - return _readBuffer; - } - - public void ReadEnd() - { - _readBuffer.Clear(); - _midBuffer = _readBuffer; - _readBuffer = null; - _readEnd.Release(); - } - /// - /// swap current write buffer to read and wait a new write buffer - /// - /// write buffer - public ByteSpan SwapBuffer(bool triggerEvent = true) - { - var write = SwapBegin(); - SwapEnd(); - onReadBufferReadyAction?.Invoke(); - return write; - } - - public void WaitReadEnd() - { - _readEnd.WaitOne(); - _readEnd.Release(); - } - - private ByteSpan SwapBegin() - { - _readEnd.WaitOne(); - _readBuffer = WriteBuffer; - WriteBuffer = _midBuffer; - _midBuffer = null; - return WriteBuffer; - } - - private void SwapEnd() - { - _writeEnd.Release(); - } -} \ No newline at end of file diff --git a/CompressSave/Wrapper/LZ4Wrapper.cs b/CompressSave/Wrapper/LZ4Wrapper.cs deleted file mode 100644 index 
406a974..0000000 --- a/CompressSave/Wrapper/LZ4Wrapper.cs +++ /dev/null @@ -1,69 +0,0 @@ -using System; -using System.Collections.Generic; -using System.IO; -using MonoMod.Utils; - -namespace CompressSave.Wrapper; - -public class LZ4API: WrapperDefines -{ - public static readonly bool Avaliable; - - static LZ4API() - { - Avaliable = true; - var assemblyPath = System.Reflection.Assembly.GetAssembly(typeof(LZ4API)).Location; - var root = string.Empty; - try - { - if (!string.IsNullOrEmpty(assemblyPath)) - { - root = Path.GetDirectoryName(assemblyPath) ?? string.Empty; - } - - var map = new Dictionary> - { - { - "lz4wrap.dll", [ - "lz4wrap.dll", - "x64/lz4wrap.dll", - "plugins/x64/lz4wrap.dll", - "BepInEx/scripts/x64/lz4wrap.dll", - Path.Combine(root, "lz4wrap.dll"), - Path.Combine(root, "x64/lz4wrap.dll"), - Path.Combine(root, "plugins/x64/lz4wrap.dll") - ] - }, - }; - typeof(LZ4API).ResolveDynDllImports(map); - } - catch (Exception e) - { - Avaliable = false; - Console.WriteLine($"Error: {e}"); - } - } - - public LZ4API() - { - CompressBufferBound = CompressBufferBound_; - CompressBegin = CompressBegin_; - CompressEnd = CompressEnd_; - CompressUpdate = CompressUpdate_; - CompressContextFree = CompressContextFree_; - DecompressBegin = DecompressBegin_; - DecompressEnd = DecompressEnd_; - DecompressUpdate = DecompressUpdate_; - DecompressContextReset = DecompressContextReset_; - } - - [DynDllImport(libraryName: "lz4wrap.dll", "CompressBufferBound")] protected static CompressBufferBoundFunc CompressBufferBound_; - [DynDllImport(libraryName: "lz4wrap.dll", "CompressBegin")] protected static CompressBeginFunc CompressBegin_; - [DynDllImport(libraryName: "lz4wrap.dll", "CompressEnd")] protected static CompressEndFunc CompressEnd_; - [DynDllImport(libraryName: "lz4wrap.dll", "CompressUpdate")] protected static CompressUpdateFunc CompressUpdate_; - [DynDllImport(libraryName: "lz4wrap.dll", "CompressContextFree")] protected static CompressContextFreeFunc CompressContextFree_; - [DynDllImport(libraryName: "lz4wrap.dll", "DecompressBegin")] protected static DecompressBeginFunc DecompressBegin_; - [DynDllImport(libraryName: "lz4wrap.dll", "DecompressEnd")] protected static DecompressEndFunc DecompressEnd_; - [DynDllImport(libraryName: "lz4wrap.dll", "DecompressUpdate")] protected static DecompressUpdateFunc DecompressUpdate_; - [DynDllImport(libraryName: "lz4wrap.dll", "DecompressContextReset")] protected static DecompressContextResetFunc DecompressContextReset_; -} diff --git a/CompressSave/Wrapper/NoneWrapper.cs b/CompressSave/Wrapper/NoneWrapper.cs deleted file mode 100644 index 16766fb..0000000 --- a/CompressSave/Wrapper/NoneWrapper.cs +++ /dev/null @@ -1,69 +0,0 @@ -using System; -using System.Collections.Generic; -using System.IO; -using MonoMod.Utils; - -namespace CompressSave.Wrapper; - -public class NoneAPI: WrapperDefines -{ - public static readonly bool Avaliable; - - static NoneAPI() - { - Avaliable = true; - string assemblyPath = System.Reflection.Assembly.GetAssembly(typeof(NoneAPI)).Location; - string root = string.Empty; - try - { - if (!string.IsNullOrEmpty(assemblyPath)) - { - root = Path.GetDirectoryName(assemblyPath) ?? 
string.Empty; - } - - var map = new Dictionary> - { - { - "nonewrap.dll", [ - "nonewrap.dll", - "x64/nonewrap.dll", - "plugins/x64/nonewrap.dll", - "BepInEx/scripts/x64/nonewrap.dll", - Path.Combine(root, "nonewrap.dll"), - Path.Combine(root, "x64/nonewrap.dll"), - Path.Combine(root, "plugins/x64/nonewrap.dll") - ] - }, - }; - typeof(NoneAPI).ResolveDynDllImports(map); - } - catch (Exception e) - { - Avaliable = false; - Console.WriteLine($"Error: {e}"); - } - } - - public NoneAPI() - { - CompressBufferBound = CompressBufferBound_; - CompressBegin = CompressBegin_; - CompressEnd = CompressEnd_; - CompressUpdate = CompressUpdate_; - CompressContextFree = CompressContextFree_; - DecompressBegin = DecompressBegin_; - DecompressEnd = DecompressEnd_; - DecompressUpdate = DecompressUpdate_; - DecompressContextReset = DecompressContextReset_; - } - - [DynDllImport(libraryName: "nonewrap.dll", "CompressBufferBound")] protected static CompressBufferBoundFunc CompressBufferBound_; - [DynDllImport(libraryName: "nonewrap.dll", "CompressBegin")] protected static CompressBeginFunc CompressBegin_; - [DynDllImport(libraryName: "nonewrap.dll", "CompressEnd")] protected static CompressEndFunc CompressEnd_; - [DynDllImport(libraryName: "nonewrap.dll", "CompressUpdate")] protected static CompressUpdateFunc CompressUpdate_; - [DynDllImport(libraryName: "nonewrap.dll", "CompressContextFree")] protected static CompressContextFreeFunc CompressContextFree_; - [DynDllImport(libraryName: "nonewrap.dll", "DecompressBegin")] protected static DecompressBeginFunc DecompressBegin_; - [DynDllImport(libraryName: "nonewrap.dll", "DecompressEnd")] protected static DecompressEndFunc DecompressEnd_; - [DynDllImport(libraryName: "nonewrap.dll", "DecompressUpdate")] protected static DecompressUpdateFunc DecompressUpdate_; - [DynDllImport(libraryName: "nonewrap.dll", "DecompressContextReset")] protected static DecompressContextResetFunc DecompressContextReset_; -} diff --git a/CompressSave/Wrapper/PeekableReader.cs b/CompressSave/Wrapper/PeekableReader.cs deleted file mode 100644 index 04a2a78..0000000 --- a/CompressSave/Wrapper/PeekableReader.cs +++ /dev/null @@ -1,11 +0,0 @@ -using System.IO; - -namespace CompressSave.Wrapper; - -internal class PeekableReader(DecompressionStream input) : BinaryReader(input) -{ - public override int PeekChar() - { - return input.PeekByte(); - } -} \ No newline at end of file diff --git a/CompressSave/Wrapper/WrapperDefines.cs b/CompressSave/Wrapper/WrapperDefines.cs deleted file mode 100644 index 652064a..0000000 --- a/CompressSave/Wrapper/WrapperDefines.cs +++ /dev/null @@ -1,64 +0,0 @@ -using System; - -namespace CompressSave.Wrapper; - -public struct DecompressStatus -{ - public long WriteLen; - public long ReadLen; - public long Expect; -} - -public class WrapperDefines -{ - public delegate long CompressBufferBoundFunc(long inBufferSize); - public delegate long CompressBeginFunc(out IntPtr ctx, int compressionLevel, byte[] outBuff, long outCapacity, byte[] dictBuffer = null, - long dictSize = 0); - public delegate long CompressEndFunc(IntPtr ctx, byte[] dstBuffer, long dstCapacity); - public delegate void CompressContextFreeFunc(IntPtr ctx); - public delegate long DecompressBeginFunc(ref IntPtr pdctx, byte[] inBuffer, ref int inBufferSize, out int blockSize, byte[] dict = null, long dictSize = 0); - public delegate long DecompressEndFunc(IntPtr dctx); - public delegate void DecompressContextResetFunc(IntPtr dctx); - protected unsafe delegate long CompressUpdateFunc(IntPtr ctx, byte* 
dstBuffer, long dstCapacity, byte* srcBuffer, - long srcSize); - protected unsafe delegate long DecompressUpdateFunc(IntPtr dctx, byte* dstBuffer, ref long dstCapacity, byte* srcBuffer, - ref long srcSize); - - public CompressBufferBoundFunc CompressBufferBound; - public CompressBeginFunc CompressBegin; - public CompressEndFunc CompressEnd; - public CompressContextFreeFunc CompressContextFree; - public DecompressBeginFunc DecompressBegin; - public DecompressEndFunc DecompressEnd; - public DecompressContextResetFunc DecompressContextReset; - protected CompressUpdateFunc CompressUpdate; - protected DecompressUpdateFunc DecompressUpdate; - - public unsafe long CompressUpdateEx(IntPtr ctx, byte[] dstBuffer, long dstOffset, byte[] srcBuffer, - long srcOffset, long srcLen) - { - fixed (byte* pdst = dstBuffer, psrc = srcBuffer) - { - return CompressUpdate(ctx, pdst + dstOffset, dstBuffer.Length - dstOffset, psrc + srcOffset, - srcLen - srcOffset); - } - } - - public unsafe DecompressStatus DecompressUpdateEx(IntPtr dctx, byte[] dstBuffer, int dstOffset, int dstCount, - byte[] srcBuffer, long srcOffset, long count) - { - long dstLen = Math.Min(dstCount, dstBuffer.Length - dstOffset); - long errCode; - fixed (byte* pdst = dstBuffer, psrc = srcBuffer) - { - errCode = DecompressUpdate(dctx, pdst + dstOffset, ref dstLen, psrc + srcOffset, ref count); - } - - return new DecompressStatus - { - Expect = errCode, - ReadLen = count, - WriteLen = dstLen, - }; - } -} diff --git a/CompressSave/Wrapper/ZstdWrapper.cs b/CompressSave/Wrapper/ZstdWrapper.cs deleted file mode 100644 index 677d7d4..0000000 --- a/CompressSave/Wrapper/ZstdWrapper.cs +++ /dev/null @@ -1,69 +0,0 @@ -using System; -using System.Collections.Generic; -using System.IO; -using MonoMod.Utils; - -namespace CompressSave.Wrapper; - -public class ZstdAPI: WrapperDefines -{ - public static readonly bool Avaliable; - - static ZstdAPI() - { - Avaliable = true; - string assemblyPath = System.Reflection.Assembly.GetAssembly(typeof(ZstdAPI)).Location; - string root = string.Empty; - try - { - if (!string.IsNullOrEmpty(assemblyPath)) - { - root = Path.GetDirectoryName(assemblyPath) ?? 
string.Empty; - } - - var map = new Dictionary> - { - { - "zstdwrap.dll", [ - "zstdwrap.dll", - "x64/zstdwrap.dll", - "plugins/x64/zstdwrap.dll", - "BepInEx/scripts/x64/zstdwrap.dll", - Path.Combine(root, "zstdwrap.dll"), - Path.Combine(root, "x64/zstdwrap.dll"), - Path.Combine(root, "plugins/x64/zstdwrap.dll") - ] - }, - }; - typeof(ZstdAPI).ResolveDynDllImports(map); - } - catch (Exception e) - { - Avaliable = false; - Console.WriteLine($"Error: {e}"); - } - } - - public ZstdAPI() - { - CompressBufferBound = CompressBufferBound_; - CompressBegin = CompressBegin_; - CompressEnd = CompressEnd_; - CompressUpdate = CompressUpdate_; - CompressContextFree = CompressContextFree_; - DecompressBegin = DecompressBegin_; - DecompressEnd = DecompressEnd_; - DecompressUpdate = DecompressUpdate_; - DecompressContextReset = DecompressContextReset_; - } - - [DynDllImport(libraryName: "zstdwrap.dll", "CompressBufferBound")] protected static CompressBufferBoundFunc CompressBufferBound_; - [DynDllImport(libraryName: "zstdwrap.dll", "CompressBegin")] protected static CompressBeginFunc CompressBegin_; - [DynDllImport(libraryName: "zstdwrap.dll", "CompressEnd")] protected static CompressEndFunc CompressEnd_; - [DynDllImport(libraryName: "zstdwrap.dll", "CompressUpdate")] protected static CompressUpdateFunc CompressUpdate_; - [DynDllImport(libraryName: "zstdwrap.dll", "CompressContextFree")] protected static CompressContextFreeFunc CompressContextFree_; - [DynDllImport(libraryName: "zstdwrap.dll", "DecompressBegin")] protected static DecompressBeginFunc DecompressBegin_; - [DynDllImport(libraryName: "zstdwrap.dll", "DecompressEnd")] protected static DecompressEndFunc DecompressEnd_; - [DynDllImport(libraryName: "zstdwrap.dll", "DecompressUpdate")] protected static DecompressUpdateFunc DecompressUpdate_; - [DynDllImport(libraryName: "zstdwrap.dll", "DecompressContextReset")] protected static DecompressContextResetFunc DecompressContextReset_; -} diff --git a/CompressSave/ZstdWrapC/CMakeLists.txt b/CompressSave/ZstdWrapC/CMakeLists.txt deleted file mode 100644 index 98c364c..0000000 --- a/CompressSave/ZstdWrapC/CMakeLists.txt +++ /dev/null @@ -1,17 +0,0 @@ -cmake_minimum_required(VERSION 3.2) - -project(zstdwrap) - -add_library(zstdwrap SHARED - dllmain.c zstdwrap.c zstdwrap.h) - -target_compile_definitions(zstdwrap PRIVATE ZSTDWRAP_EXPORTS ZSTDLIB_STATIC_API) -if(MSVC) - target_compile_options(zstdwrap PRIVATE /MT) -else() - target_link_options(zstdwrap PRIVATE -static) -endif() -target_link_libraries(zstdwrap PRIVATE zstd) -if(WIN32) - set_target_properties(zstdwrap PROPERTIES PREFIX "") -endif() diff --git a/CompressSave/ZstdWrapC/dllmain.c b/CompressSave/ZstdWrapC/dllmain.c deleted file mode 100644 index 896c9b5..0000000 --- a/CompressSave/ZstdWrapC/dllmain.c +++ /dev/null @@ -1,20 +0,0 @@ -// dllmain.cpp : Defines the entry point for the DLL application. 
-#include - -BOOL APIENTRY DllMain( HMODULE hModule, - DWORD ul_reason_for_call, - LPVOID lpReserved - ) -{ - switch (ul_reason_for_call) - { - case DLL_PROCESS_ATTACH: - DisableThreadLibraryCalls(hModule); - break; - case DLL_THREAD_ATTACH: - case DLL_THREAD_DETACH: - case DLL_PROCESS_DETACH: - break; - } - return TRUE; -} diff --git a/CompressSave/ZstdWrapC/zstdwrap.c b/CompressSave/ZstdWrapC/zstdwrap.c deleted file mode 100644 index c28c539..0000000 --- a/CompressSave/ZstdWrapC/zstdwrap.c +++ /dev/null @@ -1,84 +0,0 @@ -#include "zstdwrap.h" - -#include -#include -#include -#include - -size_t __stdcall CompressBufferBound(size_t inBufferSize) -{ - return ZSTD_COMPRESSBOUND(inBufferSize); -} - -size_t __stdcall CompressBegin(ZSTD_CStream** pctx, int compressionLevel, void* outBuff, size_t outCapacity, void* dict, size_t dictSize) -{ - ZSTD_CStream *ctx = ZSTD_createCStream(); - if (ctx == NULL) return -1; - ZSTD_CCtx_setParameter(ctx, ZSTD_c_compressionLevel, compressionLevel); - if (dict) - { - ZSTD_CCtx_loadDictionary(ctx, dict, dictSize); - } - *pctx = ctx; - return 0; -} - - -size_t __stdcall CompressUpdate(ZSTD_CStream* ctx,void* dstBuffer, size_t dstCapacity,const void* srcBuffer, size_t srcSize) -{ - ZSTD_outBuffer obuf = {dstBuffer, dstCapacity, 0}; - ZSTD_inBuffer ibuf = {srcBuffer, srcSize, 0}; - do - { - ZSTD_compressStream2(ctx, &obuf, &ibuf, ZSTD_e_continue); - } - while (ibuf.pos < ibuf.size); - return obuf.pos; -} - -size_t __stdcall CompressEnd(ZSTD_CStream* ctx, void* dstBuffer, size_t dstCapacity) -{ - ZSTD_outBuffer obuf = {dstBuffer, dstCapacity, 0}; - ZSTD_inBuffer ibuf = {NULL, 0, 0}; - while (ZSTD_compressStream2(ctx, &obuf, &ibuf, ZSTD_e_end) > 0) {} - return obuf.pos; -} - -void __stdcall CompressContextFree(ZSTD_CStream* ctx) -{ - ZSTD_freeCStream(ctx); -} - -size_t __stdcall DecompressBegin(ZSTD_DStream **pdctx,void *inBuffer,size_t *inBufferSize, size_t *blockSize, void* dict, size_t dictSize) -{ - ZSTD_DStream *ctx = ZSTD_createDStream(); - if (ctx == NULL) return -1; - if (dict) - { - ZSTD_DCtx_loadDictionary(ctx, dict, dictSize); - } - *pdctx = ctx; - *inBufferSize = 0; - *blockSize = ZSTD_DStreamOutSize() << 2; - return 0; -} - -void __stdcall DecompressContextReset(ZSTD_DStream* dctx) -{ - ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only); -} - -size_t __stdcall DecompressUpdate(ZSTD_DStream* dctx, void* outBuffer, size_t * outBufferSize, void* inBuffer, size_t * inBufferSize) -{ - ZSTD_outBuffer obuf = {outBuffer, *outBufferSize, 0}; - ZSTD_inBuffer ibuf = {inBuffer, *inBufferSize, 0}; - size_t r = ZSTD_decompressStream(dctx, &obuf, &ibuf); - *outBufferSize = obuf.pos; - *inBufferSize = ibuf.pos; - return r; -} - -size_t __stdcall DecompressEnd(ZSTD_DStream* ctx) -{ - return ZSTD_freeDStream(ctx); -} \ No newline at end of file diff --git a/CompressSave/ZstdWrapC/zstdwrap.h b/CompressSave/ZstdWrapC/zstdwrap.h deleted file mode 100644 index 2f36b1a..0000000 --- a/CompressSave/ZstdWrapC/zstdwrap.h +++ /dev/null @@ -1,34 +0,0 @@ -#pragma once - -#include - -#include - -#if defined(__cplusplus) -#define API_EXTERN_C extern "C" -#else -#define API_EXTERN_C -#endif -#ifdef ZSTDWRAP_EXPORTS -#define ZSTDAPI API_EXTERN_C __declspec(dllexport) -#else -#define ZSTDAPI API_EXTERN_C __declspec(dllimport) -#endif - -ZSTDAPI void __stdcall CompressContextFree(ZSTD_CStream* ctx); - -ZSTDAPI size_t __stdcall CompressBufferBound(size_t inBufferSize); - -ZSTDAPI size_t __stdcall CompressBegin(ZSTD_CStream** ctx, int compressionLevel, void* outBuff, size_t outCapacity, void* 
dict, size_t dictSize); - -ZSTDAPI size_t __stdcall CompressUpdate(ZSTD_CStream* ctx, void* dstBuffer, size_t dstCapacity, const void* srcBuffer, size_t srcSize); - -ZSTDAPI size_t __stdcall CompressEnd(ZSTD_CStream* ctx, void* dstBuffer, size_t dstCapacity); - -ZSTDAPI size_t __stdcall DecompressBegin(ZSTD_DStream** pdctx, void* inBuffer, size_t* inBufferSize, size_t* blockSize, void* dict, size_t dictSize); - -ZSTDAPI void __stdcall DecompressContextReset(ZSTD_DStream* dctx); - -ZSTDAPI size_t __stdcall DecompressUpdate(ZSTD_DStream* dctx, void* outBuffer, size_t* outBufferSize, void* inBuffer, size_t* inBufferSize); - -ZSTDAPI size_t __stdcall DecompressEnd(ZSTD_DStream* dctx); diff --git a/CompressSave/package/icon.png b/CompressSave/package/icon.png deleted file mode 100644 index 0fee584..0000000 Binary files a/CompressSave/package/icon.png and /dev/null differ diff --git a/CompressSave/package/manifest.json b/CompressSave/package/manifest.json deleted file mode 100644 index a7515b3..0000000 --- a/CompressSave/package/manifest.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "name": "CompressSave", - "version_number": "1.3.5", - "website_url": "https://github.com/soarqin/DSP_Mods/tree/master/CompressSave", - "description": "Compress game saves to reduce space use and boost save speed / 压缩游戏存档以降低空间使用并提升保存速度", - "dependencies": ["xiaoye97-BepInEx-5.4.17"] -} \ No newline at end of file diff --git a/CompressSave/package/plugins/x64/lz4wrap.dll b/CompressSave/package/plugins/x64/lz4wrap.dll deleted file mode 100644 index 28b26b9..0000000 Binary files a/CompressSave/package/plugins/x64/lz4wrap.dll and /dev/null differ diff --git a/CompressSave/package/plugins/x64/nonewrap.dll b/CompressSave/package/plugins/x64/nonewrap.dll deleted file mode 100644 index 846e38f..0000000 Binary files a/CompressSave/package/plugins/x64/nonewrap.dll and /dev/null differ diff --git a/CompressSave/package/plugins/x64/zstdwrap.dll b/CompressSave/package/plugins/x64/zstdwrap.dll deleted file mode 100644 index 2f1e2fc..0000000 Binary files a/CompressSave/package/plugins/x64/zstdwrap.dll and /dev/null differ diff --git a/DSP_Mods.sln b/DSP_Mods.sln index ce746d6..f1fa876 100644 --- a/DSP_Mods.sln +++ b/DSP_Mods.sln @@ -7,8 +7,9 @@ EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "HideTips", "HideTips\HideTips.csproj", "{4EABD71D-477F-448B-801B-48F8745A3FA7}" EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Dustbin", "Dustbin\Dustbin.csproj", "{931F0230-5941-4E49-AB19-66921AF14096}" -EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CompressSave", "CompressSave\CompressSave.csproj", "{A283A918-207F-4DD7-A098-F99EBE610558}" + ProjectSection(ProjectDependencies) = postProject + {B8EB3D8D-5613-42F0-9040-EAA11A38C6AC} = {B8EB3D8D-5613-42F0-9040-EAA11A38C6AC} + EndProjectSection EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "OCBatchBuild", "OCBatchBuild\OCBatchBuild.csproj", "{E8FB30A0-29BF-4CF0-8E08-9784962A8656}" EndProject @@ -55,10 +56,6 @@ Global {931F0230-5941-4E49-AB19-66921AF14096}.Debug|Any CPU.Build.0 = Debug|Any CPU {931F0230-5941-4E49-AB19-66921AF14096}.Release|Any CPU.ActiveCfg = Release|Any CPU {931F0230-5941-4E49-AB19-66921AF14096}.Release|Any CPU.Build.0 = Release|Any CPU - {A283A918-207F-4DD7-A098-F99EBE610558}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {A283A918-207F-4DD7-A098-F99EBE610558}.Debug|Any CPU.Build.0 = Debug|Any CPU - {A283A918-207F-4DD7-A098-F99EBE610558}.Release|Any CPU.ActiveCfg = Release|Any CPU - 
{A283A918-207F-4DD7-A098-F99EBE610558}.Release|Any CPU.Build.0 = Release|Any CPU {E8FB30A0-29BF-4CF0-8E08-9784962A8656}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {E8FB30A0-29BF-4CF0-8E08-9784962A8656}.Debug|Any CPU.Build.0 = Debug|Any CPU {E8FB30A0-29BF-4CF0-8E08-9784962A8656}.Release|Any CPU.ActiveCfg = Release|Any CPU diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..d76e015 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022-2023 Soar Qin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md index 5375461..352638d 100644 --- a/README.md +++ b/README.md @@ -7,8 +7,7 @@ Add various cheat functions while disabling abnormal determinants # [CompressSave](CompressSave) -Compress game saves to reduce space use and boost save speed -压缩游戏存档以降低空间使用并提升保存速度 +Moved [here](https://github.com/soarqin/DSP_Mods_TO/tree/master/CompressSave) # [LogisticMiner](LogisticMiner)
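Since the CompressSave sources deleted above now live in the linked DSP_Mods_TO repository, here is a brief illustrative sketch (assuming the same public surface as the removed `Wrapper` classes) of driving `DecompressionStream` the way the removed `SaveUtil.DecompressSave` does: the four-byte magic header is consumed first, then the rest of the file is fed to the zstd wrapper. It deliberately omits the save-length fix-up that `UnzipToFile` performs at offset 6, so it is a sketch rather than a drop-in replacement.

```csharp
using System.IO;
using CompressSave;
using CompressSave.Wrapper;

// Illustration only (not part of this commit): decompress a zstd-compressed
// save into a plain file, mirroring how the removed SaveUtil.DecompressSave
// wires a FileStream into DecompressionStream.
static class DecompressSketch
{
    public static void DecompressTo(string compressedSavePath, string outputPath)
    {
        using var input = new FileStream(compressedSavePath, FileMode.Open, FileAccess.Read);
        input.Seek(4, SeekOrigin.Begin); // skip the 0xCC 0xCC 0xCC 0xCD magic header
        using var zstd = new DecompressionStream(PatchSave.ZstdWrapper, input);
        using var output = new FileStream(outputPath, FileMode.Create);
        var buffer = new byte[1024 * 1024];
        for (int read; (read = zstd.Read(buffer, 0, buffer.Length)) > 0; )
            output.Write(buffer, 0, read);
    }
}
```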